diff --git a/go.mod b/go.mod
index 907db42da5..c612391e5f 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,6 @@ module github.com/openshift/machine-config-operator
 go 1.12
 
 require (
-	github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 // indirect
 	github.com/BurntSushi/toml v0.3.1
 	github.com/InVisionApp/go-health v2.1.0+incompatible
 	github.com/Masterminds/goutils v1.1.0 // indirect
@@ -15,16 +14,13 @@ require (
 	github.com/ashcrow/osrelease v0.0.0-20180626175927-9b292693c55c
 	github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 // indirect
 	github.com/containers/image v3.0.2+incompatible
-	github.com/containers/storage v1.13.4
+	github.com/containers/storage v1.13.5
 	github.com/coreos/container-linux-config-transpiler v0.9.0
 	github.com/coreos/ignition v0.33.0
 	github.com/creack/pty v1.1.9 // indirect
-	github.com/cri-o/cri-o v1.9.0-beta.2.0.20191003162030-4775e1c05c26
+	github.com/cri-o/cri-o v1.16.2
 	github.com/davecgh/go-spew v1.1.1
-	github.com/docker/docker-credential-helpers v0.6.3 // indirect
-	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 // indirect
-	github.com/elazarl/goproxy/ext v0.0.0-20190911111923-ecfe977594f1 // indirect
 	github.com/emicklei/go-restful v2.10.0+incompatible // indirect
 	github.com/ghodss/yaml v1.0.0
 	github.com/go-bindata/go-bindata v3.1.1+incompatible
@@ -33,14 +29,12 @@ require (
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
 	github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc // indirect
 	github.com/golangci/golangci-lint v1.18.0
-	github.com/google/go-cmp v0.3.1 // indirect
 	github.com/google/renameio v0.1.0
 	github.com/googleapis/gnostic v0.3.1 // indirect
 	github.com/hashicorp/golang-lru v0.5.3 // indirect
 	github.com/huandu/xstrings v1.2.0 // indirect
 	github.com/imdario/mergo v0.3.7
 	github.com/joho/godotenv v1.3.0
-	github.com/onsi/ginkgo v1.10.2 // indirect
 	github.com/opencontainers/go-digest v1.0.0-rc1
 	github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible
 	github.com/openshift/client-go v0.0.0-20191001081553-3b0e988f8cb0
@@ -58,7 +52,7 @@ require (
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3 // indirect
 	golang.org/x/sys v0.0.0-20191002091554-b397fe3ad8ed // indirect
-	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
+	golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0
 	gonum.org/v1/gonum v0.0.0-20190929233944-b20cf7805fc4 // indirect
 	gonum.org/v1/netlib v0.0.0-20190926062253-2d6e29b73a19 // indirect
 	k8s.io/api v0.17.1
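Note: the load-bearing change in go.mod is the cri-o bump from the v1.9.0 beta snapshot to the tagged v1.16.2 release, which is what makes the `crioconfig.MetricsConfig` type available to `helpers.go` further down; the `containers/storage` and `golang.org/x/time` bumps ride along with it. Presumably this was produced with the usual `go get github.com/cri-o/cri-o@v1.16.2` followed by `go mod tidy` (an assumed workflow, not recorded in the diff), which also accounts for the large `go.sum` churn that follows.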
diff --git a/go.sum b/go.sum
index f2c162ec7c..51f9f0b157 100644
--- a/go.sum
+++ b/go.sum
@@ -129,14 +129,20 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cloudflare/cfssl v0.0.0-20190716005913-5fc50ce768d7/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
 github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0=
 github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
+github.com/containerd/cgroups v0.0.0-20191003151125-bec0ecbda146/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa h1:GnRy2maqb8vcJhYRN5L+5WyYNKfUG4otiz2zxE182ng=
 github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50 h1:WMpHmC6AxwWb9hMqhudkqG7A/p14KiMnl6d3r1iUMjU=
 github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.2.9 h1:6tyNjBmAMG47QuFPIT9LgiiexoVxC6qpTGR+eD0R0Z8=
 github.com/containerd/containerd v1.2.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20180814194400-c7c5070e6f6e/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -160,19 +166,35 @@ github.com/containernetworking/plugins v0.8.2/go.mod h1:TxALKWZpWL79BC3GOYKJzzXr
 github.com/containers/buildah v1.10.1/go.mod h1:ZTyMFo3IQlu9tYndtnAf0Tjf2NdeUL6bY2/TpP9uIuU=
 github.com/containers/buildah v1.11.2 h1:U6Abrp1J7H19vHvhqIran4Xvw+Z3WIqMM86fIt9L7Qk=
 github.com/containers/buildah v1.11.2/go.mod h1:CtnP3vsLiU3xgKvkhdb4b0IzYwXNzHRv3ezl4z+RPC0=
+github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 h1:5WUe09k2sJSbmxwLHZLHc41TrIPrP0GlbhX+WDJBqvs=
+github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982/go.mod h1:eGWB4tLoo0hIBuytQpvgUC0hk2mvl2ofaYBeDsU/qoc=
 github.com/containers/conmon v0.3.0/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/conmon v2.0.0+incompatible h1:lloyKRwsrX8i4fYdnNpHYfiWNjMpmTTQtsgiYCRKASE=
 github.com/containers/conmon v2.0.0+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
+github.com/containers/conmon v2.0.5+incompatible h1:cDCOHkOICRQQUpA+HavlMbnNm7b6ML1i2myajSKfgws=
+github.com/containers/conmon v2.0.5+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/image v3.0.2+incompatible h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE=
 github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
+github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4=
+github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY=
 github.com/containers/libpod v1.5.1 h1:Msp7bvAmNOeVfo8RW2QcRrcCCMYMBVHq6mWok97xq5c=
 github.com/containers/libpod v1.5.1/go.mod h1:S5TOYh/q+DFAsTeGH0gUS3kYr9MLD2HUC9umQpwwSCk=
+github.com/containers/libpod v1.6.3-0.20191101152258-04e8bf3dba50 h1:htMcfTu+mPPx1hcNqxUrGhdaCTGYQ1WB+I7GA/Jiffw=
+github.com/containers/libpod v1.6.3-0.20191101152258-04e8bf3dba50/go.mod h1:9VZM1iXAuT2JyITthBM6wzxfqAy2F5nT4sHhsjKDrho=
+github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
+github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/psgo v1.3.1 h1:1kE+jJ9Ou5f9zQT/M2IdeSclsKWsXrSFlOcnqc+F2TA=
 github.com/containers/psgo v1.3.1/go.mod h1:LLiRMmxZ6FWP4bB/fOUu6kDT+4okk/ZCeeykqh0O5Ns=
+github.com/containers/psgo v1.3.2 h1:jYfppPih3S/j2Yi5O14AXjd8GfCx1ph9L3YsoK3adko=
+github.com/containers/psgo v1.3.2/go.mod h1:ENXXLQ5E1At4K0EUsGogXBJi/C28gwqkONWeLPI9fJ8=
+github.com/containers/psgo v1.4.0 h1:D8B4fZCCZhYgc8hDyMPCiShOinmOB1TP1qe46sSC19k=
+github.com/containers/psgo v1.4.0/go.mod h1:ENXXLQ5E1At4K0EUsGogXBJi/C28gwqkONWeLPI9fJ8=
 github.com/containers/storage v1.13.1/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
 github.com/containers/storage v1.13.2/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
 github.com/containers/storage v1.13.4 h1:j0bBaJDKbUHtAW1MXPFnwXJtqcH+foWeuXK1YaBV5GA=
 github.com/containers/storage v1.13.4/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
+github.com/containers/storage v1.13.5 h1:/SUzGeOP2HDijpF7Yur21Ch6WTZC1BNeZF917CWcp5c=
+github.com/containers/storage v1.13.5/go.mod h1:HELz8Sn+UVbPaUZMI8RvIG9doD4y4z6Gtg4k7xdd2ZY=
 github.com/coredns/corefile-migration v1.0.4/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
@@ -209,6 +231,10 @@ github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cri-o/cri-o v1.9.0-beta.2.0.20191003162030-4775e1c05c26 h1:CncYqSfI3xVfedl3cQCCHKEx+nU4QGjcSVk3W8lyFhE=
 github.com/cri-o/cri-o v1.9.0-beta.2.0.20191003162030-4775e1c05c26/go.mod h1:Zyf/AWxspq/FDHRYERdpXYUvLQPiC5Vz1lVNl/TpuBY=
+github.com/cri-o/cri-o v1.16.1 h1:DX9k1xRqoJsf85cXFzCDv4JDp4II9q7K+vUHEjk9djM=
+github.com/cri-o/cri-o v1.16.1/go.mod h1:NK/V2b91qielPYnxaLBeCrq0nns9J+jT8tzGyW+rFs0=
+github.com/cri-o/cri-o v1.16.2 h1:yd238JWjr3pz3blXGLq13AIXvP7JMPTaGb1VLY5wVuA=
+github.com/cri-o/cri-o v1.16.2/go.mod h1:LPmiJ1xzXhTVxm12+SzrMRSfX1EXHUd7OLEvauCEo4s=
 github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca h1:CJstDqYy9ClWuPcDHMTCAiUS+ckekluYetGR2iYYWuo=
 github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca/go.mod h1:BO0al9TKber3XUTucLzKgoG5sq8qiOB41H7zSdfw6r8=
 github.com/cyphar/filepath-securejoin v0.2.1/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
@@ -230,15 +256,20 @@ github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65/go.mod h1:J2gT
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v0.7.3-0.20190410184157-6d18c6a06295 h1:s3aSftc5noimmnhBGn38zd12TbMnhLqHAmyWZcpk+uE=
 github.com/docker/docker v0.7.3-0.20190410184157-6d18c6a06295/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce h1:H3csZuxZESJeeEiOxq4YXPNmLFbjl7u2qVBrAAGX/sA=
+github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/docker-credential-helpers v0.6.2/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.0.0-20180212134524-7beb39f0b969/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
@@ -292,8 +323,11 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
 github.com/fsouza/go-dockerclient v1.3.0/go.mod h1:IN9UPc4/w7cXiARH2Yg99XxUHbAM+6rAi9hzBVbkWRU=
 github.com/fsouza/go-dockerclient v1.4.1 h1:W7wuJ3IB48WYZv/UBk9dCTIb9oX805+L9KIm65HcUYs=
 github.com/fsouza/go-dockerclient v1.4.1/go.mod h1:PUNHxbowDqRXfRgZqMz1OeGtbWC6VKyZvJ99hDjB0qs=
+github.com/fsouza/go-dockerclient v1.5.0 h1:7OtayOe5HnoG+KWMHgyyPymwaodnB2IDYuVfseKyxbA=
+github.com/fsouza/go-dockerclient v1.5.0/go.mod h1:AqZZK/zFO3phxYxlTsAaeAMSdQ9mgHuhy+bjN034Qds=
 github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
@@ -398,6 +432,7 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/godbus/dbus v0.0.0-20190623212516-8a1682060722 h1:NNKZiuNXd6lpZRyoFM/uhssj5W9Ps1DbhGHxT49Pm9I=
 github.com/godbus/dbus v0.0.0-20190623212516-8a1682060722/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/gogo/protobuf v0.0.0-20170815085658-fcdc5011193f/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -497,6 +532,7 @@ github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEo
 github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v0.0.0-20170217192616-94e7d24fd285/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -508,6 +544,7 @@ github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 h1:J
 github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
 github.com/gostaticanalysis/analysisutil v0.0.3 h1:iwp+5/UAyzQSFgQ4uR2sni99sJ8Eo9DEacKWM5pekIg=
 github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -579,6 +616,8 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y=
 github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI=
+github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
@@ -634,6 +673,8 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
 github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc=
 github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -663,6 +704,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
 github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
 github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
 github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
 github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
 github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
@@ -694,6 +737,7 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
 github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c h1:eSfnfIuwhxZyULg1NNuZycJcYkjYVGYe7FczwQReM6U=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
@@ -708,6 +752,8 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU=
+github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/runc v0.0.0-20190425234816-dae70e8efea4/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
 github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -736,6 +782,8 @@ github.com/openshift/cluster-etcd-operator v0.0.0-alpha.0.0.20191025163650-5854b
 github.com/openshift/cluster-etcd-operator v0.0.0-alpha.0.0.20191025163650-5854b5c48ce4/go.mod h1:vcBAUefK8pQmTPQ3jlCemORXhnRnq3aTsfOSVeaWliY=
 github.com/openshift/imagebuilder v1.1.0 h1:oT704SkwMEzmIMU/+Uv1Wmvt+p10q3v2WuYMeFI18c4=
 github.com/openshift/imagebuilder v1.1.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
+github.com/openshift/imagebuilder v1.1.1 h1:KAUR31p8UBJdfVO42azWgb+LeMAed2zaKQ19e0C0X2I=
+github.com/openshift/imagebuilder v1.1.1/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
 github.com/openshift/library-go v0.0.0-20190619114638-6b58b672ee58/go.mod h1:NBttNjZpWwup/nthuLbPAPSYC8Qyo+BBK5bCtFoyYjo=
 github.com/openshift/library-go v0.0.0-20191003152030-97c62d8a2901 h1:UQkY3zDJG4MMVzS7VFsyACxs/haMJ2aHNR/jXzWhScs=
 github.com/openshift/library-go v0.0.0-20191003152030-97c62d8a2901/go.mod h1:NBttNjZpWwup/nthuLbPAPSYC8Qyo+BBK5bCtFoyYjo=
@@ -748,6 +796,7 @@ github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOy
 github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
 github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
@@ -764,6 +813,8 @@ github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prY
 github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
 github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE=
 github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
+github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9 h1:kyf9snWXHvQc+yxE9imhdI8YAm4oKeZISlaAR+x73zs=
+github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
 github.com/prometheus/client_golang v0.0.0-20181207105117-505eaef01726/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -857,6 +908,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
+github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
 github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
 github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
@@ -882,7 +934,9 @@ github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec h1:AmoEvWAO3nDx1
 github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
 github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.19.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
 github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
 github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0=
@@ -922,10 +976,12 @@ github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmF
 github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
 github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20190816131739-be0936907f66/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
 github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg=
 github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
@@ -979,6 +1035,7 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1079,6 +1136,7 @@ golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1087,6 +1145,7 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdO
 golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190919044723-0c1ff786ef13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191002091554-b397fe3ad8ed h1:5TJcLJn2a55mJjzYk0yOoqN8X1OdvBDUnaZaKKyQtkY=
 golang.org/x/sys v0.0.0-20191002091554-b397fe3ad8ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1100,6 +1159,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w=
+golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1169,14 +1230,18 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk=
 google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
diff --git a/pkg/controller/container-runtime-config/helpers.go b/pkg/controller/container-runtime-config/helpers.go
index 81dfad649b..37827cd47e 100644
--- a/pkg/controller/container-runtime-config/helpers.go
+++ b/pkg/controller/container-runtime-config/helpers.go
@@ -55,6 +55,7 @@ type tomlConfigCRIO struct {
 		Runtime struct{ crioconfig.RuntimeConfig } `toml:"runtime"`
 		Image   struct{ crioconfig.ImageConfig }   `toml:"image"`
 		Network struct{ crioconfig.NetworkConfig } `toml:"network"`
+		Metrics struct{ crioconfig.MetricsConfig } `toml:"metrics"`
 	} `toml:"crio"`
 }
 
@@ -194,7 +195,7 @@ func updateCRIOConfig(data []byte, internal *mcfgv1.ContainerRuntimeConfiguratio
 	if internal.PidsLimit > 0 {
 		tomlConf.Crio.Runtime.PidsLimit = internal.PidsLimit
 	}
-	if internal.LogSizeMax != (resource.Quantity{}) {
+	if internal.LogSizeMax.Value() != 0 {
 		tomlConf.Crio.Runtime.LogSizeMax = internal.LogSizeMax.Value()
 	}
 	if internal.LogLevel != "" {
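An aside on the `LogSizeMax` guard above: `resource.Quantity` is a struct whose `==`/`!=` compares the cached string representation and format along with the number, so a quantity that is numerically zero can still compare non-equal to the zero value `resource.Quantity{}`. Keying the guard off `Value() != 0` compares the actual number. A minimal standalone sketch (not code from this PR) illustrating the difference:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	zero := resource.Quantity{}       // zero value: no cached string, no format
	parsed := resource.MustParse("0") // numerically zero, but carries s="0" and a format

	// Struct equality compares representation, not value: these differ even
	// though both mean "0", which is why the old guard was fragile.
	fmt.Println(parsed == zero)      // false
	fmt.Println(parsed.Value() == 0) // true: compare the numeric value instead
}
```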
diff --git a/templates/master/01-master-container-runtime/_base/files/crio.yaml b/templates/master/01-master-container-runtime/_base/files/crio.yaml
index c9f105d178..84d6e9f221 100644
--- a/templates/master/01-master-container-runtime/_base/files/crio.yaml
+++ b/templates/master/01-master-container-runtime/_base/files/crio.yaml
@@ -3,54 +3,23 @@ mode: 0644
 path: "/etc/crio/crio.conf"
 contents:
   inline: |
-    # The CRI-O configuration file specifies all of the available configuration
-    # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime
-    # daemon, but in a TOML format that can be more easily modified and versioned.
-    #
-    # Please refer to crio.conf(5) for details of all configuration options.
-
-    # CRI-O supports partial configuration reload during runtime, which can be
-    # done by sending SIGHUP to the running process. Currently supported options
-    # are explicitly mentioned with: 'This option supports live configuration
-    # reload'.
-
-    # CRI-O reads its storage defaults from the containers-storage.conf(5) file
-    # located at /etc/containers/storage.conf. Modify this storage configuration if
-    # you want to change the system's defaults. If you want to modify storage just
-    # for CRI-O, you can change the storage configuration options here.
     [crio]
 
-    # Path to the "root directory". CRI-O stores all of its data, including
-    # containers images, in this directory.
-    #root = "/var/lib/containers/storage"
-
-    # Path to the "run directory". CRI-O stores all of its state in this directory.
-    #runroot = "/run/user/1000"
-
-    # Storage driver used to manage the storage of images and containers. Please
-    # refer to containers-storage.conf(5) to see all available storage drivers.
-    #storage_driver = "overlay"
-
-    # List to pass options to the storage driver. Please refer to
-    # containers-storage.conf(5) to see all available storage options.
-    #storage_option = [
-    #]
-
     # The default log directory where all logs will go unless directly specified by
     # the kubelet. The log directory specified must be an absolute directory.
-    # log_dir = "/var/log/crio/pods"
+    log_dir = "/var/log/crio/pods"
 
     # Location for CRI-O to lay down the version file
-    # version_file = "/var/lib/crio/version"
+    version_file = "/var/lib/crio/version"
 
     # The crio.api table contains settings for the kubelet/gRPC interface.
     [crio.api]
 
     # Path to AF_LOCAL socket on which CRI-O will listen.
-    # listen = "/var/run/crio/crio.sock"
+    listen = "/var/run/crio/crio.sock"
 
     # Host IP considered as the primary IP to use by CRI-O for things such as host network IP.
-    # host_ip = ""
+    host_ip = ""
 
     # IP address on which the stream server will listen.
     stream_address = ""
@@ -59,45 +28,38 @@ contents:
     stream_port = "10010"
 
     # Enable encrypted TLS transport of the stream server.
-    # stream_enable_tls = false
+    stream_enable_tls = false
 
     # Path to the x509 certificate file used to serve the encrypted stream. This
     # file can change, and CRI-O will automatically pick up the changes within 5
     # minutes.
-    # stream_tls_cert = ""
+    stream_tls_cert = ""
 
     # Path to the key file used to serve the encrypted stream. This file can
     # change and CRI-O will automatically pick up the changes within 5 minutes.
-    # stream_tls_key = ""
+    stream_tls_key = ""
 
     # Path to the x509 CA(s) file used to verify and authenticate client
     # communication with the encrypted stream. This file can change and CRI-O will
     # automatically pick up the changes within 5 minutes.
-    # stream_tls_ca = ""
+    stream_tls_ca = ""
 
     # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 16 * 1024 * 1024.
-    # grpc_max_send_msg_size = 16777216
+    grpc_max_send_msg_size = 16777216
 
     # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 16 * 1024 * 1024.
-    # grpc_max_recv_msg_size = 16777216
+    grpc_max_recv_msg_size = 16777216
 
     # The crio.runtime table contains settings pertaining to the OCI runtime used
     # and options for how to set up and manage the OCI runtime.
     [crio.runtime]
 
-    # A list of ulimits to be set in containers by default, specified as
-    # "<ulimit name>=<soft limit>:<hard limit>", for example:
-    # "nofile=1024:2048"
-    # If nothing is set here, settings will be inherited from the CRI-O daemon
-    #default_ulimits = [
-    #]
-
     # default_runtime is the _name_ of the OCI runtime to be used as the default.
     # The name is matched against the runtimes map below.
-    # default_runtime = "runc"
+    default_runtime = "runc"
 
     # If true, the runtime will not use pivot_root, but instead use MS_MOVE.
-    # no_pivot = false
+    no_pivot = false
 
     # Path to the conmon binary, used for monitoring the OCI runtime.
     # Will be searched for using $PATH if empty.
@@ -108,17 +70,17 @@ contents:
 
     # Environment variable list for the conmon process, used for passing necessary
     # environment variables to conmon or the runtime.
-    # conmon_env = [
-    #   "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-    # ]
+    conmon_env = [
+        "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+    ]
 
     # If true, SELinux will be used for pod separation on the host.
-    # selinux = true
+    selinux = true
 
     # Path to the seccomp.json profile which is used as the default seccomp profile
     # for the runtime. If not specified, then the internal default seccomp profile
     # will be used.
-    # seccomp_profile = "/etc/crio/seccomp.json"
+    seccomp_profile = ""
 
     # Used to change the name of the default AppArmor profile of CRI-O. The default
     # profile name is "crio-default-" followed by the version string of CRI-O.
@@ -130,106 +92,88 @@ contents:
 
     # List of default capabilities for containers. If it is empty or commented out,
     # only the capabilities defined in the containers json file by the user/kube
     # will be added.
-    # default_capabilities = [
-    #   "CHOWN",
-    #   "DAC_OVERRIDE",
-    #   "FSETID",
-    #   "FOWNER",
-    #   "NET_RAW",
-    #   "SETGID",
-    #   "SETUID",
-    #   "SETPCAP",
-    #   "NET_BIND_SERVICE",
-    #   "SYS_CHROOT",
-    #   "KILL",
-    # ]
+    default_capabilities = [
+        "CHOWN",
+        "DAC_OVERRIDE",
+        "FSETID",
+        "FOWNER",
+        "NET_RAW",
+        "SETGID",
+        "SETUID",
+        "SETPCAP",
+        "NET_BIND_SERVICE",
+        "SYS_CHROOT",
+        "KILL",
+    ]
 
     # List of default sysctls. If it is empty or commented out, only the sysctls
     # defined in the container json file by the user/kube will be added.
-    # default_sysctls = [
-    # ]
+    default_sysctls = [
+    ]
 
     # List of additional devices. specified as
    # "<device_on_host>:<device_on_container>:<permissions>", for example: "--device=/dev/sdc:/dev/xvdc:rwm".
     #If it is empty or commented out, only the devices
     # defined in the container json file by the user/kube will be added.
-    # additional_devices = [
-    # ]
+    additional_devices = [
+    ]
 
     # Path to OCI hooks directories for automatically executed hooks.
-    # Note: the default is just /usr/share/containers/oci/hooks.d, but /usr is immutable in RHCOS
-    # so we add /etc/containers/oci/hooks.d as well
     hooks_dir = [
         "/etc/containers/oci/hooks.d",
     ]
 
     # List of default mounts for each container. **Deprecated:** this option will
     # be removed in future versions in favor of default_mounts_file.
-    # default_mounts = [
-    #   "/usr/share/rhel/secrets:/run/secrets",
-    # ]
-
-    # Path to the file specifying the defaults mounts for each container. The
-    # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
-    # its default mounts from the following two files:
-    #
-    #   1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the
-    #      override file, where users can either add in their own default mounts, or
-    #      override the default mounts shipped with the package.
-    #
-    #   2) /usr/share/containers/mounts.conf: This is the default file read for
-    #      mounts. If you want CRI-O to read from a different, specific mounts file,
-    #      you can change the default_mounts_file. Note, if this is done, CRI-O will
-    #      only add mounts it finds in this file.
-    #
-    #default_mounts_file = ""
+    default_mounts = [
+    ]
 
     # Maximum number of processes allowed in a container.
-    # pids_limit = 1024
+    pids_limit = 1024
 
     # Maximum sized allowed for the container log file. Negative numbers indicate
     # that no size limit is imposed. If it is positive, it must be >= 8192 to
     # match/exceed conmon's read buffer. The file is truncated and re-opened so the
     # limit is never exceeded.
-    # log_size_max = -1
+    log_size_max = -1
 
     # Whether container output should be logged to journald in addition to the kuberentes log file
-    # log_to_journald = false
+    log_to_journald = false
 
     # Path to directory in which container exit files are written to by conmon.
-    # container_exits_dir = "/var/run/crio/exits"
+    container_exits_dir = "/var/run/crio/exits"
 
     # Path to directory for container attach sockets.
-    # container_attach_socket_dir = "/var/run/crio"
+    container_attach_socket_dir = "/var/run/crio"
 
     # The prefix to use for the source of the bind mounts.
-    # bind_mount_prefix = ""
+    bind_mount_prefix = ""
 
     # If set to true, all containers will run in read-only mode.
-    # read_only = false
+    read_only = false
 
     # Changes the verbosity of the logs based on the level it is set to. Options
     # are fatal, panic, error, warn, info, and debug. This option supports live
     # configuration reload.
-    # log_level = "error"
+    log_level = "error"
 
     # The UID mappings for the user namespace of each container. A range is
     # specified in the form containerUID:HostUID:Size. Multiple ranges must be
     # separated by comma.
-    # uid_mappings = ""
+    uid_mappings = ""
 
     # The GID mappings for the user namespace of each container. A range is
     # specified in the form containerGID:HostGID:Size. Multiple ranges must be
     # separated by comma.
-    # gid_mappings = ""
+    gid_mappings = ""
 
     # The minimal amount of time in seconds to wait before issuing a timeout
     # regarding the proper termination of the container.
-    # ctr_stop_timeout = 0
+    ctr_stop_timeout = 0
 
     # ManageNetworkNSLifecycle determines whether we pin and remove network namespace
     # and manage its lifecycle.
-    # manage_network_ns_lifecycle = false
+    manage_network_ns_lifecycle = false
 
     # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
     # The runtime to use is picked based on the runtime_handler provided by the CRI.
@@ -251,29 +195,11 @@
     # omitted, an "oci" runtime is assumed.
     # - runtime_root (optional, string): root directory for storage of containers
     #   state.
+    [crio.runtime.runtimes.runc]
+    runtime_path = ""
+    runtime_type = "oci"
+    runtime_root = "/run/runc"
 
-
-    # [crio.runtime.runtimes.runc]
-    # runtime_path = ""
-    # runtime_type = "oci"
-    # runtime_root = "/run/runc"
-
-
-    # Kata Containers is an OCI runtime, where containers are run inside lightweight
-    # VMs. Kata provides additional isolation towards the host, minimizing the host attack
-    # surface and mitigating the consequences of containers breakout.
-
-    # Kata Containers with the default configured VMM
-    #[crio.runtime.runtimes.kata-runtime]
-
-    # Kata Containers with the QEMU VMM
-    #[crio.runtime.runtimes.kata-qemu]
-
-    # Kata Containers with the Firecracker VMM
-    #[crio.runtime.runtimes.kata-fc]
-
-    # The crio.image table contains settings pertaining to the management of OCI images.
-    #
     # CRI-O reads its configured registries defaults from the system wide
     # containers-registries.conf(5) located in /etc/containers/registries.conf. If
     # you want to modify just CRI-O, you can change the registries configuration in
@@ -282,7 +208,7 @@ contents:
     [crio.image]
 
     # Default transport for pulling images from a remote container storage.
-    # default_transport = "docker://"
+    default_transport = "docker://"
 
     # The path to a file containing credentials necessary for pulling images from
     # secure registries. The file is similar to that of /var/lib/kubelet/config.json
@@ -298,7 +224,9 @@ contents:
     pause_image_auth_file = "/var/lib/kubelet/config.json"
 
     # The command to run to have a container stay in the paused state.
-    # This option supports live configuration reload.
+    # When explicitly set to "", it will fallback to the entrypoint and command
+    # specified in the pause image. When commented out, it will fallback to the
+    # default: "/pause". This option supports live configuration reload.
     pause_command = "/usr/bin/pod"
 
     # Path to the file which decides what sort of policy we use when deciding
@@ -306,36 +234,19 @@
     # this option be used, as the default behavior of using the system-wide default
     # policy (i.e., /etc/containers/policy.json) is most often preferred. Please
     # refer to containers-policy.json(5) for more details.
-    # signature_policy = ""
-
-    # List of registries to skip TLS verification for pulling images. Please
-    # consider configuring the registries via /etc/containers/registries.conf before
-    # changing them here.
-    #insecure_registries = "[]"
+    signature_policy = ""
 
     # Controls how image volumes are handled. The valid values are mkdir, bind and
     # ignore; the latter will ignore volumes entirely.
-    # image_volumes = "mkdir"
-
-    # List of registries to be used when pulling an unqualified image (e.g.,
-    # "alpine:latest"). By default, registries is set to "docker.io" for
-    # compatibility reasons. Depending on your workload and usecase you may add more
-    # registries (e.g., "quay.io", "registry.fedoraproject.org",
-    # "registry.opensuse.org", etc.).
-    #registries = [
-    # ]
-
-
+    image_volumes = "mkdir"
+
     # The crio.network table containers settings pertaining to the management of
     # CNI plugins.
     [crio.network]
 
-    # Path to the directory where CNI configuration files are located.
-    # Note this default is changed from the RPM.
     network_dir = "/etc/kubernetes/cni/net.d/"
 
     # Paths to directories where CNI plugin binaries are located.
-    # Note this default is changed from the RPM.
     plugin_dirs = [
         "/var/lib/cni/bin",
     ]
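The worker template that follows is byte-for-byte identical to the master one above (same blob hashes, c9f105d178..84d6e9f221). The uncommented defaults matter because `updateCRIOConfig` (in the `helpers.go` hunk earlier) round-trips this file through a typed TOML struct and only overwrites fields the `ContainerRuntimeConfig` actually sets. A simplified sketch of that decode-override-encode flow, using the `BurntSushi/toml` package from `go.mod` and a cut-down stand-in struct in place of the real `crioconfig` types:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

// tomlConfig is a cut-down stand-in for the tomlConfigCRIO type in helpers.go.
type tomlConfig struct {
	Crio struct {
		Runtime struct {
			PidsLimit  int64 `toml:"pids_limit"`
			LogSizeMax int64 `toml:"log_size_max"`
		} `toml:"runtime"`
	} `toml:"crio"`
}

func main() {
	data := `
[crio]
[crio.runtime]
pids_limit = 1024
log_size_max = -1
`
	var cfg tomlConfig
	if _, err := toml.Decode(data, &cfg); err != nil {
		panic(err)
	}

	// Only overwrite what the ContainerRuntimeConfig sets (mirrors the
	// "if internal.PidsLimit > 0" guard in updateCRIOConfig).
	requested := int64(12345)
	if requested > 0 {
		cfg.Crio.Runtime.PidsLimit = requested
	}

	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(cfg); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}
```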
diff --git a/templates/worker/01-worker-container-runtime/_base/files/crio.yaml b/templates/worker/01-worker-container-runtime/_base/files/crio.yaml
index c9f105d178..84d6e9f221 100644
--- a/templates/worker/01-worker-container-runtime/_base/files/crio.yaml
+++ b/templates/worker/01-worker-container-runtime/_base/files/crio.yaml
@@ -3,54 +3,23 @@ mode: 0644
 path: "/etc/crio/crio.conf"
 contents:
   inline: |
-    # The CRI-O configuration file specifies all of the available configuration
-    # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime
-    # daemon, but in a TOML format that can be more easily modified and versioned.
-    #
-    # Please refer to crio.conf(5) for details of all configuration options.
-
-    # CRI-O supports partial configuration reload during runtime, which can be
-    # done by sending SIGHUP to the running process. Currently supported options
-    # are explicitly mentioned with: 'This option supports live configuration
-    # reload'.
-
-    # CRI-O reads its storage defaults from the containers-storage.conf(5) file
-    # located at /etc/containers/storage.conf. Modify this storage configuration if
-    # you want to change the system's defaults. If you want to modify storage just
-    # for CRI-O, you can change the storage configuration options here.
     [crio]
 
-    # Path to the "root directory". CRI-O stores all of its data, including
-    # containers images, in this directory.
-    #root = "/var/lib/containers/storage"
-
-    # Path to the "run directory". CRI-O stores all of its state in this directory.
-    #runroot = "/run/user/1000"
-
-    # Storage driver used to manage the storage of images and containers. Please
-    # refer to containers-storage.conf(5) to see all available storage drivers.
-    #storage_driver = "overlay"
-
-    # List to pass options to the storage driver. Please refer to
-    # containers-storage.conf(5) to see all available storage options.
-    #storage_option = [
-    #]
-
     # The default log directory where all logs will go unless directly specified by
     # the kubelet. The log directory specified must be an absolute directory.
-    # log_dir = "/var/log/crio/pods"
+    log_dir = "/var/log/crio/pods"
 
     # Location for CRI-O to lay down the version file
-    # version_file = "/var/lib/crio/version"
+    version_file = "/var/lib/crio/version"
 
     # The crio.api table contains settings for the kubelet/gRPC interface.
     [crio.api]
 
     # Path to AF_LOCAL socket on which CRI-O will listen.
-    # listen = "/var/run/crio/crio.sock"
+    listen = "/var/run/crio/crio.sock"
 
     # Host IP considered as the primary IP to use by CRI-O for things such as host network IP.
-    # host_ip = ""
+    host_ip = ""
 
     # IP address on which the stream server will listen.
     stream_address = ""
@@ -59,45 +28,38 @@ contents:
     stream_port = "10010"
 
     # Enable encrypted TLS transport of the stream server.
-    # stream_enable_tls = false
+    stream_enable_tls = false
 
     # Path to the x509 certificate file used to serve the encrypted stream. This
     # file can change, and CRI-O will automatically pick up the changes within 5
     # minutes.
-    # stream_tls_cert = ""
+    stream_tls_cert = ""
 
     # Path to the key file used to serve the encrypted stream. This file can
     # change and CRI-O will automatically pick up the changes within 5 minutes.
-    # stream_tls_key = ""
+    stream_tls_key = ""
 
     # Path to the x509 CA(s) file used to verify and authenticate client
     # communication with the encrypted stream. This file can change and CRI-O will
     # automatically pick up the changes within 5 minutes.
-    # stream_tls_ca = ""
+    stream_tls_ca = ""
 
     # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 16 * 1024 * 1024.
-    # grpc_max_send_msg_size = 16777216
+    grpc_max_send_msg_size = 16777216
 
     # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 16 * 1024 * 1024.
-    # grpc_max_recv_msg_size = 16777216
+    grpc_max_recv_msg_size = 16777216
 
     # The crio.runtime table contains settings pertaining to the OCI runtime used
     # and options for how to set up and manage the OCI runtime.
     [crio.runtime]
 
-    # A list of ulimits to be set in containers by default, specified as
-    # "<ulimit name>=<soft limit>:<hard limit>", for example:
-    # "nofile=1024:2048"
-    # If nothing is set here, settings will be inherited from the CRI-O daemon
-    #default_ulimits = [
-    #]
-
     # default_runtime is the _name_ of the OCI runtime to be used as the default.
     # The name is matched against the runtimes map below.
-    # default_runtime = "runc"
+    default_runtime = "runc"
 
     # If true, the runtime will not use pivot_root, but instead use MS_MOVE.
-    # no_pivot = false
+    no_pivot = false
 
     # Path to the conmon binary, used for monitoring the OCI runtime.
     # Will be searched for using $PATH if empty.
@@ -108,17 +70,17 @@ contents:
 
     # Environment variable list for the conmon process, used for passing necessary
     # environment variables to conmon or the runtime.
-    # conmon_env = [
-    #   "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-    # ]
+    conmon_env = [
+        "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+    ]
 
     # If true, SELinux will be used for pod separation on the host.
-    # selinux = true
+    selinux = true
 
     # Path to the seccomp.json profile which is used as the default seccomp profile
     # for the runtime. If not specified, then the internal default seccomp profile
     # will be used.
-    # seccomp_profile = "/etc/crio/seccomp.json"
+    seccomp_profile = ""
 
     # Used to change the name of the default AppArmor profile of CRI-O. The default
     # profile name is "crio-default-" followed by the version string of CRI-O.
@@ -130,106 +92,88 @@ contents:
 
     # List of default capabilities for containers. If it is empty or commented out,
     # only the capabilities defined in the containers json file by the user/kube
     # will be added.
-    # default_capabilities = [
-    #   "CHOWN",
-    #   "DAC_OVERRIDE",
-    #   "FSETID",
-    #   "FOWNER",
-    #   "NET_RAW",
-    #   "SETGID",
-    #   "SETUID",
-    #   "SETPCAP",
-    #   "NET_BIND_SERVICE",
-    #   "SYS_CHROOT",
-    #   "KILL",
-    # ]
+    default_capabilities = [
+        "CHOWN",
+        "DAC_OVERRIDE",
+        "FSETID",
+        "FOWNER",
+        "NET_RAW",
+        "SETGID",
+        "SETUID",
+        "SETPCAP",
+        "NET_BIND_SERVICE",
+        "SYS_CHROOT",
+        "KILL",
+    ]
 
     # List of default sysctls. If it is empty or commented out, only the sysctls
     # defined in the container json file by the user/kube will be added.
-    # default_sysctls = [
-    # ]
+    default_sysctls = [
+    ]
 
     # List of additional devices. specified as
    # "<device_on_host>:<device_on_container>:<permissions>", for example: "--device=/dev/sdc:/dev/xvdc:rwm".
     #If it is empty or commented out, only the devices
     # defined in the container json file by the user/kube will be added.
-    # additional_devices = [
-    # ]
+    additional_devices = [
+    ]
 
     # Path to OCI hooks directories for automatically executed hooks.
-    # Note: the default is just /usr/share/containers/oci/hooks.d, but /usr is immutable in RHCOS
-    # so we add /etc/containers/oci/hooks.d as well
     hooks_dir = [
         "/etc/containers/oci/hooks.d",
     ]
 
     # List of default mounts for each container. **Deprecated:** this option will
     # be removed in future versions in favor of default_mounts_file.
-    # default_mounts = [
-    #   "/usr/share/rhel/secrets:/run/secrets",
-    # ]
-
-    # Path to the file specifying the defaults mounts for each container. The
-    # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
-    # its default mounts from the following two files:
-    #
-    #   1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the
-    #      override file, where users can either add in their own default mounts, or
-    #      override the default mounts shipped with the package.
-    #
-    #   2) /usr/share/containers/mounts.conf: This is the default file read for
-    #      mounts. If you want CRI-O to read from a different, specific mounts file,
-    #      you can change the default_mounts_file. Note, if this is done, CRI-O will
-    #      only add mounts it finds in this file.
-    #
-    #default_mounts_file = ""
+    default_mounts = [
+    ]
 
     # Maximum number of processes allowed in a container.
-    # pids_limit = 1024
+    pids_limit = 1024
 
     # Maximum sized allowed for the container log file. Negative numbers indicate
     # that no size limit is imposed. If it is positive, it must be >= 8192 to
     # match/exceed conmon's read buffer. The file is truncated and re-opened so the
     # limit is never exceeded.
-    # log_size_max = -1
+    log_size_max = -1
 
     # Whether container output should be logged to journald in addition to the kuberentes log file
-    # log_to_journald = false
+    log_to_journald = false
 
     # Path to directory in which container exit files are written to by conmon.
-    # container_exits_dir = "/var/run/crio/exits"
+    container_exits_dir = "/var/run/crio/exits"
 
     # Path to directory for container attach sockets.
-    # container_attach_socket_dir = "/var/run/crio"
+    container_attach_socket_dir = "/var/run/crio"
 
     # The prefix to use for the source of the bind mounts.
-    # bind_mount_prefix = ""
+    bind_mount_prefix = ""
 
     # If set to true, all containers will run in read-only mode.
-    # read_only = false
+    read_only = false
 
     # Changes the verbosity of the logs based on the level it is set to. Options
     # are fatal, panic, error, warn, info, and debug. This option supports live
     # configuration reload.
-    # log_level = "error"
+    log_level = "error"
 
     # The UID mappings for the user namespace of each container. A range is
     # specified in the form containerUID:HostUID:Size. Multiple ranges must be
     # separated by comma.
-    # uid_mappings = ""
+    uid_mappings = ""
 
     # The GID mappings for the user namespace of each container. A range is
     # specified in the form containerGID:HostGID:Size. Multiple ranges must be
     # separated by comma.
-    # gid_mappings = ""
+    gid_mappings = ""
 
     # The minimal amount of time in seconds to wait before issuing a timeout
     # regarding the proper termination of the container.
-    # ctr_stop_timeout = 0
+    ctr_stop_timeout = 0
 
     # ManageNetworkNSLifecycle determines whether we pin and remove network namespace
     # and manage its lifecycle.
-    # manage_network_ns_lifecycle = false
+    manage_network_ns_lifecycle = false
 
     # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
     # The runtime to use is picked based on the runtime_handler provided by the CRI.
@@ -251,29 +195,11 @@
     # omitted, an "oci" runtime is assumed.
     # - runtime_root (optional, string): root directory for storage of containers
     #   state.
+    [crio.runtime.runtimes.runc]
+    runtime_path = ""
+    runtime_type = "oci"
+    runtime_root = "/run/runc"
 
-
-    # [crio.runtime.runtimes.runc]
-    # runtime_path = ""
-    # runtime_type = "oci"
-    # runtime_root = "/run/runc"
-
-
-    # Kata Containers is an OCI runtime, where containers are run inside lightweight
-    # VMs. Kata provides additional isolation towards the host, minimizing the host attack
-    # surface and mitigating the consequences of containers breakout.
-
-    # Kata Containers with the default configured VMM
-    #[crio.runtime.runtimes.kata-runtime]
-
-    # Kata Containers with the QEMU VMM
-    #[crio.runtime.runtimes.kata-qemu]
-
-    # Kata Containers with the Firecracker VMM
-    #[crio.runtime.runtimes.kata-fc]
-
-    # The crio.image table contains settings pertaining to the management of OCI images.
-    #
     # CRI-O reads its configured registries defaults from the system wide
     # containers-registries.conf(5) located in /etc/containers/registries.conf. If
     # you want to modify just CRI-O, you can change the registries configuration in
@@ -282,7 +208,7 @@ contents:
     [crio.image]
 
     # Default transport for pulling images from a remote container storage.
-    # default_transport = "docker://"
+    default_transport = "docker://"
 
     # The path to a file containing credentials necessary for pulling images from
     # secure registries. The file is similar to that of /var/lib/kubelet/config.json
@@ -298,7 +224,9 @@ contents:
     pause_image_auth_file = "/var/lib/kubelet/config.json"
 
     # The command to run to have a container stay in the paused state.
-    # This option supports live configuration reload.
+    # When explicitly set to "", it will fallback to the entrypoint and command
+    # specified in the pause image. When commented out, it will fallback to the
+    # default: "/pause". This option supports live configuration reload.
     pause_command = "/usr/bin/pod"
 
     # Path to the file which decides what sort of policy we use when deciding
@@ -306,36 +234,19 @@
     # this option be used, as the default behavior of using the system-wide default
     # policy (i.e., /etc/containers/policy.json) is most often preferred. Please
     # refer to containers-policy.json(5) for more details.
-    # signature_policy = ""
-
-    # List of registries to skip TLS verification for pulling images. Please
-    # consider configuring the registries via /etc/containers/registries.conf before
-    # changing them here.
-    #insecure_registries = "[]"
+    signature_policy = ""
 
     # Controls how image volumes are handled. The valid values are mkdir, bind and
     # ignore; the latter will ignore volumes entirely.
-    # image_volumes = "mkdir"
-
-    # List of registries to be used when pulling an unqualified image (e.g.,
-    # "alpine:latest"). By default, registries is set to "docker.io" for
-    # compatibility reasons. Depending on your workload and usecase you may add more
-    # registries (e.g., "quay.io", "registry.fedoraproject.org",
-    # "registry.opensuse.org", etc.).
-    #registries = [
-    # ]
-
-
+    image_volumes = "mkdir"
+
     # The crio.network table containers settings pertaining to the management of
     # CNI plugins.
     [crio.network]
 
-    # Path to the directory where CNI configuration files are located.
-    # Note this default is changed from the RPM.
     network_dir = "/etc/kubernetes/cni/net.d/"
 
     # Paths to directories where CNI plugin binaries are located.
-    # Note this default is changed from the RPM.
     plugin_dirs = [
         "/var/lib/cni/bin",
     ]
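The e2e test that follows verifies these values end to end by grepping the rendered crio.conf on the node with a capture-group regex such as `pids_limit = (\S+)`. The `getValueFromCrioConfig` helper it calls is not part of this diff; presumably it reads `/etc/crio/crio.conf` off the node and extracts the first capture group, along these lines (hypothetical stand-in, names assumed):

```go
package main

import (
	"fmt"
	"regexp"
)

// extractConfValue pulls the first capture group of pattern out of a
// crio.conf payload; a hypothetical stand-in for the getValueFromCrioConfig
// helper referenced (but not shown) in the test below.
func extractConfValue(crioConf, pattern string) (string, error) {
	re, err := regexp.Compile(pattern)
	if err != nil {
		return "", err
	}
	m := re.FindStringSubmatch(crioConf)
	if len(m) < 2 {
		return "", fmt.Errorf("pattern %q not found", pattern)
	}
	return m[1], nil
}

func main() {
	conf := "[crio.runtime]\npids_limit = 12345\n"
	v, err := extractConfValue(conf, `pids_limit = (\S+)`)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 12345
}
```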
plugin_dirs = [ "/var/lib/cni/bin", ] diff --git a/test/e2e/ctrcfg_test.go b/test/e2e/ctrcfg_test.go new file mode 100644 index 0000000000..866a72633e --- /dev/null +++ b/test/e2e/ctrcfg_test.go @@ -0,0 +1,176 @@ +package e2e_test + +import ( + "fmt" + "regexp" + "testing" + "time" + + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + "github.com/openshift/machine-config-operator/test/e2e/framework" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/util/wait" +) + +func TestContainerRuntimeConfigPidsLimit(t *testing.T) { + runTestWithCtrcfg(t, "pids-limit", `pids_limit = (\S+)`, "12345", &mcfgv1.ContainerRuntimeConfiguration{ + PidsLimit: 12345, + }) } + +// runTestWithCtrcfg creates a ctrcfg and checks whether the expected updates were applied, then deletes the ctrcfg and makes +// sure the node rolled back as expected +// testName is a string to identify the objects created (MCP, MC, ctrcfg) +// regexKey is the search criteria in the crio.conf. It is expected that a single field is in a capture group, and this field +// should equal expectedConfValue upon update +// cfg is the ctrcfg config to update to and roll back from +func runTestWithCtrcfg(t *testing.T, testName, regexKey, expectedConfValue string, cfg *mcfgv1.ContainerRuntimeConfiguration) { + cs := framework.NewClientSet("") + matchValue := fmt.Sprintf("%s-%s", testName, uuid.NewUUID()) + ctrcfgName := fmt.Sprintf("ctrcfg-%s", matchValue) + poolName := fmt.Sprintf("node-%s", matchValue) + mcName := fmt.Sprintf("mc-%s", matchValue) + + // instead of a bunch of individual defers, we can run through all of them + // in a single one + cleanupFuncs := make([]func(), 0) + defer func() { + for _, f := range cleanupFuncs { + f() + } + }() + + // label one node from the pool to specify which worker to update + cleanupFuncs = append(cleanupFuncs, labelRandomNodeFromPool(t, cs, "worker", mcpNameToRole(poolName))) + // upon cleaning up, we need to wait for the pool to reconcile after unlabelling + cleanupFuncs = append(cleanupFuncs, func() { + // the sleep allows the unlabelling to take effect + time.Sleep(time.Second * 5) + // wait until worker pool updates the node we labelled before we delete the test specific mc and mcp + if err := waitForPoolComplete(t, cs, "worker", getMcName(t, cs, "worker")); err != nil { + t.Logf("failed to wait for pool %v", err) + } + }) + + // cache the old configuration value to check against later + node := getSingleNodeByRole(t, cs, poolName) + oldConfValue := getValueFromCrioConfig(t, cs, node, regexKey) + if oldConfValue == expectedConfValue { + t.Logf("default configuration value %s same as value being tested against.
Consider updating the test", oldConfValue) + return + } + + // create an MCP to match the node we tagged + cleanupFuncs = append(cleanupFuncs, createMCP(t, cs, poolName)) + + // create old mc to have something to verify we successfully rolled back + oldMCConfig := createMC(mcName, poolName) + _, err := cs.MachineConfigs().Create(oldMCConfig) + require.Nil(t, err) + cleanupFuncs = append(cleanupFuncs, func() { + err := cs.MachineConfigs().Delete(oldMCConfig.Name, &metav1.DeleteOptions{}) + require.Nil(t, err, "machine config deletion failed") + }) + waitForConfigAndPoolComplete(t, cs, poolName, oldMCConfig.Name) + + // create our ctrcfg and attach it to our created node pool + cleanupCtrcfgFunc := createCtrcfgWithConfig(t, cs, ctrcfgName, poolName, cfg) + + // wait for the ctrcfg to show up + ctrcfgMCName, err := getMCFromCtrcfg(t, cs, ctrcfgName) + require.Nil(t, err, "failed to render machine config from container runtime config") + + // ensure ctrcfg update rolls out to the pool + waitForConfigAndPoolComplete(t, cs, poolName, ctrcfgMCName) + + // verify value was changed + newConfValue := getValueFromCrioConfig(t, cs, node, regexKey) + require.Equal(t, newConfValue, expectedConfValue, "value in crio.conf not updated as expected") + + // cleanup ctrcfg and make sure it doesn't error + err = cleanupCtrcfgFunc() + require.Nil(t, err) + + t.Logf("Deleted ContainerRuntimeConfig %s", ctrcfgName) + // there's a weird race where we observe the pool is updated when in reality + // that update is from before. Sleeping allows a new update cycle to start + time.Sleep(time.Second * 5) + + // ensure config rolls back as expected + waitForConfigAndPoolComplete(t, cs, poolName, oldMCConfig.Name) + + restoredConfValue := getValueFromCrioConfig(t, cs, node, regexKey) + require.Equal(t, restoredConfValue, oldConfValue, "ctrcfg deletion didn't cause node to roll back config") +} + +// createCtrcfgWithConfig takes a config spec and creates a ContainerRuntimeConfig object +// whose MachineConfigPoolSelector matches the pool label key +// this function assumes there is an mcp with the label 'key=' +func createCtrcfgWithConfig(t *testing.T, cs *framework.ClientSet, name, key string, config *mcfgv1.ContainerRuntimeConfiguration) func() error { + ctrcfg := &mcfgv1.ContainerRuntimeConfig{} + ctrcfg.ObjectMeta = metav1.ObjectMeta{ + Name: name, + } + spec := mcfgv1.ContainerRuntimeConfigSpec{ + MachineConfigPoolSelector: &metav1.LabelSelector{ + MatchLabels: make(map[string]string), + }, + ContainerRuntimeConfig: config, + } + spec.MachineConfigPoolSelector.MatchLabels[key] = "" + ctrcfg.Spec = spec + + _, err := cs.ContainerRuntimeConfigs().Create(ctrcfg) + require.Nil(t, err) + return func() error { + return cs.ContainerRuntimeConfigs().Delete(name, &metav1.DeleteOptions{}) + } +} + +// getMCFromCtrcfg returns a rendered machine config that was generated from the ctrcfg ctrcfgName +func getMCFromCtrcfg(t *testing.T, cs *framework.ClientSet, ctrcfgName string) (string, error) { + var mcName string + + // get the machine config created when we deploy the ctrcfg + if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) { + mcs, err := cs.MachineConfigs().List(metav1.ListOptions{}) + if err != nil { + return false, err + } + for _, mc := range mcs.Items { + ownerRefs := mc.GetOwnerReferences() + for _, ownerRef := range ownerRefs { + // TODO can't find anywhere this value is defined publicly + if ownerRef.Kind == "ContainerRuntimeConfig" && ownerRef.Name == ctrcfgName { + mcName = mc.Name + return true, nil + }
+ } + } + return false, nil + }); err != nil { + return "", errors.Wrapf(err, "can't find machine config created by ctrcfg %s", ctrcfgName) + } + return mcName, nil +} + +// getValueFromCrioConfig jumps onto the node and gets the crio config. It then uses the regexKey to +// find the value that is being searched for +// regexKey is expected to be in the form `key = (\S+)` to search for the value of key +func getValueFromCrioConfig(t *testing.T, cs *framework.ClientSet, node corev1.Node, regexKey string) string { + // get the contents of the crio.conf on the node + out := execCmdOnNode(t, cs, node, "cat", "/rootfs/etc/crio/crio.conf") + + // search based on the regex key. The output should have two members: + // one with the entire line `key = value` and one with just the value, in that order + re := regexp.MustCompile(regexKey) + matches := re.FindStringSubmatch(string(out)) + require.Len(t, matches, 2) + + require.NotEmpty(t, matches[1], "regex %s attempted on crio.conf of node %s came back empty", regexKey, node.Name) + return matches[1] +} diff --git a/test/e2e/mcd_test.go b/test/e2e/mcd_test.go index 1832f030c9..eefef92502 100644 --- a/test/e2e/mcd_test.go +++ b/test/e2e/mcd_test.go @@ -2,7 +2,6 @@ package e2e_test import ( "fmt" - "os/exec" "strings" "testing" "time" @@ -12,7 +11,6 @@ import ( ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/pkg/daemon/constants" "github.com/openshift/machine-config-operator/test/e2e/framework" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -73,13 +71,8 @@ func createIgnFile(path, content, fs string, mode int) igntypes.File { } func createMCToAddFileForRole(name, role, filename, data, fs string) *mcfgv1.MachineConfig { - // create a dummy MC - mcadd := &mcfgv1.MachineConfig{} - mcadd.ObjectMeta = metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", name, uuid.NewUUID()), - // TODO(runcom): hardcoded to workers for safety - Labels: mcLabelForRole(role), - } + mcadd := createMC(fmt.Sprintf("%s-%s", name, uuid.NewUUID()), role) + ignConfig := ctrlcommon.NewIgnConfig() ignFile := createIgnFile(filename, "data:,"+data, fs, 420) ignConfig.Storage.Files = append(ignConfig.Storage.Files, ignFile) @@ -91,53 +84,6 @@ func createMCToAddFile(name, filename, data, fs string) *mcfgv1.MachineConfig { return createMCToAddFileForRole(name, "worker", filename, data, fs) } -// waitForRenderedConfig polls a MachineConfigPool until it has -// included the given mcName in its config, and returns the new -// rendered config name.
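The regexKey convention used by runTestWithCtrcfg and getValueFromCrioConfig above (a pattern of the form `key = (\S+)` whose single capture group isolates the value) can be exercised on its own. The snippet below is a standalone illustration, not part of the test suite, using only the standard regexp package against a hard-coded crio.conf excerpt.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Excerpt of a crio.conf as it might look after the ctrcfg above applies.
	conf := "log_level = \"error\"\npids_limit = 12345\nlog_size_max = -1\n"

	// Same shape as the key passed to runTestWithCtrcfg: FindStringSubmatch
	// returns the full match first, then the lone capture group.
	re := regexp.MustCompile(`pids_limit = (\S+)`)
	matches := re.FindStringSubmatch(conf)
	fmt.Println(matches[0]) // pids_limit = 12345
	fmt.Println(matches[1]) // 12345
}
```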
-func waitForRenderedConfig(t *testing.T, cs *framework.ClientSet, pool, mcName string) (string, error) { - var renderedConfig string - startTime := time.Now() - if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) { - mcp, err := cs.MachineConfigPools().Get(pool, metav1.GetOptions{}) - if err != nil { - return false, err - } - for _, mc := range mcp.Spec.Configuration.Source { - if mc.Name == mcName { - renderedConfig = mcp.Spec.Configuration.Name - return true, nil - } - } - return false, nil - }); err != nil { - return "", errors.Wrapf(err, "machine config %s hasn't been picked by pool %s", mcName, pool) - } - t.Logf("Pool %s has rendered config %s with %s (waited %v)", pool, mcName, renderedConfig, time.Since(startTime)) - return renderedConfig, nil -} - -// waitForPoolComplete polls a pool until it has completed an update to target -func waitForPoolComplete(t *testing.T, cs *framework.ClientSet, pool, target string) error { - startTime := time.Now() - if err := wait.Poll(2*time.Second, 20*time.Minute, func() (bool, error) { - mcp, err := cs.MachineConfigPools().Get(pool, metav1.GetOptions{}) - if err != nil { - return false, err - } - if mcp.Status.Configuration.Name != target { - return false, nil - } - if mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolUpdated) { - return true, nil - } - return false, nil - }); err != nil { - return errors.Wrapf(err, "pool %s didn't report updated to %s", pool, target) - } - t.Logf("Pool %s has completed %s (waited %v)", pool, target, time.Since(startTime)) - return nil -} - func TestMCDeployed(t *testing.T) { cs := framework.NewClientSet("") @@ -223,18 +169,11 @@ func TestUpdateSSH(t *testing.T) { for _, node := range nodes { assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) - mcd, err := mcdForNode(cs, &node) - require.Nil(t, err) - mcdName := mcd.ObjectMeta.Name // now rsh into that daemon and grep the authorized key file to check if 1234_test was written // must do both commands in same shell, combine commands into one exec.Command() - found, err := exec.Command("oc", "rsh", "-n", "openshift-machine-config-operator", mcdName, - "grep", "1234_test", "/rootfs/home/core/.ssh/authorized_keys").CombinedOutput() - if err != nil { - t.Fatalf("unable to read authorized_keys on daemon: %s got: %s got err: %v", mcdName, found, err) - } - if !strings.Contains(string(found), "1234_test") { + found := execCmdOnNode(t, cs, node, "grep", "1234_test", "/rootfs/home/core/.ssh/authorized_keys") + if !strings.Contains(found, "1234_test") { t.Fatalf("updated ssh keys not found in authorized_keys, got %s", found) } t.Logf("Node %s has SSH key", node.Name) @@ -267,13 +206,7 @@ func TestKernelArguments(t *testing.T) { for _, node := range nodes { assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) - mcd, err := mcdForNode(cs, &node) - require.Nil(t, err) - mcdName := mcd.ObjectMeta.Name - kargsBytes, err := exec.Command("oc", "rsh", "-n", "openshift-machine-config-operator", mcdName, - "cat", "/rootfs/proc/cmdline").CombinedOutput() - require.Nil(t, err) - kargs := string(kargsBytes) + kargs := execCmdOnNode(t, cs, node, "cat", "/rootfs/proc/cmdline") for _, v := range 
kargsMC.Spec.KernelArguments { if !strings.Contains(kargs, v) { t.Fatalf("Missing '%s' in kargs", v) @@ -314,14 +247,8 @@ func TestKernelType(t *testing.T) { for _, node := range nodes { assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) - mcd, err := mcdForNode(cs, &node) - require.Nil(t, err) - mcdName := mcd.ObjectMeta.Name - // Worker node should have RT kernel - cmd, err := exec.Command("oc", "rsh", "-n", "openshift-machine-config-operator", mcdName, - "uname", "-a").CombinedOutput() - require.Nil(t, err) - kernelInfo := string(cmd) + + kernelInfo := execCmdOnNode(t, cs, node, "uname", "-a") if !strings.Contains(kernelInfo, "PREEMPT RT") { t.Fatalf("Node %s doesn't have expected kernel", node.Name) } @@ -346,14 +273,7 @@ func TestKernelType(t *testing.T) { for _, node := range nodes { assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], workerOldMC) assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) - mcd, err := mcdForNode(cs, &node) - require.Nil(t, err) - mcdName := mcd.ObjectMeta.Name - // Worker node should now have default kernel - cmd, err := exec.Command("oc", "rsh", "-n", "openshift-machine-config-operator", mcdName, - "uname", "-a").CombinedOutput() - require.Nil(t, err) - kernelInfo := string(cmd) + kernelInfo := execCmdOnNode(t, cs, node, "uname", "-a") if strings.Contains(kernelInfo, "PREEMPT RT") { t.Fatalf("Node %s did not rollback successfully", node.Name) } @@ -362,17 +282,6 @@ func TestKernelType(t *testing.T) { } -func getNodesByRole(cs *framework.ClientSet, role string) ([]corev1.Node, error) { - listOptions := metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet(labels.Set{fmt.Sprintf("node-role.kubernetes.io/%s", role): ""}).String(), - } - nodes, err := cs.Nodes().List(listOptions) - if err != nil { - return nil, err - } - return nodes.Items, nil -} - func TestPoolDegradedOnFailToRender(t *testing.T) { cs := framework.NewClientSet("") @@ -424,22 +333,17 @@ func TestReconcileAfterBadMC(t *testing.T) { mcadd := createMCToAddFile("add-a-file", "/etc/mytestconfs", "test", "root") mcadd.Spec.Config.Networkd = igntypes.Networkd{ Units: []igntypes.Networkdunit{ - igntypes.Networkdunit{ + { Name: "test.network", Contents: "test contents", }, }, } - // grab the initial machineconfig used by the worker pool - // this MC is gonna be the one which is going to be reapplied once the bad MC is deleted - // and we need it for the final check - mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) - require.Nil(t, err) - workerOldMc := mcp.Status.Configuration.Name + workerOldMc := getMcName(t, cs, "worker") // create the dummy MC now - _, err = cs.MachineConfigs().Create(mcadd) + _, err := cs.MachineConfigs().Create(mcadd) if err != nil { t.Errorf("failed to create machine config %v", err) } @@ -497,7 +401,7 @@ func TestReconcileAfterBadMC(t *testing.T) { if err != nil { return false, err } - mcp, err = cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) + mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) if err != nil { return false, err } @@ -526,14 +430,10 @@ func TestDontDeleteRPMFiles(t *testing.T) { mcHostFile := createMCToAddFile("modify-host-file", "/etc/motd", "mco-test", "root") - // grab the initial machineconfig used by the worker pool - // this MC is 
gonna be the one which is going to be reapplied once the previous MC is deleted - mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) - require.Nil(t, err) - workerOldMc := mcp.Status.Configuration.Name + workerOldMc := getMcName(t, cs, "worker") // create the dummy MC now - _, err = cs.MachineConfigs().Create(mcHostFile) + _, err := cs.MachineConfigs().Create(mcHostFile) if err != nil { t.Errorf("failed to create machine config %v", err) } @@ -562,16 +462,9 @@ func TestDontDeleteRPMFiles(t *testing.T) { for _, node := range nodes { assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], workerOldMc) assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) - mcd, err := mcdForNode(cs, &node) - require.Nil(t, err) - mcdName := mcd.ObjectMeta.Name - found, err := exec.Command("oc", "rsh", "-n", "openshift-machine-config-operator", mcdName, - "cat", "/rootfs/etc/motd").CombinedOutput() - if err != nil { - t.Fatalf("unable to read test file on daemon: %s got: %s got err: %v", mcdName, found, err) - } - if strings.Contains(string(found), "mco-test") { + found := execCmdOnNode(t, cs, node, "cat", "/rootfs/etc/motd") + if strings.Contains(found, "mco-test") { t.Fatalf("updated file doesn't contain expected data, got %s", found) } } @@ -580,60 +473,29 @@ func TestDontDeleteRPMFiles(t *testing.T) { func TestCustomPool(t *testing.T) { cs := framework.NewClientSet("") - nodes, err := getNodesByRole(cs, "worker") - require.Nil(t, err) - require.NotEmpty(t, nodes) - infraNode := nodes[0] - out, err := exec.Command("oc", "label", "node", infraNode.Name, "node-role.kubernetes.io/infra=").CombinedOutput() - require.Nil(t, err, "unable to label worker node %s with infra: %s", infraNode.Name, string(out)) - - infraMCP := &mcfgv1.MachineConfigPool{} - infraMCP.Name = "infra" - nodeSelector := metav1.LabelSelector{} - infraMCP.Spec.NodeSelector = &nodeSelector - infraMCP.Spec.NodeSelector.MatchLabels = make(map[string]string) - infraMCP.Spec.NodeSelector.MatchLabels["node-role.kubernetes.io/infra"] = "" - mcSelector := metav1.LabelSelector{} - infraMCP.Spec.MachineConfigSelector = &mcSelector - infraMCP.Spec.MachineConfigSelector.MatchExpressions = []metav1.LabelSelectorRequirement{ - metav1.LabelSelectorRequirement{ - Key: "machineconfiguration.openshift.io/role", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"worker", "infra"}, - }, - } - _, err = cs.MachineConfigPools().Create(infraMCP) - require.Nil(t, err) + unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra") + + createMCP(t, cs, "infra") infraMC := createMCToAddFileForRole("infra-host-file", "infra", "/etc/mco-custom-pool", "mco-custom-pool", "root") - _, err = cs.MachineConfigs().Create(infraMC) + _, err := cs.MachineConfigs().Create(infraMC) require.Nil(t, err) renderedConfig, err := waitForRenderedConfig(t, cs, "infra", infraMC.Name) require.Nil(t, err) err = waitForPoolComplete(t, cs, "infra", renderedConfig) require.Nil(t, err) - nodes, err = getNodesByRole(cs, "infra") - require.Nil(t, err) - require.Len(t, nodes, 1) + infraNode := getSingleNodeByRole(t, cs, "infra") - for _, node := range nodes { - assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) - assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) - mcd, err := mcdForNode(cs, &node) - require.Nil(t, err) - mcdName := 
mcd.ObjectMeta.Name - out, err := exec.Command("oc", "rsh", "-n", "openshift-machine-config-operator", "-c", "machine-config-daemon", mcdName, - "cat", "/rootfs/etc/mco-custom-pool").CombinedOutput() - require.Nil(t, err, "failed to cat test file: %s", string(out)) - if string(out) != "mco-custom-pool" { - t.Fatalf("Unexpected infra MC content on node %s: %s", node.Name, out) - } - t.Logf("Node %s has expected infra MC content", node.Name) + assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) + assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) + out := execCmdOnNode(t, cs, infraNode, "cat", "/rootfs/etc/mco-custom-pool") + if !strings.Contains(out, "mco-custom-pool") { + t.Fatalf("Unexpected infra MC content on node %s: %s", infraNode.Name, out) } + t.Logf("Node %s has expected infra MC content", infraNode.Name) - out, err = exec.Command("oc", "label", "node", infraNode.Name, "node-role.kubernetes.io/infra-").CombinedOutput() - require.Nil(t, err, "unable to remove infra label from node %s: %s", infraNode.Name, string(out)) + unlabelFunc() workerMCP, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) require.Nil(t, err) diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go new file mode 100644 index 0000000000..cf274770f2 --- /dev/null +++ b/test/e2e/utils_test.go @@ -0,0 +1,182 @@ +package e2e_test + +import ( + "fmt" + "math/rand" + "os/exec" + "testing" + "time" + + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + "github.com/openshift/machine-config-operator/test/e2e/framework" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" +) + +// getMcName returns the current configuration name of the machine config pool poolName +func getMcName(t *testing.T, cs *framework.ClientSet, poolName string) string { + // grab the initial machineconfig used by the worker pool + // this MC is gonna be the one which is going to be reapplied once the previous MC is deleted + mcp, err := cs.MachineConfigPools().Get(poolName, metav1.GetOptions{}) + require.Nil(t, err) + return mcp.Status.Configuration.Name +} + +// waitForConfigAndPoolComplete is a helper function that gets a renderedConfig and waits for its pool to complete +func waitForConfigAndPoolComplete(t *testing.T, cs *framework.ClientSet, pool, mcName string) { + config, err := waitForRenderedConfig(t, cs, pool, mcName) + require.Nil(t, err, "failed to render machine config %s from pool %s", mcName, pool) + err = waitForPoolComplete(t, cs, pool, config) + require.Nil(t, err, "pool %s did not update to config %s", pool, config) +} + +// waitForRenderedConfig polls a MachineConfigPool until it has +// included the given mcName in its config, and returns the new +// rendered config name. 
+func waitForRenderedConfig(t *testing.T, cs *framework.ClientSet, pool, mcName string) (string, error) { + var renderedConfig string + startTime := time.Now() + if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) { + mcp, err := cs.MachineConfigPools().Get(pool, metav1.GetOptions{}) + if err != nil { + return false, err + } + for _, mc := range mcp.Spec.Configuration.Source { + if mc.Name == mcName { + renderedConfig = mcp.Spec.Configuration.Name + return true, nil + } + } + return false, nil + }); err != nil { + return "", errors.Wrapf(err, "machine config %s hasn't been picked by pool %s (waited %s)", mcName, pool, time.Since(startTime)) + } + t.Logf("Pool %s has rendered config %s with %s (waited %v)", pool, mcName, renderedConfig, time.Since(startTime)) + return renderedConfig, nil +} + +// waitForPoolComplete polls a pool until it has completed an update to target +func waitForPoolComplete(t *testing.T, cs *framework.ClientSet, pool, target string) error { + startTime := time.Now() + if err := wait.Poll(2*time.Second, 20*time.Minute, func() (bool, error) { + mcp, err := cs.MachineConfigPools().Get(pool, metav1.GetOptions{}) + if err != nil { + return false, err + } + if mcp.Status.Configuration.Name != target { + return false, nil + } + if mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolUpdated) { + return true, nil + } + return false, nil + }); err != nil { + return errors.Wrapf(err, "pool %s didn't report updated to %s (waited %s)", pool, target, time.Since(startTime)) + } + t.Logf("Pool %s has completed %s (waited %v)", pool, target, time.Since(startTime)) + return nil +} + +// labelRandomNodeFromPool gets all nodes in pool and chooses one at random to label +func labelRandomNodeFromPool(t *testing.T, cs *framework.ClientSet, pool, label string) func() { + nodes, err := getNodesByRole(cs, pool) + require.Nil(t, err) + require.NotEmpty(t, nodes) + + rand.Seed(time.Now().UnixNano()) + infraNode := nodes[rand.Intn(len(nodes))] + out, err := exec.Command("oc", "label", "node", infraNode.Name, label+"=").CombinedOutput() + require.Nil(t, err, "unable to label node %s with %s: %s", infraNode.Name, label, string(out)) + return func() { + out, err = exec.Command("oc", "label", "node", infraNode.Name, label+"-").CombinedOutput() + require.Nil(t, err, "unable to remove label from node %s: %s", infraNode.Name, string(out)) + } +} + +// getSingleNodeByRole gets all nodes with the role role, and asserts there should only be one +func getSingleNodeByRole(t *testing.T, cs *framework.ClientSet, role string) corev1.Node { + nodes, err := getNodesByRole(cs, role) + require.Nil(t, err) + require.Len(t, nodes, 1) + return nodes[0] +} + +// getNodesByRole gets all nodes labeled with role role +func getNodesByRole(cs *framework.ClientSet, role string) ([]corev1.Node, error) { + listOptions := metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{fmt.Sprintf("node-role.kubernetes.io/%s", role): ""}).String(), + } + nodes, err := cs.Nodes().List(listOptions) + if err != nil { + return nil, err + } + return nodes.Items, nil +} + +// createMCP creates a machine config pool with name mcpName +// it will also use mcpName as the label selector, so any node you want to be included +// in the pool should have a label node-role.kubernetes.io/mcpName = "" +func createMCP(t *testing.T, cs *framework.ClientSet, mcpName string) func() { + infraMCP := &mcfgv1.MachineConfigPool{} + infraMCP.Name = mcpName + nodeSelector :=
metav1.LabelSelector{} + infraMCP.Spec.NodeSelector = &nodeSelector + infraMCP.Spec.NodeSelector.MatchLabels = make(map[string]string) + infraMCP.Spec.NodeSelector.MatchLabels[mcpNameToRole(mcpName)] = "" + mcSelector := metav1.LabelSelector{} + infraMCP.Spec.MachineConfigSelector = &mcSelector + infraMCP.Spec.MachineConfigSelector.MatchExpressions = []metav1.LabelSelectorRequirement{ + { + Key: "machineconfiguration.openshift.io/role", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"worker", mcpName}, + }, + } + infraMCP.ObjectMeta.Labels = make(map[string]string) + infraMCP.ObjectMeta.Labels[mcpName] = "" + _, err := cs.MachineConfigPools().Create(infraMCP) + require.Nil(t, err) + return func() { + err := cs.MachineConfigPools().Delete(mcpName, &metav1.DeleteOptions{}) + require.Nil(t, err) + } +} + +// mcpNameToRole converts a mcpName to a node role label +func mcpNameToRole(mcpName string) string { + return fmt.Sprintf("node-role.kubernetes.io/%s", mcpName) +} + +// createMC creates a machine config object with name and role +func createMC(name, role string) *mcfgv1.MachineConfig { + // create a dummy MC + mcadd := &mcfgv1.MachineConfig{} + mcadd.ObjectMeta = metav1.ObjectMeta{ + Name: name, + Labels: mcLabelForRole(role), + } + return mcadd +} + +// execCmdOnNode finds a node's mcd, and oc rsh's into it to execute a command on the node +// all commands should use /rootfs as root +func execCmdOnNode(t *testing.T, cs *framework.ClientSet, node corev1.Node, args ...string) string { + mcd, err := mcdForNode(cs, &node) + require.Nil(t, err) + mcdName := mcd.ObjectMeta.Name + + entryPoint := "oc" + cmd := []string{"rsh", + "-n", "openshift-machine-config-operator", + "-c", "machine-config-daemon", + mcdName} + cmd = append(cmd, args...) + + b, err := exec.Command(entryPoint, cmd...).CombinedOutput() + require.Nil(t, err, "failed to exec cmd %v on node %s", args, node.Name) + return string(b) +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go new file mode 100644 index 0000000000..916950c023 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go @@ -0,0 +1,51 @@ +package osversion + +import ( + "fmt" + + "golang.org/x/sys/windows" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// Get gets the operating system version on Windows. +// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. 
+ panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 0000000000..2d9567f6f0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,10 @@ +package osversion + +const ( + + // RS2 was a client-only release in case you're asking why it's not in the list. + RS1 = 14393 + RS3 = 16299 + RS4 = 17134 + RS5 = 17763 +) diff --git a/vendor/github.com/containerd/console/.travis.yml b/vendor/github.com/containerd/console/.travis.yml index 1756a2d2f9..ba93012c76 100644 --- a/vendor/github.com/containerd/console/.travis.yml +++ b/vendor/github.com/containerd/console/.travis.yml @@ -1,27 +1,17 @@ language: go go: - - "1.10.x" - - "1.11.x" + - 1.9.x + - tip go_import_path: github.com/containerd/console install: - go get -d - - GOOS=openbsd go get -d - - GOOS=solaris go get -d - GOOS=windows go get -d - - go get -u github.com/vbatts/git-validation - - go get -u github.com/kunalkushwaha/ltag - -before_script: - - pushd ..; git clone https://github.com/containerd/project; popd + - GOOS=solaris go get -d script: - - DCO_VERBOSITY=-q ../project/script/validate/dco - - ../project/script/validate/fileheader ../project/ - go test -race - - GOOS=openbsd go build - - GOOS=openbsd go test -c + - GOOS=windows go test - GOOS=solaris go build - GOOS=solaris go test -c - - GOOS=windows go test diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE index 584149b6ee..261eeb9e9f 100644 --- a/vendor/github.com/containerd/console/LICENSE +++ b/vendor/github.com/containerd/console/LICENSE @@ -1,7 +1,6 @@ - Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -176,13 +175,24 @@ END OF TERMS AND CONDITIONS - Copyright The containerd Authors + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md index 5392fdaf19..4c56d9d134 100644 --- a/vendor/github.com/containerd/console/README.md +++ b/vendor/github.com/containerd/console/README.md @@ -15,13 +15,3 @@ if err := current.SetRaw(); err != nil { ws, err := current.Size() current.Resize(ws) ``` - -## Project details - -console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go index c187a9b412..bf2798fda3 100644 --- a/vendor/github.com/containerd/console/console.go +++ b/vendor/github.com/containerd/console/console.go @@ -1,19 +1,3 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - package console import ( diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go index 42274e100e..c963729296 100644 --- a/vendor/github.com/containerd/console/console_linux.go +++ b/vendor/github.com/containerd/console/console_linux.go @@ -1,21 +1,5 @@ // +build linux -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - package console import ( @@ -72,7 +56,7 @@ func NewEpoller() (*Epoller, error) { }, nil } -// Add creates an epoll console based on the provided console. The console will +// Add creates a epoll console based on the provided console. The console will // be registered with EPOLLET (i.e. using edge-triggered notification) and its // file descriptor will be set to non-blocking mode. After this, user should use // the return console to perform I/O. 
@@ -134,7 +118,7 @@ func (e *Epoller) Wait() error { } } -// CloseConsole unregisters the console's file descriptor from epoll interface +// Close unregister the console's file descriptor from epoll interface func (e *Epoller) CloseConsole(fd int) error { e.mu.Lock() defer e.mu.Unlock() @@ -149,12 +133,12 @@ func (e *Epoller) getConsole(sysfd int) *EpollConsole { return f } -// Close closes the epoll fd +// Close the epoll fd func (e *Epoller) Close() error { return unix.Close(e.efd) } -// EpollConsole acts like a console but registers its file descriptor with an +// EpollConsole acts like a console but register its file descriptor with a // epoll fd and uses epoll API to perform I/O. type EpollConsole struct { Console @@ -167,7 +151,7 @@ type EpollConsole struct { // Read reads up to len(p) bytes into p. It returns the number of bytes read // (0 <= n <= len(p)) and any error encountered. // -// If the console's read returns EAGAIN or EIO, we assume that it's a +// If the console's read returns EAGAIN or EIO, we assumes that its a // temporary error because the other side went away and wait for the signal // generated by epoll event to continue. func (ec *EpollConsole) Read(p []byte) (n int, err error) { @@ -207,7 +191,7 @@ func (ec *EpollConsole) Read(p []byte) (n int, err error) { // written from p (0 <= n <= len(p)) and any error encountered that caused // the write to stop early. // -// If writes to the console returns EAGAIN or EIO, we assume that it's a +// If writes to the console returns EAGAIN or EIO, we assumes that its a // temporary error because the other side went away and wait for the signal // generated by epoll event to continue. func (ec *EpollConsole) Write(p []byte) (n int, err error) { @@ -224,7 +208,7 @@ func (ec *EpollConsole) Write(p []byte) (n int, err error) { } else { hangup = (err == unix.EAGAIN || err == unix.EIO) } - // if the other end disappears, assume this is temporary and wait for the + // if the other end disappear, assume this is temporary and wait for the // signal to continue again. if hangup { ec.writec.Wait() @@ -242,7 +226,7 @@ func (ec *EpollConsole) Write(p []byte) (n int, err error) { return n, err } -// Shutdown closes the file descriptor and signals call waiters for this fd. +// Close closed the file descriptor and signal call waiters for this fd. // It accepts a callback which will be called with the console's fd. The // callback typically will be used to do further cleanup such as unregister the // console's fd from the epoll interface. @@ -262,14 +246,10 @@ func (ec *EpollConsole) Shutdown(close func(int) error) error { // signalRead signals that the console is readable. func (ec *EpollConsole) signalRead() { - ec.readc.L.Lock() ec.readc.Signal() - ec.readc.L.Unlock() } // signalWrite signals that the console is writable. func (ec *EpollConsole) signalWrite() { - ec.writec.L.Lock() ec.writec.Signal() - ec.writec.L.Unlock() } diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go index a4a8d1267b..118c8c3abf 100644 --- a/vendor/github.com/containerd/console/console_unix.go +++ b/vendor/github.com/containerd/console/console_unix.go @@ -1,20 +1,4 @@ -// +build darwin freebsd linux openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// +build darwin freebsd linux solaris package console diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go index 62dbe1c033..d78a0b8419 100644 --- a/vendor/github.com/containerd/console/console_windows.go +++ b/vendor/github.com/containerd/console/console_windows.go @@ -1,19 +1,3 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - package console import ( @@ -150,11 +134,11 @@ func (m *master) Close() error { } func (m *master) Read(b []byte) (int, error) { - return os.Stdin.Read(b) + panic("not implemented on windows") } func (m *master) Write(b []byte) (int, error) { - return os.Stdout.Write(b) + panic("not implemented on windows") } func (m *master) Fd() uintptr { diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go index b0128abb0c..b102bad743 100644 --- a/vendor/github.com/containerd/console/tc_darwin.go +++ b/vendor/github.com/containerd/console/tc_darwin.go @@ -1,19 +1,3 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - package console import ( diff --git a/vendor/github.com/containerd/console/tc_freebsd.go b/vendor/github.com/containerd/console/tc_freebsd.go index 04583a6156..e2a10e4413 100644 --- a/vendor/github.com/containerd/console/tc_freebsd.go +++ b/vendor/github.com/containerd/console/tc_freebsd.go @@ -1,19 +1,3 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - package console import ( diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go index 1bdd68e6d5..80ef2f6fb3 100644 --- a/vendor/github.com/containerd/console/tc_linux.go +++ b/vendor/github.com/containerd/console/tc_linux.go @@ -1,19 +1,3 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - package console import ( @@ -29,21 +13,25 @@ const ( cmdTcSet = unix.TCSETS ) +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + // unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // unlockpt should be called before opening the slave side of a pty. func unlockpt(f *os.File) error { var u int32 - if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 { - return err - } - return nil + return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) } // ptsname retrieves the name of the first available pts for the given master. func ptsname(f *os.File) (string, error) { - var u uint32 - if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { return "", err } - return fmt.Sprintf("/dev/pts/%d", u), nil + return fmt.Sprintf("/dev/pts/%d", n), nil } diff --git a/vendor/github.com/containerd/console/tc_openbsd_cgo.go b/vendor/github.com/containerd/console/tc_openbsd_cgo.go deleted file mode 100644 index f0cec06a72..0000000000 --- a/vendor/github.com/containerd/console/tc_openbsd_cgo.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build openbsd,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -//#include -import "C" - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - ptspath, err := C.ptsname(C.int(f.Fd())) - if err != nil { - return "", err - } - return C.GoString(ptspath), nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. 
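The tc_linux.go hunk above routes the pty ioctls through a small ioctl helper and reads the pts number with unix.IoctlGetInt. For context, here is a standalone, Linux-only sketch of the same two calls (TIOCSPTLCK to unlock the slave, TIOCGPTN to learn its index) against /dev/ptmx; it illustrates the underlying ioctls under those assumptions and is not the vendored library's API.

```go
// +build linux

package main

import (
	"fmt"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	// Open a pty master. The slave under /dev/pts/ cannot be opened until
	// TIOCSPTLCK clears the lock flag.
	master, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer master.Close()

	var unlock int32 // 0 means "unlock"
	if _, _, errno := unix.Syscall(unix.SYS_IOCTL, master.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&unlock))); errno != 0 {
		panic(errno)
	}

	// TIOCGPTN reports the slave's pts index, as in the ptsname helper above.
	n, err := unix.IoctlGetInt(int(master.Fd()), unix.TIOCGPTN)
	if err != nil {
		panic(err)
	}
	fmt.Printf("slave is /dev/pts/%d\n", n)
}
```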
-func unlockpt(f *os.File) error { - if _, err := C.grantpt(C.int(f.Fd())); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go b/vendor/github.com/containerd/console/tc_openbsd_nocgo.go deleted file mode 100644 index daccce2058..0000000000 --- a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build openbsd,!cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// -// Implementing the functions below requires cgo support. Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. -// - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -func ptsname(f *os.File) (string, error) { - panic("ptsname() support requires cgo.") -} - -func unlockpt(f *os.File) error { - panic("unlockpt() support requires cgo.") -} diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go index e36a68edd1..f8066d8e39 100644 --- a/vendor/github.com/containerd/console/tc_solaris_cgo.go +++ b/vendor/github.com/containerd/console/tc_solaris_cgo.go @@ -1,21 +1,5 @@ // +build solaris,cgo -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - package console import ( diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go index eb0bd2c36b..0aefa0d2bb 100644 --- a/vendor/github.com/containerd/console/tc_solaris_nocgo.go +++ b/vendor/github.com/containerd/console/tc_solaris_nocgo.go @@ -1,21 +1,5 @@ // +build solaris,!cgo -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - // // Implementing the functions below requires cgo support. Non-cgo stubs // versions are defined below to enable cross-compilation of source code diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go index 7ae773c53e..df7dcb9334 100644 --- a/vendor/github.com/containerd/console/tc_unix.go +++ b/vendor/github.com/containerd/console/tc_unix.go @@ -1,20 +1,4 @@ -// +build darwin freebsd linux openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// +build darwin freebsd linux solaris package console diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go new file mode 100644 index 0000000000..b7f86da869 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go @@ -0,0 +1,18 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package events defines the event pushing and subscription service. +package events diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go new file mode 100644 index 0000000000..4bd5828a4b --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go @@ -0,0 +1,778 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto + +package events + +import ( + context "context" + fmt "fmt" + github_com_containerd_ttrpc "github.com/containerd/ttrpc" + github_com_containerd_typeurl "github.com/containerd/typeurl" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + types "github.com/gogo/protobuf/types" + io "io" + math "math" + reflect "reflect" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ForwardRequest struct { + Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ForwardRequest) Reset() { *m = ForwardRequest{} } +func (*ForwardRequest) ProtoMessage() {} +func (*ForwardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_19f98672016720b5, []int{0} +} +func (m *ForwardRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ForwardRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ForwardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ForwardRequest.Merge(m, src) +} +func (m *ForwardRequest) XXX_Size() int { + return m.Size() +} +func (m *ForwardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ForwardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ForwardRequest proto.InternalMessageInfo + +type Envelope struct { + Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` + Event *types.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Envelope) Reset() { *m = Envelope{} } +func (*Envelope) ProtoMessage() {} +func (*Envelope) Descriptor() ([]byte, []int) { + return fileDescriptor_19f98672016720b5, []int{1} +} +func (m *Envelope) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Envelope.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Envelope) XXX_Merge(src proto.Message) { + xxx_messageInfo_Envelope.Merge(m, src) +} +func (m *Envelope) XXX_Size() int { + return m.Size() +} +func (m *Envelope) XXX_DiscardUnknown() { + xxx_messageInfo_Envelope.DiscardUnknown(m) +} + +var xxx_messageInfo_Envelope proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ForwardRequest)(nil), "containerd.services.events.ttrpc.v1.ForwardRequest") + proto.RegisterType((*Envelope)(nil), "containerd.services.events.ttrpc.v1.Envelope") +} + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto", fileDescriptor_19f98672016720b5) +} + +var fileDescriptor_19f98672016720b5 = []byte{ + // 396 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x52, 0xc1, 0x8e, 0xd3, 0x30, + 0x10, 0x8d, 0x61, 0x77, 0x69, 0x8d, 0xc4, 0xc1, 0xaa, 0x50, 0x08, 0x28, 0x59, 0x2d, 0x97, 0x15, + 0x12, 0xb6, 0x76, 0xf7, 0x06, 0x17, 0xa8, 0x28, 0x12, 0x1c, 0x23, 0x84, 0x2a, 0x90, 0x10, 0x6e, + 0x3a, 0x4d, 0x2d, 0x25, 0xb6, 0x49, 0x9c, 0xa0, 0xde, 0xfa, 0x09, 0x7c, 0x0c, 0x17, 0xfe, 0xa0, + 0x47, 0x8e, 0x9c, 0x80, 0xe6, 0x4b, 0x50, 0x9d, 0xa4, 0x81, 0xf6, 0x40, 0xa5, 0xbd, 0xbd, 0xcc, + 
0x7b, 0x6f, 0xde, 0xcc, 0xc4, 0xf8, 0x75, 0x2c, 0xcc, 0xbc, 0x98, 0xd0, 0x48, 0xa5, 0x2c, 0x52, + 0xd2, 0x70, 0x21, 0x21, 0x9b, 0xfe, 0x0d, 0xb9, 0x16, 0x2c, 0x87, 0xac, 0x14, 0x11, 0xe4, 0xcc, + 0x98, 0x4c, 0x47, 0x0c, 0x4a, 0x90, 0x26, 0x67, 0xe5, 0x45, 0x83, 0xa8, 0xce, 0x94, 0x51, 0xe4, + 0x61, 0xe7, 0xa2, 0xad, 0x83, 0x36, 0x0a, 0x6b, 0xa4, 0xe5, 0x85, 0xf7, 0xec, 0xbf, 0x81, 0xb6, + 0xd9, 0xa4, 0x98, 0x31, 0x9d, 0x14, 0xb1, 0x90, 0x6c, 0x26, 0x20, 0x99, 0x6a, 0x6e, 0xe6, 0x75, + 0x8c, 0x37, 0x88, 0x55, 0xac, 0x2c, 0x64, 0x1b, 0xd4, 0x54, 0xef, 0xc5, 0x4a, 0xc5, 0x09, 0x74, + 0x6e, 0x2e, 0x17, 0x0d, 0x75, 0x7f, 0x97, 0x82, 0x54, 0x9b, 0x96, 0x0c, 0x76, 0x49, 0x23, 0x52, + 0xc8, 0x0d, 0x4f, 0x75, 0x2d, 0x38, 0x7b, 0x8f, 0xef, 0xbc, 0x54, 0xd9, 0x67, 0x9e, 0x4d, 0x43, + 0xf8, 0x54, 0x40, 0x6e, 0xc8, 0x2b, 0xdc, 0x03, 0x59, 0x42, 0xa2, 0x34, 0xb8, 0xe8, 0x14, 0x9d, + 0xdf, 0xbe, 0x7c, 0x4c, 0x0f, 0x58, 0x9d, 0x8e, 0x1a, 0x53, 0xb8, 0xb5, 0x9f, 0x7d, 0x45, 0xb8, + 0xd7, 0x96, 0xc9, 0x10, 0xf7, 0xb7, 0xe1, 0x4d, 0x63, 0x8f, 0xd6, 0xe3, 0xd1, 0x76, 0x3c, 0xfa, + 0xa6, 0x55, 0x0c, 0x7b, 0xab, 0x9f, 0x81, 0xf3, 0xe5, 0x57, 0x80, 0xc2, 0xce, 0x46, 0x1e, 0xe0, + 0xbe, 0xe4, 0x29, 0xe4, 0x9a, 0x47, 0xe0, 0xde, 0x38, 0x45, 0xe7, 0xfd, 0xb0, 0x2b, 0x90, 0x01, + 0x3e, 0x36, 0x4a, 0x8b, 0xc8, 0xbd, 0x69, 0x99, 0xfa, 0x83, 0x3c, 0xc2, 0xc7, 0x76, 0x54, 0xf7, + 0xc8, 0x66, 0x0e, 0xf6, 0x32, 0x9f, 0xcb, 0x45, 0x58, 0x4b, 0x9e, 0x1c, 0x2d, 0xbf, 0x05, 0xe8, + 0xf2, 0x23, 0x3e, 0x19, 0xd9, 0xe5, 0xc8, 0x5b, 0x7c, 0xab, 0xb9, 0x0e, 0xb9, 0x3a, 0xe8, 0x08, + 0xff, 0xde, 0xd2, 0xbb, 0xbb, 0x17, 0x36, 0xda, 0xfc, 0x9c, 0xe1, 0x87, 0xd5, 0xda, 0x77, 0x7e, + 0xac, 0x7d, 0x67, 0x59, 0xf9, 0x68, 0x55, 0xf9, 0xe8, 0x7b, 0xe5, 0xa3, 0xdf, 0x95, 0x8f, 0xde, + 0xbd, 0xb8, 0xd6, 0x8b, 0x7d, 0x5a, 0xa3, 0xb1, 0x33, 0x46, 0x93, 0x13, 0x9b, 0x79, 0xf5, 0x27, + 0x00, 0x00, 0xff, 0xff, 0xd4, 0x90, 0xbd, 0x09, 0x04, 0x03, 0x00, 0x00, +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (m *Envelope) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + // unhandled: timestamp + case "namespace": + return string(m.Namespace), len(m.Namespace) > 0 + case "topic": + return string(m.Topic), len(m.Topic) > 0 + case "event": + decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Event) + if err != nil { + return "", false + } + + adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) + if !ok { + return "", false + } + return adaptor.Field(fieldpath[1:]) + } + return "", false +} +func (m *ForwardRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Envelope != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Envelope.Size())) + n1, err := m.Envelope.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Envelope) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Envelope) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp))) + n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Namespace) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + } + if len(m.Topic) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic))) + i += copy(dAtA[i:], m.Topic) + } + if m.Event != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Event.Size())) + n3, err := m.Event.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ForwardRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Envelope != nil { + l = m.Envelope.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Envelope) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovEvents(uint64(l)) + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Topic) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Event != nil { + l = m.Event.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEvents(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ForwardRequest) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ForwardRequest{`, + `Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Envelope) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Envelope{`, + `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringEvents(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} + +type EventsService interface { + Forward(ctx context.Context, req *ForwardRequest) (*types.Empty, error) +} + +func RegisterEventsService(srv *github_com_containerd_ttrpc.Server, svc EventsService) { + srv.Register("containerd.services.events.ttrpc.v1.Events", map[string]github_com_containerd_ttrpc.Method{ + "Forward": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ForwardRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Forward(ctx, &req) + }, + }) +} + +type eventsClient struct { + client *github_com_containerd_ttrpc.Client +} + +func NewEventsClient(client *github_com_containerd_ttrpc.Client) EventsService { + return &eventsClient{ + client: client, + } +} + +func (c *eventsClient) Forward(ctx context.Context, req *ForwardRequest) (*types.Empty, error) { + var resp types.Empty + if err := c.client.Call(ctx, "containerd.services.events.ttrpc.v1.Events", "Forward", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} +func (m *ForwardRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ForwardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ForwardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Envelope", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Envelope == nil { + m.Envelope = &Envelope{} + } + if err := m.Envelope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Envelope) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Envelope: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Topic = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &types.Any{} + } + if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEvents(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto new file mode 100644 index 0000000000..e140c34395 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package containerd.services.events.ttrpc.v1; + +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import weak "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = 
"github.com/containerd/containerd/api/services/ttrpc/events/v1;events"; + +service Events { + // Forward sends an event that has already been packaged into an envelope + // with a timestamp and namespace. + // + // This is useful if earlier timestamping is required or when forwarding on + // behalf of another component, namespace or publisher. + rpc Forward(ForwardRequest) returns (google.protobuf.Empty); +} + +message ForwardRequest { + Envelope envelope = 1; +} + +message Envelope { + option (containerd.plugin.fieldpath) = true; + google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + string namespace = 2; + string topic = 3; + google.protobuf.Any event = 4; +} diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go index 4f5f6c61fd..c0179d297b 100644 --- a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go @@ -1,36 +1,18 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/types/descriptor.proto -/* - Package types is a generated protocol buffer package. - - It is generated from these files: - github.com/containerd/containerd/api/types/descriptor.proto - github.com/containerd/containerd/api/types/metrics.proto - github.com/containerd/containerd/api/types/mount.proto - github.com/containerd/containerd/api/types/platform.proto - - It has these top-level messages: - Descriptor - Metric - Mount - Platform -*/ package types -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" - -import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - -import strings "strings" -import reflect "reflect" -import sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + io "io" + math "math" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -49,19 +31,80 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // oci descriptor found in a manifest. 
// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor type Descriptor struct { - MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Descriptor) Reset() { *m = Descriptor{} } -func (*Descriptor) ProtoMessage() {} -func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} } +func (m *Descriptor) Reset() { *m = Descriptor{} } +func (*Descriptor) ProtoMessage() {} +func (*Descriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_37f958df3707db9e, []int{0} +} +func (m *Descriptor) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Descriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Descriptor.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Descriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_Descriptor.Merge(m, src) +} +func (m *Descriptor) XXX_Size() int { + return m.Size() +} +func (m *Descriptor) XXX_DiscardUnknown() { + xxx_messageInfo_Descriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_Descriptor proto.InternalMessageInfo func init() { proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor") + proto.RegisterMapType((map[string]string)(nil), "containerd.types.Descriptor.AnnotationsEntry") } + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptor_37f958df3707db9e) +} + +var fileDescriptor_37f958df3707db9e = []byte{ + // 311 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16, + 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, + 0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, + 0x3a, 0xa5, 0x39, 0x4c, 0x5c, 0x5c, 0x2e, 0x70, 0xcd, 0x42, 0xb2, 0x5c, 0x5c, 0xb9, 0xa9, 0x29, + 0x99, 0x89, 0xf1, 0x20, 0x3d, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x9c, 0x60, 0x91, 0x90, + 0xca, 0x82, 0x54, 0x21, 0x2f, 0x2e, 0xb6, 0x94, 0xcc, 0xf4, 
0xd4, 0xe2, 0x12, 0x09, 0x26, 0x90, + 0x94, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee, 0xc9, 0x6b, 0x21, 0x39, 0x35, 0xbf, 0x20, + 0x35, 0x0f, 0x6e, 0x79, 0xb1, 0x7e, 0x7a, 0xbe, 0x2e, 0x44, 0x8b, 0x9e, 0x0b, 0x98, 0x0a, 0x82, + 0x9a, 0x20, 0x24, 0xc4, 0xc5, 0x52, 0x9c, 0x59, 0x95, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x1c, + 0x04, 0x66, 0x0b, 0xf9, 0x73, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, + 0x15, 0x4b, 0xb0, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xe9, 0xea, 0xa1, 0xfb, 0x45, 0x0f, 0xe1, 0x62, + 0x3d, 0x47, 0x84, 0x7a, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0x13, 0xa4, 0xec, 0xb8, 0x04, + 0xd0, 0x15, 0x08, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x3d, 0x07, 0x62, 0x0a, 0x89, 0x70, + 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x42, 0x7c, 0x15, 0x04, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a, + 0x79, 0x9d, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f, + 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0xca, 0x80, 0xf8, 0xd8, 0xb1, + 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0xe0, 0x30, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x22, + 0x8a, 0x20, 0x4a, 0xda, 0x01, 0x00, 0x00, +} + func (m *Descriptor) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -111,6 +154,9 @@ func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], v) } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -124,6 +170,9 @@ func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *Descriptor) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MediaType) @@ -145,6 +194,9 @@ func (m *Descriptor) Size() (n int) { n += mapEntrySize + 1 + sovDescriptor(uint64(mapEntrySize)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -169,7 +221,7 @@ func (this *Descriptor) String() string { for k, _ := range this.Annotations { keysForAnnotations = append(keysForAnnotations, k) } - sortkeys.Strings(keysForAnnotations) + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) mapStringForAnnotations := "map[string]string{" for _, k := range keysForAnnotations { mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) @@ -180,6 +232,7 @@ func (this *Descriptor) String() string { `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, `Annotations:` + mapStringForAnnotations + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -207,7 +260,7 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -235,7 +288,7 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -245,6 +298,9 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { return ErrInvalidLengthDescriptor } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDescriptor + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -264,7 +320,7 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -274,6 +330,9 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { return ErrInvalidLengthDescriptor 
} postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDescriptor + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -293,7 +352,7 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Size_ |= (int64(b) & 0x7F) << shift + m.Size_ |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -312,7 +371,7 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -321,6 +380,9 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { return ErrInvalidLengthDescriptor } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDescriptor + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -341,7 +403,7 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -358,7 +420,7 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -368,6 +430,9 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { return ErrInvalidLengthDescriptor } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDescriptor + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -384,7 +449,7 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -394,6 +459,9 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { return ErrInvalidLengthDescriptor } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthDescriptor + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -425,9 +493,13 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthDescriptor } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDescriptor + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
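// Editorial note, not part of the vendored patch: the append into
// m.XXX_unrecognized just above, paired with the copy of XXX_unrecognized at
// the tail of MarshalTo, is what preserves unknown fields. Tags this version
// of Descriptor does not know are skipped, retained verbatim, and re-emitted
// on the next Marshal, so decoding with an older schema is lossless.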
iNdEx += skippy } } @@ -491,10 +563,13 @@ func skipDescriptor(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthDescriptor } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthDescriptor + } return iNdEx, nil case 3: for { @@ -523,6 +598,9 @@ func skipDescriptor(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthDescriptor + } } return iNdEx, nil case 4: @@ -541,31 +619,3 @@ var ( ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowDescriptor = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptorDescriptor) -} - -var fileDescriptorDescriptor = []byte{ - // 311 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16, - 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, - 0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, - 0x3a, 0xa5, 0x39, 0x4c, 0x5c, 0x5c, 0x2e, 0x70, 0xcd, 0x42, 0xb2, 0x5c, 0x5c, 0xb9, 0xa9, 0x29, - 0x99, 0x89, 0xf1, 0x20, 0x3d, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x9c, 0x60, 0x91, 0x90, - 0xca, 0x82, 0x54, 0x21, 0x2f, 0x2e, 0xb6, 0x94, 0xcc, 0xf4, 0xd4, 0xe2, 0x12, 0x09, 0x26, 0x90, - 0x94, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee, 0xc9, 0x6b, 0x21, 0x39, 0x35, 0xbf, 0x20, - 0x35, 0x0f, 0x6e, 0x79, 0xb1, 0x7e, 0x7a, 0xbe, 0x2e, 0x44, 0x8b, 0x9e, 0x0b, 0x98, 0x0a, 0x82, - 0x9a, 0x20, 0x24, 0xc4, 0xc5, 0x52, 0x9c, 0x59, 0x95, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x1c, - 0x04, 0x66, 0x0b, 0xf9, 0x73, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, - 0x15, 0x4b, 0xb0, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xe9, 0xea, 0xa1, 0xfb, 0x45, 0x0f, 0xe1, 0x62, - 0x3d, 0x47, 0x84, 0x7a, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0x13, 0xa4, 0xec, 0xb8, 0x04, - 0xd0, 0x15, 0x08, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x3d, 0x07, 0x62, 0x0a, 0x89, 0x70, - 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x42, 0x7c, 0x15, 0x04, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a, - 0x79, 0x9d, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f, - 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0xca, 0x80, 0xf8, 0xd8, 0xb1, - 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0xe0, 0x30, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x22, - 0x8a, 0x20, 0x4a, 0xda, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go index 52e9f40a5a..c231d343a0 100644 --- a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go @@ -3,22 +3,17 @@ package types -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" -import google_protobuf1 "github.com/gogo/protobuf/types" -import _ "github.com/gogo/protobuf/types" - -import time "time" - -import types1 "github.com/gogo/protobuf/types" - -import strings "strings" -import reflect "reflect" - -import io 
"io" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + types "github.com/gogo/protobuf/types" + io "io" + math "math" + reflect "reflect" + strings "strings" + time "time" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -26,19 +21,82 @@ var _ = fmt.Errorf var _ = math.Inf var _ = time.Kitchen +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + type Metric struct { - Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Data *google_protobuf1.Any `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Data *types.Any `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Metric) Reset() { *m = Metric{} } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} } +func (m *Metric) Reset() { *m = Metric{} } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_8d594d87edf6e6bc, []int{0} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return m.Size() +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo func init() { proto.RegisterType((*Metric)(nil), "containerd.types.Metric") } + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptor_8d594d87edf6e6bc) +} + +var fileDescriptor_8d594d87edf6e6bc = []byte{ + // 258 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96, + 0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81, + 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64, + 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09, + 0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50, + 0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54, + 0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 
0x81, 0xa9, 0x70, + 0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e, + 0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c, + 0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1, + 0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c, + 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, + 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24, + 0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01, + 0x00, 0x00, +} + func (m *Metric) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -56,8 +114,8 @@ func (m *Metric) MarshalTo(dAtA []byte) (int, error) { _ = l dAtA[i] = 0xa i++ - i = encodeVarintMetrics(dAtA, i, uint64(types1.SizeOfStdTime(m.Timestamp))) - n1, err := types1.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + i = encodeVarintMetrics(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp))) + n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) if err != nil { return 0, err } @@ -78,6 +136,9 @@ func (m *Metric) MarshalTo(dAtA []byte) (int, error) { } i += n2 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -91,9 +152,12 @@ func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *Metric) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - l = types1.SizeOfStdTime(m.Timestamp) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) n += 1 + l + sovMetrics(uint64(l)) l = len(m.ID) if l > 0 { @@ -103,6 +167,9 @@ func (m *Metric) Size() (n int) { l = m.Data.Size() n += 1 + l + sovMetrics(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -124,9 +191,10 @@ func (this *Metric) String() string { return "nil" } s := strings.Join([]string{`&Metric{`, - `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "google_protobuf1.Any", 1) + `,`, + `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "types.Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -154,7 +222,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -182,7 +250,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -191,10 +259,13 @@ func (m *Metric) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMetrics } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := types1.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -212,7 
+283,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -222,6 +293,9 @@ func (m *Metric) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMetrics } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -241,7 +315,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -250,11 +324,14 @@ func (m *Metric) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMetrics } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Data == nil { - m.Data = &google_protobuf1.Any{} + m.Data = &types.Any{} } if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -269,9 +346,13 @@ func (m *Metric) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthMetrics } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -335,10 +416,13 @@ func skipMetrics(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthMetrics } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } return iNdEx, nil case 3: for { @@ -367,6 +451,9 @@ func skipMetrics(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } } return iNdEx, nil case 4: @@ -385,28 +472,3 @@ var ( ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptorMetrics) -} - -var fileDescriptorMetrics = []byte{ - // 258 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96, - 0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81, - 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64, - 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09, - 0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50, - 0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54, - 0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70, - 0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e, - 0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c, - 0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1, - 0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c, - 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 
0xf0, 0x48, - 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24, - 0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01, - 0x00, 0x00, -} diff --git a/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/vendor/github.com/containerd/containerd/api/types/mount.pb.go index f7a9c3c1fe..54af8ea2f8 100644 --- a/vendor/github.com/containerd/containerd/api/types/mount.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/mount.pb.go @@ -3,22 +3,26 @@ package types -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" - -import strings "strings" -import reflect "reflect" - -import io "io" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + // Mount describes mounts for a container. // // This type is the lingua franca of ContainerD. All services provide mounts @@ -35,16 +39,69 @@ type Mount struct { // Target path in container Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` // Options specifies zero or more fstab style mount options. - Options []string `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"` + Options []string `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { + return fileDescriptor_920196890d4a7b9f, []int{0} +} +func (m *Mount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mount.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Mount) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mount.Merge(m, src) +} +func (m *Mount) XXX_Size() int { + return m.Size() +} +func (m *Mount) XXX_DiscardUnknown() { + xxx_messageInfo_Mount.DiscardUnknown(m) } -func (m *Mount) Reset() { *m = Mount{} } -func (*Mount) ProtoMessage() {} -func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} } +var xxx_messageInfo_Mount proto.InternalMessageInfo func init() { proto.RegisterType((*Mount)(nil), "containerd.types.Mount") } + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptor_920196890d4a7b9f) +} + +var fileDescriptor_920196890d4a7b9f = []byte{ + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 
0x16, 0xeb, 0xe7, 0xe6, 0x97, + 0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5, + 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab, + 0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, + 0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85, + 0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48, + 0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33, + 0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, + 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01, + 0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00, + 0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00, +} + func (m *Mount) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -93,6 +150,9 @@ func (m *Mount) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -106,6 +166,9 @@ func encodeVarintMount(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *Mount) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -126,6 +189,9 @@ func (m *Mount) Size() (n int) { n += 1 + l + sovMount(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -151,6 +217,7 @@ func (this *Mount) String() string { `Source:` + fmt.Sprintf("%v", this.Source) + `,`, `Target:` + fmt.Sprintf("%v", this.Target) + `,`, `Options:` + fmt.Sprintf("%v", this.Options) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -178,7 +245,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -206,7 +273,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -216,6 +283,9 @@ func (m *Mount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMount } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMount + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -235,7 +305,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -245,6 +315,9 @@ func (m *Mount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMount } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMount + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -264,7 +337,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -274,6 +347,9 @@ func (m *Mount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMount } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMount + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -293,7 +369,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -303,6 +379,9 @@ func (m *Mount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMount } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMount + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -317,9 +396,13 @@ func (m *Mount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthMount } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMount + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -383,10 +466,13 @@ func skipMount(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthMount } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthMount + } return iNdEx, nil case 3: for { @@ -415,6 +501,9 @@ func skipMount(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthMount + } } return iNdEx, nil case 4: @@ -433,24 +522,3 @@ var ( ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowMount = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptorMount) -} - -var fileDescriptorMount = []byte{ - // 202 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97, - 0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5, - 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab, - 0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, - 0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85, - 0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48, - 0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33, - 0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, - 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01, - 0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00, - 0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go index ba9a3bf881..558f94732b 100644 --- a/vendor/github.com/containerd/containerd/api/types/platform.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go @@ -3,37 +3,94 @@ package types -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" - -import strings "strings" -import reflect "reflect" - -import io "io" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + // Platform follows the structure of the OCI platform specification, from // descriptors. type Platform struct { - OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"` - Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"` - Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"` + OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"` + Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"` + Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Platform) Reset() { *m = Platform{} } +func (*Platform) ProtoMessage() {} +func (*Platform) Descriptor() ([]byte, []int) { + return fileDescriptor_24ba7a4b83e2367e, []int{0} +} +func (m *Platform) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Platform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Platform.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Platform) XXX_Merge(src proto.Message) { + xxx_messageInfo_Platform.Merge(m, src) +} +func (m *Platform) XXX_Size() int { + return m.Size() +} +func (m *Platform) XXX_DiscardUnknown() { + xxx_messageInfo_Platform.DiscardUnknown(m) } -func (m *Platform) Reset() { *m = Platform{} } -func (*Platform) ProtoMessage() {} -func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorPlatform, []int{0} } +var xxx_messageInfo_Platform proto.InternalMessageInfo func init() { proto.RegisterType((*Platform)(nil), "containerd.types.Platform") } + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptor_24ba7a4b83e2367e) +} + +var fileDescriptor_24ba7a4b83e2367e = []byte{ + // 205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24, + 0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9, + 0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5, + 0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94, + 0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98, + 0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a, + 0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, + 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 
0xe4, + 0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18, + 0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00, +} + func (m *Platform) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -67,6 +124,9 @@ func (m *Platform) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant))) i += copy(dAtA[i:], m.Variant) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -80,6 +140,9 @@ func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *Platform) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.OS) @@ -94,6 +157,9 @@ func (m *Platform) Size() (n int) { if l > 0 { n += 1 + l + sovPlatform(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -118,6 +184,7 @@ func (this *Platform) String() string { `OS:` + fmt.Sprintf("%v", this.OS) + `,`, `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, `Variant:` + fmt.Sprintf("%v", this.Variant) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -145,7 +212,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -173,7 +240,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -183,6 +250,9 @@ func (m *Platform) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlatform } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlatform + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -202,7 +272,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -212,6 +282,9 @@ func (m *Platform) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlatform } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlatform + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -231,7 +304,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -241,6 +314,9 @@ func (m *Platform) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlatform } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlatform + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -255,9 +331,13 @@ func (m *Platform) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthPlatform } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPlatform + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
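// Editorial note, not part of the vendored patch: the new `postIndex < 0` and
// `(iNdEx + skippy) < 0` guards that this regeneration adds throughout defend
// against crafted varint lengths that overflow int and wrap negative; without
// them the subsequent slice expressions could panic on malformed input.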
iNdEx += skippy } } @@ -321,10 +401,13 @@ func skipPlatform(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthPlatform } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthPlatform + } return iNdEx, nil case 3: for { @@ -353,6 +436,9 @@ func skipPlatform(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthPlatform + } } return iNdEx, nil case 4: @@ -371,24 +457,3 @@ var ( ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowPlatform = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptorPlatform) -} - -var fileDescriptorPlatform = []byte{ - // 205 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24, - 0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9, - 0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5, - 0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94, - 0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98, - 0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a, - 0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, - 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18, - 0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go index 437abe8f4c..69d851c938 100644 --- a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go @@ -1,34 +1,19 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/types/task/task.proto -/* - Package task is a generated protocol buffer package. 
- - It is generated from these files: - github.com/containerd/containerd/api/types/task/task.proto - - It has these top-level messages: - Process - ProcessInfo -*/ package task -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" -import _ "github.com/gogo/protobuf/types" -import google_protobuf2 "github.com/gogo/protobuf/types" - -import time "time" - -import types "github.com/gogo/protobuf/types" - -import strings "strings" -import reflect "reflect" - -import io "io" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + types "github.com/gogo/protobuf/types" + io "io" + math "math" + reflect "reflect" + strings "strings" + time "time" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -61,6 +46,7 @@ var Status_name = map[int32]string{ 4: "PAUSED", 5: "PAUSING", } + var Status_value = map[string]int32{ "UNKNOWN": 0, "CREATED": 1, @@ -73,24 +59,58 @@ var Status_value = map[string]int32{ func (x Status) String() string { return proto.EnumName(Status_name, int32(x)) } -func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} } + +func (Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_391ef18c8ab0dc16, []int{0} +} type Process struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` - Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` - ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` + ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Process) Reset() { *m = Process{} } -func (*Process) ProtoMessage() {} -func (*Process) Descriptor() ([]byte, []int) { 
return fileDescriptorTask, []int{0} } +func (m *Process) Reset() { *m = Process{} } +func (*Process) ProtoMessage() {} +func (*Process) Descriptor() ([]byte, []int) { + return fileDescriptor_391ef18c8ab0dc16, []int{0} +} +func (m *Process) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Process) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Process.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Process) XXX_Merge(src proto.Message) { + xxx_messageInfo_Process.Merge(m, src) +} +func (m *Process) XXX_Size() int { + return m.Size() +} +func (m *Process) XXX_DiscardUnknown() { + xxx_messageInfo_Process.DiscardUnknown(m) +} + +var xxx_messageInfo_Process proto.InternalMessageInfo type ProcessInfo struct { // PID is the process ID. @@ -98,18 +118,93 @@ type ProcessInfo struct { // Info contains additional process information. // // Info varies by platform. - Info *google_protobuf2.Any `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + Info *types.Any `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProcessInfo) Reset() { *m = ProcessInfo{} } +func (*ProcessInfo) ProtoMessage() {} +func (*ProcessInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_391ef18c8ab0dc16, []int{1} +} +func (m *ProcessInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProcessInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProcessInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProcessInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProcessInfo.Merge(m, src) +} +func (m *ProcessInfo) XXX_Size() int { + return m.Size() +} +func (m *ProcessInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ProcessInfo.DiscardUnknown(m) } -func (m *ProcessInfo) Reset() { *m = ProcessInfo{} } -func (*ProcessInfo) ProtoMessage() {} -func (*ProcessInfo) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{1} } +var xxx_messageInfo_ProcessInfo proto.InternalMessageInfo func init() { + proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value) proto.RegisterType((*Process)(nil), "containerd.v1.types.Process") proto.RegisterType((*ProcessInfo)(nil), "containerd.v1.types.ProcessInfo") - proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value) } + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptor_391ef18c8ab0dc16) +} + +var fileDescriptor_391ef18c8ab0dc16 = []byte{ + // 545 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40, + 0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64, + 0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13, + 0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c, + 0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39, + 0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66, + 
0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26, + 0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79, + 0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31, + 0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9, + 0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50, + 0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28, + 0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23, + 0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0, + 0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda, + 0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a, + 0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11, + 0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55, + 0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab, + 0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a, + 0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50, + 0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82, + 0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb, + 0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19, + 0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf, + 0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce, + 0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f, + 0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47, + 0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94, + 0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc, + 0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f, + 0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8, + 0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9, + 0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00, + 0x00, +} + func (m *Process) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -182,12 +277,15 @@ func (m *Process) MarshalTo(dAtA []byte) (int, error) { } dAtA[i] = 0x52 i++ - i = encodeVarintTask(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt))) - n1, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt))) + n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) if err != nil { return 0, err } i += n1 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -221,6 +319,9 @@ func (m *ProcessInfo) MarshalTo(dAtA []byte) (int, error) { } i += n2 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], 
m.XXX_unrecognized) + } return i, nil } @@ -234,6 +335,9 @@ func encodeVarintTask(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *Process) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ContainerID) @@ -268,12 +372,18 @@ func (m *Process) Size() (n int) { if m.ExitStatus != 0 { n += 1 + sovTask(uint64(m.ExitStatus)) } - l = types.SizeOfStdTime(m.ExitedAt) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) n += 1 + l + sovTask(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ProcessInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Pid != 0 { @@ -283,6 +393,9 @@ func (m *ProcessInfo) Size() (n int) { l = m.Info.Size() n += 1 + l + sovTask(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -313,7 +426,8 @@ func (this *Process) String() string { `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -324,7 +438,8 @@ func (this *ProcessInfo) String() string { } s := strings.Join([]string{`&ProcessInfo{`, `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "google_protobuf2.Any", 1) + `,`, + `Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "types.Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -352,7 +467,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -380,7 +495,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -390,6 +505,9 @@ func (m *Process) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTask } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -409,7 +527,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -419,6 +537,9 @@ func (m *Process) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTask } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -438,7 +559,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Pid |= (uint32(b) & 0x7F) << shift + m.Pid |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -457,7 +578,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Status |= (Status(b) & 0x7F) << shift + m.Status |= Status(b&0x7F) << shift if b < 0x80 { break } @@ -476,7 +597,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -486,6 +607,9 
@@ func (m *Process) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTask } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -505,7 +629,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -515,6 +639,9 @@ func (m *Process) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTask } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -534,7 +661,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -544,6 +671,9 @@ func (m *Process) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTask } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -563,7 +693,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -583,7 +713,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExitStatus |= (uint32(b) & 0x7F) << shift + m.ExitStatus |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -602,7 +732,7 @@ func (m *Process) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -611,10 +741,13 @@ func (m *Process) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTask } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -627,9 +760,13 @@ func (m *Process) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTask } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTask + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
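The same pattern applies to Process here, with one behavioral addition: the unmarshaler now appends any skipped field bytes to m.XXX_unrecognized, and MarshalTo/Size write them back out, so unknown fields survive a decode/encode round trip instead of being silently dropped. A round-trip sketch against the regenerated task package vendored in this diff (the extra field number 15 is an arbitrary example):

package main

import (
	"bytes"
	"fmt"

	"github.com/containerd/containerd/api/types/task"
)

func main() {
	p := &task.Process{ID: "init", Pid: 42}
	data, err := p.Marshal()
	if err != nil {
		panic(err)
	}
	// Append a field this version of the message does not know about:
	// field number 15, wire type 0 (varint), value 1.
	unknown := []byte{0x78, 0x01}
	data = append(data, unknown...)

	var q task.Process
	if err := q.Unmarshal(data); err != nil {
		panic(err)
	}
	// The skipped bytes land in XXX_unrecognized ...
	fmt.Printf("unrecognized: %x\n", q.XXX_unrecognized)

	// ... and are written back out on re-marshal, so the unknown field
	// survives the round trip.
	out, err := q.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println("unknown field preserved:", bytes.HasSuffix(out, unknown))
}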
iNdEx += skippy } } @@ -654,7 +791,7 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -682,7 +819,7 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Pid |= (uint32(b) & 0x7F) << shift + m.Pid |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -701,7 +838,7 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -710,11 +847,14 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTask } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Info == nil { - m.Info = &google_protobuf2.Any{} + m.Info = &types.Any{} } if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -729,9 +869,13 @@ func (m *ProcessInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTask } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTask + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -795,10 +939,13 @@ func skipTask(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthTask } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthTask + } return iNdEx, nil case 3: for { @@ -827,6 +974,9 @@ func skipTask(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthTask + } } return iNdEx, nil case 4: @@ -845,46 +995,3 @@ var ( ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowTask = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptorTask) -} - -var fileDescriptorTask = []byte{ - // 545 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40, - 0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64, - 0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13, - 0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c, - 0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39, - 0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66, - 0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26, - 0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79, - 0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31, - 0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9, - 0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50, - 0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28, - 0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23, - 0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0, - 0x48, 0xdb, 0xb3, 0x80, 0x7d, 
0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda, - 0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a, - 0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11, - 0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55, - 0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab, - 0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a, - 0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50, - 0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82, - 0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb, - 0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19, - 0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf, - 0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce, - 0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f, - 0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47, - 0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94, - 0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc, - 0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f, - 0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8, - 0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9, - 0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00, - 0x00, -} diff --git a/vendor/github.com/containerd/containerd/cio/io.go b/vendor/github.com/containerd/containerd/cio/io.go index 1f8abf5f0c..c7cf4f0bcb 100644 --- a/vendor/github.com/containerd/containerd/cio/io.go +++ b/vendor/github.com/containerd/containerd/cio/io.go @@ -18,10 +18,13 @@ package cio import ( "context" + "errors" "fmt" "io" + "net/url" "os" "path/filepath" + "strings" "sync" "github.com/containerd/containerd/defaults" @@ -222,46 +225,88 @@ type DirectIO struct { cio } -var _ IO = &DirectIO{} +var ( + _ IO = &DirectIO{} + _ IO = &logURI{} +) + +// LogURI provides the raw logging URI +func LogURI(uri *url.URL) Creator { + return func(_ string) (IO, error) { + return &logURI{ + config: Config{ + Stdout: uri.String(), + Stderr: uri.String(), + }, + }, nil + } +} + +// BinaryIO forwards container STDOUT|STDERR directly to a logging binary +func BinaryIO(binary string, args map[string]string) Creator { + return func(_ string) (IO, error) { + binary = filepath.Clean(binary) + if !strings.HasPrefix(binary, "/") { + return nil, errors.New("absolute path needed") + } + uri := &url.URL{ + Scheme: "binary", + Path: binary, + } + q := uri.Query() + for k, v := range args { + q.Set(k, v) + } + uri.RawQuery = q.Encode() + res := uri.String() + return &logURI{ + config: Config{ + Stdout: res, + Stderr: res, + }, + }, nil + } +} // LogFile creates a file on disk that logs the task's STDOUT,STDERR. // If the log file already exists, the logs will be appended to the file. 
func LogFile(path string) Creator { return func(_ string) (IO, error) { - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return nil, err + path = filepath.Clean(path) + if !strings.HasPrefix(path, "/") { + return nil, errors.New("absolute path needed") } - f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err + uri := &url.URL{ + Scheme: "file", + Path: path, } - f.Close() - return &logIO{ + res := uri.String() + return &logURI{ config: Config{ - Stdout: path, - Stderr: path, + Stdout: res, + Stderr: res, }, }, nil } } -type logIO struct { +type logURI struct { config Config } -func (l *logIO) Config() Config { +func (l *logURI) Config() Config { return l.config } -func (l *logIO) Cancel() { +func (l *logURI) Cancel() { } -func (l *logIO) Wait() { +func (l *logURI) Wait() { } -func (l *logIO) Close() error { +func (l *logURI) Close() error { return nil } diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index eb2ada80bf..42d320933b 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -72,17 +72,19 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) { } var wg = &sync.WaitGroup{} - wg.Add(1) - go func() { - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - - io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) - pipes.Stdout.Close() - wg.Done() - }() + if fifos.Stdout != "" { + wg.Add(1) + go func() { + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) + pipes.Stdout.Close() + wg.Done() + }() + } - if !fifos.Terminal { + if !fifos.Terminal && fifos.Stderr != "" { wg.Add(1) go func() { p := bufPool.Get().(*[]byte) diff --git a/vendor/github.com/containerd/containerd/cio/io_windows.go b/vendor/github.com/containerd/containerd/cio/io_windows.go index 4e5d18231d..ca6cd54a44 100644 --- a/vendor/github.com/containerd/containerd/cio/io_windows.go +++ b/vendor/github.com/containerd/containerd/cio/io_windows.go @@ -31,11 +31,15 @@ const pipeRoot = `\\.\pipe` // NewFIFOSetInDir returns a new set of fifos for the task func NewFIFOSetInDir(_, id string, terminal bool) (*FIFOSet, error) { + stderrPipe := "" + if !terminal { + stderrPipe = fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id) + } return NewFIFOSet(Config{ Terminal: terminal, Stdin: fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id), Stdout: fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id), - Stderr: fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id), + Stderr: stderrPipe, }, nil), nil } diff --git a/vendor/github.com/containerd/containerd/defaults/defaults.go b/vendor/github.com/containerd/containerd/defaults/defaults.go index 7040f5b85a..6f5b122ecf 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults.go @@ -23,4 +23,10 @@ const ( // DefaultMaxSendMsgSize defines the default maximum message size for // sending protobufs passed over the GRPC API. 
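Context for the cio hunks above: log destinations are now expressed as URIs that travel through the task Config for the runtime v2 shim to resolve, instead of the client opening files itself. LogFile therefore requires an absolute path and no longer creates the file, and BinaryIO/LogURI cover logging binaries and caller-built schemes. A usage sketch (the /usr/local/bin/my-logger and /var/log paths are illustrative):

package main

import (
	"net/url"

	"github.com/containerd/containerd/cio"
)

func main() {
	// File logging: the client just passes a file:// URI through the task
	// Config; the shim opens the file. The path must be absolute.
	fileIO := cio.LogFile("/var/log/my-task.log")

	// Binary logging: STDOUT/STDERR are piped to a logging binary, with
	// arguments encoded as query parameters of a binary:// URI.
	binIO := cio.BinaryIO("/usr/local/bin/my-logger", map[string]string{
		"env": "prod",
	})

	// Raw URI, for schemes the caller builds directly.
	u, _ := url.Parse("binary:///usr/local/bin/my-logger?env=prod")
	rawIO := cio.LogURI(u)

	// All three are cio.Creator values, usable as the io argument to
	// container.NewTask.
	_, _, _ = fileIO, binIO, rawIO
}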
DefaultMaxSendMsgSize = 16 << 20 + // DefaultRuntimeNSLabel defines the namespace label to check for the + // default runtime + DefaultRuntimeNSLabel = "containerd.io/defaults/runtime" + // DefaultSnapshotterNSLabel defines the namespace label to check for the + // default snapshotter + DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter" ) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 30ed42235e..319e8777bf 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -32,4 +32,6 @@ const ( // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. DefaultFIFODir = "/run/containerd/fifo" + // DefaultRuntime is the default linux runtime + DefaultRuntime = "io.containerd.runc.v2" ) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go index 983bf762f7..5eede8de83 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -26,10 +26,10 @@ import ( var ( // DefaultRootDir is the default location used by containerd to store // persistent data - DefaultRootDir = filepath.Join(os.Getenv("programfiles"), "containerd", "root") + DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root") // DefaultStateDir is the default location used by containerd to store // transient data - DefaultStateDir = filepath.Join(os.Getenv("programfiles"), "containerd", "state") + DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state") ) const ( @@ -40,4 +40,6 @@ const ( // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. Unused on Windows. DefaultFIFODir = "" + // DefaultRuntime is the default windows runtime + DefaultRuntime = "io.containerd.runhcs.v1" ) diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go index 40427fc5a5..b5200afc0e 100644 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -26,7 +26,11 @@ // client-side errors to the correct types. package errdefs -import "github.com/pkg/errors" +import ( + "context" + + "github.com/pkg/errors" +) // Definitions of common error types used throughout containerd. All containerd // errors returned by most packages will map into one of these errors classes. @@ -76,3 +80,14 @@ func IsUnavailable(err error) bool { func IsNotImplemented(err error) bool { return errors.Cause(err) == ErrNotImplemented } + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Cause(err) == context.Canceled +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. 
+func IsDeadlineExceeded(err error) bool { + return errors.Cause(err) == context.DeadlineExceeded +} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go index 4eab03ab86..209f63bd0f 100644 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -17,6 +17,7 @@ package errdefs import ( + "context" "strings" "github.com/pkg/errors" @@ -55,6 +56,10 @@ func ToGRPC(err error) error { return status.Errorf(codes.Unavailable, err.Error()) case IsNotImplemented(err): return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) } return err @@ -89,13 +94,17 @@ func FromGRPC(err error) error { cls = ErrFailedPrecondition case codes.Unimplemented: cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded default: cls = ErrUnknown } msg := rebaseMessage(cls, err) if msg != "" { - err = errors.Wrapf(cls, msg) + err = errors.Wrap(cls, msg) } else { err = errors.WithStack(cls) } diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 3fab96b858..31f1a3ac09 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -30,7 +30,7 @@ var ( // messages. G = GetLogger - // L is an alias for the the standard logger. + // L is an alias for the standard logger. L = logrus.NewEntry(logrus.StandardLogger()) ) diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go index cc5621a68f..20596f09dd 100644 --- a/vendor/github.com/containerd/containerd/namespaces/context.go +++ b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -36,10 +36,9 @@ type namespaceKey struct{} // WithNamespace sets a given namespace on the context func WithNamespace(ctx context.Context, namespace string) context.Context { ctx = context.WithValue(ctx, namespaceKey{}, namespace) // set our key for namespace - - // also store on the grpc headers so it gets picked up by any clients that + // also store on the grpc and ttrpc headers so it gets picked up by any clients that // are using this. - return withGRPCNamespaceHeader(ctx, namespace) + return withTTRPCNamespaceHeader(withGRPCNamespaceHeader(ctx, namespace), namespace) } // NamespaceFromEnv uses the namespace defined in CONTAINERD_NAMESPACE or @@ -58,22 +57,21 @@ func NamespaceFromEnv(ctx context.Context) context.Context { func Namespace(ctx context.Context) (string, bool) { namespace, ok := ctx.Value(namespaceKey{}).(string) if !ok { - return fromGRPCHeader(ctx) + if namespace, ok = fromGRPCHeader(ctx); !ok { + return fromTTRPCHeader(ctx) + } } - return namespace, ok } -// NamespaceRequired returns the valid namepace from the context or an error. +// NamespaceRequired returns the valid namespace from the context or an error. 
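With the errdefs additions above, context cancellation and deadline errors now survive a gRPC boundary instead of collapsing into codes.Unknown. A small sketch of the round trip:

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func main() {
	// Server side: context.Canceled now maps to codes.Canceled.
	grpcErr := errdefs.ToGRPC(context.Canceled)
	fmt.Println(grpcErr)

	// Client side: the status code is rebased back onto the stdlib error,
	// so callers can keep testing with the helpers.
	back := errdefs.FromGRPC(grpcErr)
	fmt.Println(errdefs.IsCanceled(back))         // true
	fmt.Println(errdefs.IsDeadlineExceeded(back)) // false
}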
func NamespaceRequired(ctx context.Context) (string, error) { namespace, ok := Namespace(ctx) if !ok || namespace == "" { return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required") } - if err := Validate(namespace); err != nil { return "", errors.Wrap(err, "namespace validation") } - return namespace, nil } diff --git a/vendor/github.com/containerd/containerd/namespaces/store.go b/vendor/github.com/containerd/containerd/namespaces/store.go index 0b5c985691..5936772cb4 100644 --- a/vendor/github.com/containerd/containerd/namespaces/store.go +++ b/vendor/github.com/containerd/containerd/namespaces/store.go @@ -33,5 +33,14 @@ type Store interface { List(ctx context.Context) ([]string, error) // Delete removes the namespace. The namespace must be empty to be deleted. - Delete(ctx context.Context, namespace string) error + Delete(ctx context.Context, namespace string, opts ...DeleteOpts) error } + +// DeleteInfo specifies information for the deletion of a namespace +type DeleteInfo struct { + // Name of the namespace + Name string +} + +// DeleteOpts allows the caller to set options for namespace deletion +type DeleteOpts func(context.Context, *DeleteInfo) error diff --git a/vendor/github.com/containerd/containerd/namespaces/ttrpc.go b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go new file mode 100644 index 0000000000..bcd2643cf5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go @@ -0,0 +1,51 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package namespaces + +import ( + "context" + + "github.com/containerd/ttrpc" +) + +const ( + // TTRPCHeader defines the header name for specifying a containerd namespace + TTRPCHeader = "containerd-namespace-ttrpc" +) + +func copyMetadata(src ttrpc.MD) ttrpc.MD { + md := ttrpc.MD{} + for k, v := range src { + md[k] = append(md[k], v...) + } + return md +} + +func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context { + md, ok := ttrpc.GetMetadata(ctx) + if !ok { + md = ttrpc.MD{} + } else { + md = copyMetadata(md) + } + md.Set(TTRPCHeader, namespace) + return ttrpc.WithMetadata(ctx, md) +} + +func fromTTRPCHeader(ctx context.Context) (string, bool) { + return ttrpc.GetMetadataValue(ctx, TTRPCHeader) +} diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go new file mode 100644 index 0000000000..ba3d51d510 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go @@ -0,0 +1,105 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
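A sketch of the namespaces behavior after the ttrpc header change above: WithNamespace stamps the namespace onto the context value, the gRPC headers, and now also the ttrpc metadata, so ttrpc servers such as the shim event service can recover it. The "k8s.io" name is illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/namespaces"
)

func main() {
	ctx := namespaces.WithNamespace(context.Background(), "k8s.io")

	ns, ok := namespaces.Namespace(ctx)
	fmt.Println(ns, ok) // k8s.io true

	// NamespaceRequired also validates the name and errors when unset.
	if _, err := namespaces.NamespaceRequired(context.Background()); err != nil {
		fmt.Println("no namespace:", err)
	}
}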
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpcutil + +import ( + "sync" + "time" + + v1 "github.com/containerd/containerd/api/services/ttrpc/events/v1" + "github.com/containerd/ttrpc" + "github.com/pkg/errors" +) + +const ttrpcDialTimeout = 5 * time.Second + +type ttrpcConnector func() (*ttrpc.Client, error) + +// Client is the client to interact with TTRPC part of containerd server (plugins, events) +type Client struct { + mu sync.Mutex + connector ttrpcConnector + client *ttrpc.Client + closed bool +} + +// NewClient returns a new containerd TTRPC client that is connected to the containerd instance provided by address +func NewClient(address string, opts ...ttrpc.ClientOpts) (*Client, error) { + connector := func() (*ttrpc.Client, error) { + conn, err := ttrpcDial(address, ttrpcDialTimeout) + if err != nil { + return nil, errors.Wrap(err, "failed to connect") + } + + client := ttrpc.NewClient(conn, opts...) + return client, nil + } + + client, err := connector() + if err != nil { + return nil, err + } + + return &Client{ + connector: connector, + client: client, + }, nil +} + +// Reconnect re-establishes the TTRPC connection to the containerd daemon +func (c *Client) Reconnect() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.connector == nil { + return errors.New("unable to reconnect to containerd, no connector available") + } + + if c.closed { + return errors.New("client is closed") + } + + client, err := c.connector() + if err != nil { + return err + } + + c.client = client + return nil +} + +// EventsService creates an EventsService client +func (c *Client) EventsService() v1.EventsService { + return v1.NewEventsClient(c.Client()) +} + +// Client returns the underlying TTRPC client object +func (c *Client) Client() *ttrpc.Client { + c.mu.Lock() + defer c.mu.Unlock() + + return c.client +} + +// Close closes the clients TTRPC connection to containerd +func (c *Client) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + c.closed = true + return c.client.Close() +} diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_unix.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_unix.go new file mode 100644 index 0000000000..16fb64954d --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_unix.go @@ -0,0 +1,30 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
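Usage sketch for the new ttrpcutil client above. The socket path is illustrative and a running containerd is assumed; shims receive the real address via the TTRPC_ADDRESS environment variable introduced later in this diff:

package main

import (
	"log"

	"github.com/containerd/containerd/pkg/ttrpcutil"
)

func main() {
	// NewClient dials immediately and keeps the connector around for later
	// reconnects.
	client, err := ttrpcutil.NewClient("/run/containerd/containerd.sock.ttrpc")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// EventsService hands back a typed client for the ttrpc events API.
	events := client.EventsService()
	_ = events

	// If the connection drops, Reconnect re-dials with the stored connector.
	if err := client.Reconnect(); err != nil {
		log.Println("reconnect failed:", err)
	}
}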
+*/ + +package ttrpcutil + +import ( + "net" + "strings" + "time" +) + +func ttrpcDial(address string, timeout time.Duration) (net.Conn, error) { + address = strings.TrimPrefix(address, "unix://") + return net.DialTimeout("unix", address, timeout) +} diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go new file mode 100644 index 0000000000..b8c69ef0ad --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client_windows.go @@ -0,0 +1,59 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpcutil + +import ( + "context" + "net" + "os" + "time" + + winio "github.com/Microsoft/go-winio" + "github.com/pkg/errors" +) + +func ttrpcDial(address string, timeout time.Duration) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // If there is nobody serving the pipe we limit the timeout for this case to + // 5 seconds because any shim that would serve this endpoint should serve it + // within 5 seconds. + serveTimer := time.NewTimer(5 * time.Second) + defer serveTimer.Stop() + for { + c, err := winio.DialPipeContext(ctx, address) + if err != nil { + if os.IsNotExist(err) { + select { + case <-serveTimer.C: + return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout") + default: + // Wait 10ms for the shim to serve and try again. + time.Sleep(10 * time.Millisecond) + continue + } + } else if err == context.DeadlineExceeded { + return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + } + return nil, err + } + return c, nil + } +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go new file mode 100644 index 0000000000..3dbd0e045b --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go @@ -0,0 +1,150 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package shim + +import ( + "context" + "sync" + "time" + + v1 "github.com/containerd/containerd/api/services/ttrpc/events/v1" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/ttrpcutil" + "github.com/containerd/ttrpc" + "github.com/containerd/typeurl" + "github.com/sirupsen/logrus" +) + +const ( + queueSize = 2048 + maxRequeue = 5 +) + +type item struct { + ev *v1.Envelope + ctx context.Context + count int +} + +func NewPublisher(address string) (*RemoteEventsPublisher, error) { + client, err := ttrpcutil.NewClient(address) + if err != nil { + return nil, err + } + + l := &RemoteEventsPublisher{ + client: client, + closed: make(chan struct{}), + requeue: make(chan *item, queueSize), + } + + go l.processQueue() + return l, nil +} + +type RemoteEventsPublisher struct { + client *ttrpcutil.Client + closed chan struct{} + closer sync.Once + requeue chan *item +} + +func (l *RemoteEventsPublisher) Done() <-chan struct{} { + return l.closed +} + +func (l *RemoteEventsPublisher) Close() (err error) { + err = l.client.Close() + l.closer.Do(func() { + close(l.closed) + }) + return err +} + +func (l *RemoteEventsPublisher) processQueue() { + for i := range l.requeue { + if i.count > maxRequeue { + logrus.Errorf("evicting %s from queue because of retry count", i.ev.Topic) + // drop the event + continue + } + + if err := l.forwardRequest(i.ctx, &v1.ForwardRequest{Envelope: i.ev}); err != nil { + logrus.WithError(err).Error("forward event") + l.queue(i) + } + } +} + +func (l *RemoteEventsPublisher) queue(i *item) { + go func() { + i.count++ + // re-queue after a short delay + time.Sleep(time.Duration(1*i.count) * time.Second) + l.requeue <- i + }() +} + +func (l *RemoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + any, err := typeurl.MarshalAny(event) + if err != nil { + return err + } + i := &item{ + ev: &v1.Envelope{ + Timestamp: time.Now(), + Namespace: ns, + Topic: topic, + Event: any, + }, + ctx: ctx, + } + + if err := l.forwardRequest(i.ctx, &v1.ForwardRequest{Envelope: i.ev}); err != nil { + l.queue(i) + return err + } + + return nil +} + +func (l *RemoteEventsPublisher) forwardRequest(ctx context.Context, req *v1.ForwardRequest) error { + _, err := l.client.EventsService().Forward(ctx, req) + if err == nil { + return nil + } + + if err != ttrpc.ErrClosed { + return err + } + + // Reconnect and retry request + if err := l.client.Reconnect(); err != nil { + return err + } + + if _, err := l.client.EventsService().Forward(ctx, req); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go index 39484c1912..d540aa87e0 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go @@ -20,6 +20,7 @@ import ( "context" "flag" "fmt" + "io" "os" "runtime" "runtime/debug" @@ -43,14 +44,20 @@ type Client struct { signals chan os.Signal } +// Publisher for events +type Publisher interface { + events.Publisher + io.Closer +} + // Init func for the creation of a shim server -type Init func(context.Context, string, events.Publisher) (Shim, error) +type Init func(context.Context, string, Publisher, func()) (Shim, error) // Shim server interface type Shim interface { 
shimapi.TaskService Cleanup(ctx context.Context) (*shimapi.DeleteResponse, error) - StartShim(ctx context.Context, id, containerdBinary, containerdAddress string) (string, error) + StartShim(ctx context.Context, id, containerdBinary, containerdAddress, containerdTTRPCAddress string) (string, error) } // OptsKey is the context key for the Opts value. @@ -62,6 +69,19 @@ type Opts struct { Debug bool } +// BinaryOpts allows the configuration of a shims binary setup +type BinaryOpts func(*Config) + +// Config of shim binary options provided by shim implementations +type Config struct { + // NoSubreaper disables setting the shim as a child subreaper + NoSubreaper bool + // NoReaper disables the shim binary from reaping any child process implicitly + NoReaper bool + // NoSetupLogger disables automatic configuration of logrus to use the shim FIFO + NoSetupLogger bool +} + var ( debugFlag bool idFlag string @@ -73,6 +93,10 @@ var ( action string ) +const ( + ttrpcAddressEnv = "TTRPC_ADDRESS" +) + func parseFlags() { flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs") flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim") @@ -118,36 +142,49 @@ func setLogger(ctx context.Context, id string) error { } // Run initializes and runs a shim server -func Run(id string, initFunc Init) { - if err := run(id, initFunc); err != nil { +func Run(id string, initFunc Init, opts ...BinaryOpts) { + var config Config + for _, o := range opts { + o(&config) + } + if err := run(id, initFunc, config); err != nil { fmt.Fprintf(os.Stderr, "%s: %s\n", id, err) os.Exit(1) } } -func run(id string, initFunc Init) error { +func run(id string, initFunc Init, config Config) error { parseFlags() setRuntime() - signals, err := setupSignals() + signals, err := setupSignals(config) if err != nil { return err } - if err := subreaper(); err != nil { - return err + if !config.NoSubreaper { + if err := subreaper(); err != nil { + return err + } } - publisher := &remoteEventsPublisher{ - address: addressFlag, - containerdBinaryPath: containerdBinaryFlag, + + ttrpcAddress := os.Getenv(ttrpcAddressEnv) + + publisher, err := NewPublisher(ttrpcAddress) + if err != nil { + return err } + + defer publisher.Close() + if namespaceFlag == "" { return fmt.Errorf("shim namespace cannot be empty") } ctx := namespaces.WithNamespace(context.Background(), namespaceFlag) ctx = context.WithValue(ctx, OptsKey{}, Opts{BundlePath: bundlePath, Debug: debugFlag}) ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", id)) + ctx, cancel := context.WithCancel(ctx) - service, err := initFunc(ctx, idFlag, publisher) + service, err := initFunc(ctx, idFlag, publisher, cancel) if err != nil { return err } @@ -157,7 +194,7 @@ func run(id string, initFunc Init) error { "pid": os.Getpid(), "namespace": namespaceFlag, }) - go handleSignals(logger, signals) + go handleSignals(ctx, logger, signals) response, err := service.Cleanup(ctx) if err != nil { return err @@ -171,7 +208,7 @@ func run(id string, initFunc Init) error { } return nil case "start": - address, err := service.StartShim(ctx, idFlag, containerdBinaryFlag, addressFlag) + address, err := service.StartShim(ctx, idFlag, containerdBinaryFlag, addressFlag, ttrpcAddress) if err != nil { return err } @@ -180,11 +217,23 @@ func run(id string, initFunc Init) error { } return nil default: - if err := setLogger(ctx, idFlag); err != nil { - return err + if !config.NoSetupLogger { + if err := setLogger(ctx, idFlag); err != nil { + return err + } } client := 
NewShimClient(ctx, service, signals) - return client.Serve() + if err := client.Serve(); err != nil { + if err != context.Canceled { + return err + } + } + select { + case <-publisher.Done(): + return nil + case <-time.After(5 * time.Second): + return errors.New("publisher not closed") + } } } @@ -228,7 +277,7 @@ func (s *Client) Serve() error { dumpStacks(logger) } }() - return handleSignals(logger, s.signals) + return handleSignals(s.context, logger, s.signals) } // serve serves the ttrpc API over a unix socket at the provided path @@ -262,8 +311,3 @@ func dumpStacks(logger *logrus.Entry) { buf = buf[:stackSize] logger.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) } - -type remoteEventsPublisher struct { - address string - containerdBinaryPath string -} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go new file mode 100644 index 0000000000..4bc636280d --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go @@ -0,0 +1,29 @@ +// +build freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shim + +import "github.com/containerd/ttrpc" + +func newServer() (*ttrpc.Server, error) { + return ttrpc.NewServer() +} + +func subreaper() error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go index 1a54821637..e6dc3e02fc 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go @@ -19,20 +19,15 @@ package shim import ( - "bytes" "context" "io" "net" "os" - "os/exec" "os/signal" "syscall" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/sys/reaper" "github.com/containerd/fifo" - "github.com/containerd/typeurl" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -40,9 +35,13 @@ import ( // setupSignals creates a new signal handler for all signals and sets the shim as a // sub-reaper so that the container processes are reparented -func setupSignals() (chan os.Signal, error) { +func setupSignals(config Config) (chan os.Signal, error) { signals := make(chan os.Signal, 32) - signal.Notify(signals, unix.SIGTERM, unix.SIGINT, unix.SIGCHLD, unix.SIGPIPE) + smp := []os.Signal{unix.SIGTERM, unix.SIGINT, unix.SIGPIPE} + if !config.NoReaper { + smp = append(smp, unix.SIGCHLD) + } + signal.Notify(signals, smp...) 
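The shim entrypoint changes above thread a Config through Run via BinaryOpts and hand every Init a Publisher plus a cancel func. A compile-level sketch of a shim main against the new signatures (the runtime ID is hypothetical and the task service is elided, so Run will report an error rather than serve):

package main

import (
	"context"
	"errors"

	"github.com/containerd/containerd/runtime/v2/shim"
)

// newShim matches the updated shim.Init signature, which now receives a
// Publisher (events.Publisher plus io.Closer) and a cancel func for shutdown.
// The real task service is elided from this sketch.
func newShim(ctx context.Context, id string, publisher shim.Publisher, cancel func()) (shim.Shim, error) {
	return nil, errors.New("shim implementation elided from this sketch")
}

func main() {
	// BinaryOpts let a shim opt out of the default process setup; a shim
	// that reaps its own children can disable SIGCHLD handling and the
	// subreaper flag set up by run().
	shim.Run("io.containerd.example.v1", newShim, func(c *shim.Config) {
		c.NoReaper = true
		c.NoSubreaper = true
	})
}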
return signals, nil } @@ -71,11 +70,14 @@ func serveListener(path string) (net.Listener, error) { return l, nil } -func handleSignals(logger *logrus.Entry, signals chan os.Signal) error { +func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { logger.Info("starting signal loop") for { - for s := range signals { + select { + case <-ctx.Done(): + return ctx.Err() + case s := <-signals: switch s { case unix.SIGCHLD: if err := reaper.Reap(); err != nil { @@ -90,29 +92,3 @@ func handleSignals(logger *logrus.Entry, signals chan os.Signal) error { func openLog(ctx context.Context, _ string) (io.Writer, error) { return fifo.OpenFifo(ctx, "log", unix.O_WRONLY, 0700) } - -func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { - ns, _ := namespaces.Namespace(ctx) - encoded, err := typeurl.MarshalAny(event) - if err != nil { - return err - } - data, err := encoded.Marshal() - if err != nil { - return err - } - cmd := exec.CommandContext(ctx, l.containerdBinaryPath, "--address", l.address, "publish", "--topic", topic, "--namespace", ns) - cmd.Stdin = bytes.NewReader(data) - c, err := reaper.Default.Start(cmd) - if err != nil { - return err - } - status, err := reaper.Default.Wait(cmd, c) - if err != nil { - return err - } - if status != 0 { - return errors.New("failed to publish event") - } - return nil -} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go index 614e62974f..7339eb2a2e 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go @@ -19,283 +19,39 @@ package shim import ( - "bytes" "context" - "fmt" "io" "net" "os" - "os/exec" - "sync" - "unsafe" - winio "github.com/Microsoft/go-winio" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/namespaces" "github.com/containerd/ttrpc" - "github.com/containerd/typeurl" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" ) -// setupSignals creates a new signal handler for all signals -func setupSignals() (chan os.Signal, error) { - signals := make(chan os.Signal, 32) - return signals, nil +func setupSignals(config Config) (chan os.Signal, error) { + return nil, errors.New("not supported") } func newServer() (*ttrpc.Server, error) { - return ttrpc.NewServer() + return nil, errors.New("not supported") } func subreaper() error { - return nil -} - -type fakeSignal struct { -} - -func (fs *fakeSignal) String() string { - return "" -} - -func (fs *fakeSignal) Signal() { + return errors.New("not supported") } func setupDumpStacks(dump chan<- os.Signal) { - // Windows does not support signals like *nix systems. So instead of - // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be - // signaled. 
ACL'd to builtin administrators and local system - event := "Global\\containerd-shim-runhcs-v1-" + fmt.Sprint(os.Getpid()) - ev, _ := windows.UTF16PtrFromString(event) - sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") - if err != nil { - logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error()) - return - } - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) - h, err := windows.CreateEvent(&sa, 0, 0, ev) - if h == 0 || err != nil { - logrus.Errorf("failed to create debug stackdump event %s: %s", event, err.Error()) - return - } - go func() { - logrus.Debugf("Stackdump - waiting signal at %s", event) - for { - windows.WaitForSingleObject(h, windows.INFINITE) - dump <- new(fakeSignal) - } - }() } -// serve serves the ttrpc API over a unix socket at the provided path -// this function does not block func serveListener(path string) (net.Listener, error) { - if path == "" { - return nil, errors.New("'socket' must be npipe path") - } - l, err := winio.ListenPipe(path, nil) - if err != nil { - return nil, err - } - logrus.WithField("socket", path).Debug("serving api on npipe socket") - return l, nil -} - -func handleSignals(logger *logrus.Entry, signals chan os.Signal) error { - logger.Info("starting signal loop") - - for { - for s := range signals { - switch s { - case os.Interrupt: - } - } - } -} - -var _ = (io.WriterTo)(&blockingBuffer{}) -var _ = (io.Writer)(&blockingBuffer{}) - -// blockingBuffer implements the `io.Writer` and `io.WriterTo` interfaces. Once -// `capacity` is reached the calls to `Write` will block until a successful call -// to `WriterTo` frees up the buffer space. -// -// Note: This has the same threadding semantics as bytes.Buffer with no -// additional locking so multithreading is not supported. -type blockingBuffer struct { - c *sync.Cond - - capacity int - - buffer bytes.Buffer -} - -func newBlockingBuffer(capacity int) *blockingBuffer { - return &blockingBuffer{ - c: sync.NewCond(&sync.Mutex{}), - capacity: capacity, - } -} - -func (bb *blockingBuffer) Len() int { - bb.c.L.Lock() - defer bb.c.L.Unlock() - return bb.buffer.Len() -} - -func (bb *blockingBuffer) Write(p []byte) (int, error) { - if len(p) > bb.capacity { - return 0, errors.Errorf("len(p) (%d) too large for capacity (%d)", len(p), bb.capacity) - } - - bb.c.L.Lock() - for bb.buffer.Len()+len(p) > bb.capacity { - bb.c.Wait() - } - defer bb.c.L.Unlock() - return bb.buffer.Write(p) + return nil, errors.New("not supported") } -func (bb *blockingBuffer) WriteTo(w io.Writer) (int64, error) { - bb.c.L.Lock() - defer bb.c.L.Unlock() - defer bb.c.Signal() - return bb.buffer.WriteTo(w) -} - -// deferredShimWriteLogger exists to solve the upstream loggin issue presented -// by using Windows Named Pipes for logging. When containerd restarts it tries -// to reconnect to any shims. This means that the connection to the logger will -// be severed but when containerd starts up it should reconnect and start -// logging again. We abstract all of this logic behind what looks like a simple -// `io.Writer` that can reconnect in the lifetime and buffers logs while -// disconnected. -type deferredShimWriteLogger struct { - mu sync.Mutex - - ctx context.Context - - connected bool - aborted bool - - buffer *blockingBuffer - - l net.Listener - c net.Conn - conerr error -} - -// beginAccept issues an accept to wait for a connection. 
Once a connection -// occurs drains any outstanding buffer. While draining the buffer any writes -// are blocked. If the buffer fails to fully drain due to a connection drop a -// call to `beginAccept` is re-issued waiting for another connection from -// containerd. -func (dswl *deferredShimWriteLogger) beginAccept() { - dswl.mu.Lock() - if dswl.connected { - return - } - dswl.mu.Unlock() - - c, err := dswl.l.Accept() - if err == winio.ErrPipeListenerClosed { - dswl.mu.Lock() - dswl.aborted = true - dswl.l.Close() - dswl.conerr = errors.New("connection closed") - dswl.mu.Unlock() - return - } - dswl.mu.Lock() - dswl.connected = true - dswl.c = c - - // Drain the buffer - if dswl.buffer.Len() > 0 { - _, err := dswl.buffer.WriteTo(dswl.c) - if err != nil { - // We lost our connection draining the buffer. - dswl.connected = false - dswl.c.Close() - go dswl.beginAccept() - } - } - dswl.mu.Unlock() -} - -func (dswl *deferredShimWriteLogger) Write(p []byte) (int, error) { - dswl.mu.Lock() - defer dswl.mu.Unlock() - - if dswl.aborted { - return 0, dswl.conerr - } - - if dswl.connected { - // We have a connection. beginAccept would have drained the buffer so we just write our data to - // the connection directly. - written, err := dswl.c.Write(p) - if err != nil { - // We lost the connection. - dswl.connected = false - dswl.c.Close() - go dswl.beginAccept() - - // We weren't able to write the full `p` bytes. Buffer the rest - if written != len(p) { - w, err := dswl.buffer.Write(p[written:]) - if err != nil { - // We failed to buffer. Return this error - return written + w, err - } - written += w - } - } - - return written, nil - } - - // We are disconnected. Buffer the contents. - return dswl.buffer.Write(p) -} - -// openLog on Windows acts as the server of the log pipe. This allows the -// containerd daemon to independently restart and reconnect to the logs. 
-func openLog(ctx context.Context, id string) (io.Writer, error) { - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return nil, err - } - - dswl := &deferredShimWriteLogger{ - ctx: ctx, - buffer: newBlockingBuffer(64 * 1024), // 64KB, - } - l, err := winio.ListenPipe(fmt.Sprintf("\\\\.\\pipe\\containerd-shim-%s-%s-log", ns, id), nil) - if err != nil { - return nil, err - } - dswl.l = l - go dswl.beginAccept() - return dswl, nil +func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { + return errors.New("not supported") } -func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { - ns, _ := namespaces.Namespace(ctx) - encoded, err := typeurl.MarshalAny(event) - if err != nil { - return err - } - data, err := encoded.Marshal() - if err != nil { - return err - } - cmd := exec.CommandContext(ctx, l.containerdBinaryPath, "--address", l.address, "publish", "--topic", topic, "--namespace", ns) - cmd.Stdin = bytes.NewReader(data) - return cmd.Run() +func openLog(ctx context.Context, _ string) (io.Writer, error) { + return nil, errors.New("not supported") } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go index b7034ce50b..c8efd0dac8 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go @@ -17,23 +17,28 @@ package shim import ( + "bytes" "context" "fmt" + "io/ioutil" "net" "os" "os/exec" "path/filepath" "strings" + "sync" "time" "github.com/containerd/containerd/namespaces" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" "github.com/pkg/errors" ) -const shimBinaryFormat = "containerd-shim-%s-%s" +var runtimePaths sync.Map // Command returns the shim command with the provided args and configuration -func Command(ctx context.Context, runtime, containerdAddress, path string, cmdArgs ...string) (*exec.Cmd, error) { +func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAddress, path string, opts *types.Any, cmdArgs ...string) (*exec.Cmd, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -52,23 +57,57 @@ func Command(ctx context.Context, runtime, containerdAddress, path string, cmdAr if name == "" { return nil, fmt.Errorf("invalid runtime name %s, correct runtime name should format like io.containerd.runc.v1", runtime) } + var cmdPath string - var lerr error - if cmdPath, lerr = exec.LookPath(name); lerr != nil { - if eerr, ok := lerr.(*exec.Error); ok { - if eerr.Err == exec.ErrNotFound { - return nil, errors.Wrapf(os.ErrNotExist, "runtime %q binary not installed %q", runtime, name) + cmdPathI, cmdPathFound := runtimePaths.Load(name) + if cmdPathFound { + cmdPath = cmdPathI.(string) + } else { + var lerr error + if cmdPath, lerr = exec.LookPath(name); lerr != nil { + if eerr, ok := lerr.(*exec.Error); ok { + if eerr.Err == exec.ErrNotFound { + // LookPath only finds current directory matches based on + // the callers current directory but the caller is not + // likely in the same directory as the containerd + // executables. Instead match the calling binaries path + // (containerd) and see if they are side by side. If so + // execute the shim found there. 
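// Editor's note (illustrative aside, not part of the vendored patch): the
// added Command logic above first consults a process-wide sync.Map, falls
// back from PATH to a shim binary sitting next to the calling executable,
// and caches whatever it resolves so later invocations skip the lookup.
// A hedged, standalone sketch of that resolve-then-cache pattern;
// resolveShim and shimPaths are stand-in names, not containerd identifiers:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"sync"
)

var shimPaths sync.Map // shim binary name -> absolute path

func resolveShim(name string) (string, error) {
	if p, ok := shimPaths.Load(name); ok {
		return p.(string), nil // cached by an earlier call
	}
	path, err := exec.LookPath(name)
	if err != nil {
		// PATH lookup failed: try a binary beside our own executable,
		// the same side-by-side fallback the diff's comment describes.
		self, serr := os.Executable()
		if serr != nil {
			return "", err
		}
		candidate := filepath.Join(filepath.Dir(self), name)
		if _, serr := os.Stat(candidate); serr != nil {
			return "", err
		}
		path = candidate
	}
	abs, err := filepath.Abs(path)
	if err != nil {
		return "", err
	}
	// LoadOrStore keeps the first writer's value if two goroutines race.
	if p, loaded := shimPaths.LoadOrStore(name, abs); loaded {
		abs = p.(string)
	}
	return abs, nil
}

func main() {
	fmt.Println(resolveShim("containerd-shim-runc-v2"))
}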
+ testPath := filepath.Join(filepath.Dir(self), name) + if _, serr := os.Stat(testPath); serr == nil { + cmdPath = testPath + } + if cmdPath == "" { + return nil, errors.Wrapf(os.ErrNotExist, "runtime %q binary not installed %q", runtime, name) + } + } } } + cmdPath, err = filepath.Abs(cmdPath) + if err != nil { + return nil, err + } + if cmdPathI, cmdPathFound = runtimePaths.LoadOrStore(name, cmdPath); cmdPathFound { + // We didn't store cmdPath we loaded an already cached value. Use it. + cmdPath = cmdPathI.(string) + } } - cmdPath, err = filepath.Abs(cmdPath) - if err != nil { - return nil, err - } + cmd := exec.Command(cmdPath, args...) cmd.Dir = path - cmd.Env = append(os.Environ(), "GOMAXPROCS=2") + cmd.Env = append( + os.Environ(), + "GOMAXPROCS=2", + fmt.Sprintf("%s=%s", ttrpcAddressEnv, containerdTTRPCAddress), + ) cmd.SysProcAttr = getSysProcAttr() + if opts != nil { + d, err := proto.Marshal(opts) + if err != nil { + return nil, err + } + cmd.Stdin = bytes.NewReader(d) + } return cmd, nil } @@ -126,3 +165,22 @@ func WriteAddress(path, address string) error { } return os.Rename(tempPath, path) } + +// ErrNoAddress is returned when the address file has no content +var ErrNoAddress = errors.New("no shim address") + +// ReadAddress returns the shim's abstract socket address from the path +func ReadAddress(path string) (string, error) { + path, err := filepath.Abs(path) + if err != nil { + return "", err + } + data, err := ioutil.ReadFile(path) + if err != nil { + return "", err + } + if len(data) == 0 { + return "", ErrNoAddress + } + return string(data), nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go index 262fe2b363..7ca65033f9 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go @@ -20,7 +20,10 @@ package shim import ( "context" + "crypto/sha256" + "fmt" "net" + "os" "path/filepath" "strings" "syscall" @@ -31,6 +34,8 @@ import ( "github.com/pkg/errors" ) +const shimBinaryFormat = "containerd-shim-%s-%s" + func getSysProcAttr() *syscall.SysProcAttr { return &syscall.SysProcAttr{ Setpgid: true, @@ -42,13 +47,29 @@ func SetScore(pid int) error { return sys.SetOOMScore(pid, sys.OOMScoreMaxKillable) } +// AdjustOOMScore sets the OOM score for the process to the parents OOM score +1 +// to ensure that they parent has a lower* score than the shim +func AdjustOOMScore(pid int) error { + parent := os.Getppid() + score, err := sys.GetOOMScoreAdj(parent) + if err != nil { + return errors.Wrap(err, "get parent OOM score") + } + shimScore := score + 1 + if err := sys.SetOOMScore(pid, shimScore); err != nil { + return errors.Wrap(err, "set shim OOM score") + } + return nil +} + // SocketAddress returns an abstract socket address func SocketAddress(ctx context.Context, id string) (string, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return "", err } - return filepath.Join(string(filepath.Separator), "containerd-shim", ns, id, "shim.sock"), nil + d := sha256.Sum256([]byte(filepath.Join(ns, id))) + return filepath.Join(string(filepath.Separator), "containerd-shim", fmt.Sprintf("%x.sock", d)), nil } // AnonDialer returns a dialer for an abstract socket @@ -57,6 +78,10 @@ func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", "\x00"+address, timeout) } +func AnonReconnectDialer(address string, timeout 
time.Duration) (net.Conn, error) { + return AnonDialer(address, timeout) +} + // NewSocket returns a new socket func NewSocket(address string) (*net.UnixListener, error) { if len(address) > 106 { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go index 594a0f75b3..a94cdf250e 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go @@ -18,73 +18,64 @@ package shim import ( "context" - "fmt" "net" "os" "syscall" "time" winio "github.com/Microsoft/go-winio" - "github.com/containerd/containerd/namespaces" "github.com/pkg/errors" ) +const shimBinaryFormat = "containerd-shim-%s-%s.exe" + func getSysProcAttr() *syscall.SysProcAttr { return nil } -// SetScore sets the oom score for a process -func SetScore(pid int) error { - return nil -} +// AnonReconnectDialer returns a dialer for an existing npipe on containerd reconnection +func AnonReconnectDialer(address string, timeout time.Duration) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() -// SocketAddress returns a npipe address -func SocketAddress(ctx context.Context, id string) (string, error) { - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return "", err + c, err := winio.DialPipeContext(ctx, address) + if os.IsNotExist(err) { + return nil, errors.Wrap(os.ErrNotExist, "npipe not found on reconnect") + } else if err == context.DeadlineExceeded { + return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + } else if err != nil { + return nil, err } - return fmt.Sprintf("\\\\.\\pipe\\containerd-shim-%s-%s-pipe", ns, id), nil + return c, nil } // AnonDialer returns a dialer for a npipe func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { - var c net.Conn - var lastError error - timedOutError := errors.Errorf("timed out waiting for npipe %s", address) - start := time.Now() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // If there is nobody serving the pipe we limit the timeout for this case to + // 5 seconds because any shim that would serve this endpoint should serve it + // within 5 seconds. + serveTimer := time.NewTimer(5 * time.Second) + defer serveTimer.Stop() for { - remaining := timeout - time.Since(start) - if remaining <= 0 { - lastError = timedOutError - break - } - c, lastError = winio.DialPipe(address, &remaining) - if lastError == nil { - break + c, err := winio.DialPipeContext(ctx, address) + if err != nil { + if os.IsNotExist(err) { + select { + case <-serveTimer.C: + return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout") + default: + // Wait 10ms for the shim to serve and try again. + time.Sleep(10 * time.Millisecond) + continue + } + } else if err == context.DeadlineExceeded { + return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + } + return nil, err } - if !os.IsNotExist(lastError) { - break - } - // There is nobody serving the pipe. We limit the timeout for this case - // to 5 seconds because any shim that would serve this endpoint should - // serve it within 5 seconds. We use the passed in timeout for the - // `DialPipe` timeout if the pipe exists however to give the pipe time - // to `Accept` the connection. 
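// Editor's note (illustrative aside, not part of the vendored patch): the
// rewritten Windows AnonDialer above keeps the old policy in a cleaner
// shape: when nobody is serving the pipe yet, retry every 10ms but give up
// after five seconds, since a healthy shim serves its endpoint well inside
// that window. A platform-neutral sketch of that bounded retry; dial stands
// in for winio.DialPipeContext and is an assumption for illustration:

package main

import (
	"context"
	"errors"
	"fmt"
	"net"
	"os"
	"time"
)

func dialWithRetry(ctx context.Context, dial func() (net.Conn, error)) (net.Conn, error) {
	// Cap the "nobody listening yet" case independently of ctx's deadline.
	serveTimer := time.NewTimer(5 * time.Second)
	defer serveTimer.Stop()
	for {
		c, err := dial()
		if err == nil {
			return c, nil
		}
		if !os.IsNotExist(err) {
			return nil, err // a real failure, not an absent endpoint
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-serveTimer.C:
			return nil, errors.New("endpoint not served before timeout")
		case <-time.After(10 * time.Millisecond):
			// endpoint still absent; try again
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	_, err := dialWithRetry(ctx, func() (net.Conn, error) {
		return nil, os.ErrNotExist // simulate a pipe that never appears
	})
	fmt.Println(err) // context deadline exceeded
}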
- if time.Since(start) >= 5*time.Second { - lastError = timedOutError - break - } - time.Sleep(10 * time.Millisecond) - } - return c, lastError -} - -// NewSocket returns a new npipe listener -func NewSocket(address string) (net.Listener, error) { - l, err := winio.ListenPipe(address, nil) - if err != nil { - return nil, errors.Wrapf(err, "failed to listen to npipe %s", address) + return c, nil } - return l, nil } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go index 9caefd7580..7ba9e677aa 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go @@ -1,64 +1,23 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/runtime/v2/task/shim.proto -/* - Package task is a generated protocol buffer package. - - It is generated from these files: - github.com/containerd/containerd/runtime/v2/task/shim.proto - - It has these top-level messages: - CreateTaskRequest - CreateTaskResponse - DeleteRequest - DeleteResponse - ExecProcessRequest - ExecProcessResponse - ResizePtyRequest - StateRequest - StateResponse - KillRequest - CloseIORequest - PidsRequest - PidsResponse - CheckpointTaskRequest - UpdateTaskRequest - StartRequest - StartResponse - WaitRequest - WaitResponse - StatsRequest - StatsResponse - ConnectRequest - ConnectResponse - ShutdownRequest - PauseRequest - ResumeRequest -*/ package task -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/gogo/protobuf/types" -import google_protobuf1 "github.com/gogo/protobuf/types" - -// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" -import _ "github.com/gogo/protobuf/types" -import containerd_types "github.com/containerd/containerd/api/types" -import containerd_v1_types "github.com/containerd/containerd/api/types/task" - -import time "time" - -import types "github.com/gogo/protobuf/types" - -import strings "strings" -import reflect "reflect" - -import context "context" -import ttrpc "github.com/containerd/ttrpc" - -import io "io" +import ( + context "context" + fmt "fmt" + types "github.com/containerd/containerd/api/types" + task "github.com/containerd/containerd/api/types/task" + github_com_containerd_ttrpc "github.com/containerd/ttrpc" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + types1 "github.com/gogo/protobuf/types" + io "io" + math "math" + reflect "reflect" + strings "strings" + time "time" +) // Reference imports to suppress errors if they are not otherwise used. 
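// Editor's note (illustrative aside, not part of the vendored patch): the
// regenerated shim.pb.go keeps blank assignments such as `var _ =
// proto.Marshal` because Go refuses to compile a file with unused imports,
// and which imports a generated file actually exercises varies with the
// proto options. The same keep-the-import idiom in isolation:

package main

import (
	"fmt"
	_ "time" // blank import: link the package in without calling into it
)

var _ = fmt.Sprintf // blank var: reference one symbol so "fmt" counts as used

func main() {}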
var _ = proto.Marshal @@ -73,256 +32,1063 @@ var _ = time.Kitchen const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type CreateTaskRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` - Rootfs []*containerd_types.Mount `protobuf:"bytes,3,rep,name=rootfs" json:"rootfs,omitempty"` - Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` - Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` - Checkpoint string `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` - ParentCheckpoint string `protobuf:"bytes,9,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` - Options *google_protobuf.Any `protobuf:"bytes,10,opt,name=options" json:"options,omitempty"` -} - -func (m *CreateTaskRequest) Reset() { *m = CreateTaskRequest{} } -func (*CreateTaskRequest) ProtoMessage() {} -func (*CreateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{0} } + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Checkpoint string `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + ParentCheckpoint string `protobuf:"bytes,9,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` + Options *types1.Any `protobuf:"bytes,10,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTaskRequest) Reset() { *m = CreateTaskRequest{} } +func (*CreateTaskRequest) ProtoMessage() {} +func (*CreateTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{0} +} +func (m *CreateTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CreateTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CreateTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTaskRequest.Merge(m, src) +} +func (m *CreateTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *CreateTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTaskRequest proto.InternalMessageInfo type CreateTaskResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTaskResponse) Reset() { *m = CreateTaskResponse{} } +func (*CreateTaskResponse) ProtoMessage() {} +func (*CreateTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{1} +} +func (m *CreateTaskResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CreateTaskResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CreateTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTaskResponse.Merge(m, src) +} +func (m *CreateTaskResponse) XXX_Size() int { + return m.Size() +} +func (m *CreateTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTaskResponse.DiscardUnknown(m) } -func (m *CreateTaskResponse) Reset() { *m = CreateTaskResponse{} } -func (*CreateTaskResponse) ProtoMessage() {} -func (*CreateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{1} } +var xxx_messageInfo_CreateTaskResponse proto.InternalMessageInfo type DeleteRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{2} +} +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(m, src) +} +func (m *DeleteRequest) XXX_Size() int { + return m.Size() +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) } -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{2} } +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo type DeleteResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - ExitStatus uint32 `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte 
`json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (*DeleteResponse) ProtoMessage() {} +func (*DeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{3} +} +func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteResponse.Merge(m, src) +} +func (m *DeleteResponse) XXX_Size() int { + return m.Size() +} +func (m *DeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteResponse.DiscardUnknown(m) } -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (*DeleteResponse) ProtoMessage() {} -func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{3} } +var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo type ExecProcessRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Terminal bool `protobuf:"varint,3,opt,name=terminal,proto3" json:"terminal,omitempty"` - Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` - Spec *google_protobuf.Any `protobuf:"bytes,7,opt,name=spec" json:"spec,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Terminal bool `protobuf:"varint,3,opt,name=terminal,proto3" json:"terminal,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` + Spec *types1.Any `protobuf:"bytes,7,opt,name=spec,proto3" json:"spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} } +func (*ExecProcessRequest) ProtoMessage() {} +func (*ExecProcessRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{4} +} +func (m *ExecProcessRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExecProcessRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecProcessRequest.Merge(m, src) +} +func (m *ExecProcessRequest) XXX_Size() int { + return m.Size() +} +func (m *ExecProcessRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecProcessRequest.DiscardUnknown(m) } -func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} } -func (*ExecProcessRequest) ProtoMessage() {} -func 
(*ExecProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{4} } +var xxx_messageInfo_ExecProcessRequest proto.InternalMessageInfo type ExecProcessResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecProcessResponse) Reset() { *m = ExecProcessResponse{} } +func (*ExecProcessResponse) ProtoMessage() {} +func (*ExecProcessResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{5} +} +func (m *ExecProcessResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExecProcessResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecProcessResponse.Merge(m, src) +} +func (m *ExecProcessResponse) XXX_Size() int { + return m.Size() +} +func (m *ExecProcessResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecProcessResponse.DiscardUnknown(m) } -func (m *ExecProcessResponse) Reset() { *m = ExecProcessResponse{} } -func (*ExecProcessResponse) ProtoMessage() {} -func (*ExecProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{5} } +var xxx_messageInfo_ExecProcessResponse proto.InternalMessageInfo type ResizePtyRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` - Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` + Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResizePtyRequest) Reset() { *m = ResizePtyRequest{} } +func (*ResizePtyRequest) ProtoMessage() {} +func (*ResizePtyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{6} +} +func (m *ResizePtyRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResizePtyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResizePtyRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResizePtyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResizePtyRequest.Merge(m, src) +} +func (m *ResizePtyRequest) XXX_Size() int { + return m.Size() +} +func (m *ResizePtyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResizePtyRequest.DiscardUnknown(m) } -func (m *ResizePtyRequest) Reset() { *m = ResizePtyRequest{} } -func (*ResizePtyRequest) ProtoMessage() {} -func (*ResizePtyRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{6} } +var xxx_messageInfo_ResizePtyRequest proto.InternalMessageInfo type StateRequest struct { - ID string 
`protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StateRequest) Reset() { *m = StateRequest{} } +func (*StateRequest) ProtoMessage() {} +func (*StateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{7} +} +func (m *StateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StateRequest.Merge(m, src) +} +func (m *StateRequest) XXX_Size() int { + return m.Size() +} +func (m *StateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StateRequest.DiscardUnknown(m) } -func (m *StateRequest) Reset() { *m = StateRequest{} } -func (*StateRequest) ProtoMessage() {} -func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{7} } +var xxx_messageInfo_StateRequest proto.InternalMessageInfo type StateResponse struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Status containerd_v1_types.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` - Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` - ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` -} - -func (m *StateResponse) Reset() { *m = StateResponse{} } -func (*StateResponse) ProtoMessage() {} -func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{8} } + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Status task.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` + ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time 
`protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` + ExecID string `protobuf:"bytes,11,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StateResponse) Reset() { *m = StateResponse{} } +func (*StateResponse) ProtoMessage() {} +func (*StateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{8} +} +func (m *StateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StateResponse.Merge(m, src) +} +func (m *StateResponse) XXX_Size() int { + return m.Size() +} +func (m *StateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StateResponse proto.InternalMessageInfo type KillRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` - All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` + All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KillRequest) Reset() { *m = KillRequest{} } +func (*KillRequest) ProtoMessage() {} +func (*KillRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{9} +} +func (m *KillRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KillRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KillRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *KillRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_KillRequest.Merge(m, src) +} +func (m *KillRequest) XXX_Size() int { + return m.Size() +} +func (m *KillRequest) XXX_DiscardUnknown() { + xxx_messageInfo_KillRequest.DiscardUnknown(m) } -func (m *KillRequest) Reset() { *m = KillRequest{} } -func (*KillRequest) ProtoMessage() {} -func (*KillRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{9} } +var xxx_messageInfo_KillRequest proto.InternalMessageInfo type CloseIORequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Stdin 
bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseIORequest) Reset() { *m = CloseIORequest{} } +func (*CloseIORequest) ProtoMessage() {} +func (*CloseIORequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{10} +} +func (m *CloseIORequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CloseIORequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CloseIORequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CloseIORequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseIORequest.Merge(m, src) +} +func (m *CloseIORequest) XXX_Size() int { + return m.Size() +} +func (m *CloseIORequest) XXX_DiscardUnknown() { + xxx_messageInfo_CloseIORequest.DiscardUnknown(m) } -func (m *CloseIORequest) Reset() { *m = CloseIORequest{} } -func (*CloseIORequest) ProtoMessage() {} -func (*CloseIORequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{10} } +var xxx_messageInfo_CloseIORequest proto.InternalMessageInfo type PidsRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PidsRequest) Reset() { *m = PidsRequest{} } +func (*PidsRequest) ProtoMessage() {} +func (*PidsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{11} +} +func (m *PidsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PidsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PidsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PidsRequest.Merge(m, src) +} +func (m *PidsRequest) XXX_Size() int { + return m.Size() +} +func (m *PidsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PidsRequest.DiscardUnknown(m) } -func (m *PidsRequest) Reset() { *m = PidsRequest{} } -func (*PidsRequest) ProtoMessage() {} -func (*PidsRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{11} } +var xxx_messageInfo_PidsRequest proto.InternalMessageInfo type PidsResponse struct { - Processes []*containerd_v1_types.ProcessInfo `protobuf:"bytes,1,rep,name=processes" json:"processes,omitempty"` + Processes []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PidsResponse) Reset() { *m = PidsResponse{} } +func (*PidsResponse) ProtoMessage() {} +func (*PidsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{12} +} +func (m *PidsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PidsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PidsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PidsResponse.Merge(m, src) +} +func (m *PidsResponse) XXX_Size() int { + return m.Size() +} +func (m *PidsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PidsResponse.DiscardUnknown(m) } -func (m *PidsResponse) Reset() { *m = PidsResponse{} } -func (*PidsResponse) ProtoMessage() {} -func (*PidsResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{12} } +var xxx_messageInfo_PidsResponse proto.InternalMessageInfo type CheckpointTaskRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - Options *google_protobuf.Any `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Options *types1.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CheckpointTaskRequest) Reset() { *m = CheckpointTaskRequest{} } +func (*CheckpointTaskRequest) ProtoMessage() {} +func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{13} +} +func (m *CheckpointTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckpointTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckpointTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckpointTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckpointTaskRequest.Merge(m, src) +} +func (m *CheckpointTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *CheckpointTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CheckpointTaskRequest.DiscardUnknown(m) } -func (m *CheckpointTaskRequest) Reset() { *m = CheckpointTaskRequest{} } -func (*CheckpointTaskRequest) ProtoMessage() {} -func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{13} } +var xxx_messageInfo_CheckpointTaskRequest proto.InternalMessageInfo type UpdateTaskRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Resources *google_protobuf.Any `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resources *types1.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } +func (*UpdateTaskRequest) ProtoMessage() {} +func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{14} +} +func (m *UpdateTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err 
+ } + return b[:n], nil + } +} +func (m *UpdateTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateTaskRequest.Merge(m, src) +} +func (m *UpdateTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateTaskRequest.DiscardUnknown(m) } -func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } -func (*UpdateTaskRequest) ProtoMessage() {} -func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{14} } +var xxx_messageInfo_UpdateTaskRequest proto.InternalMessageInfo type StartRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartRequest) Reset() { *m = StartRequest{} } +func (*StartRequest) ProtoMessage() {} +func (*StartRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{15} +} +func (m *StartRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StartRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartRequest.Merge(m, src) +} +func (m *StartRequest) XXX_Size() int { + return m.Size() +} +func (m *StartRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartRequest.DiscardUnknown(m) } -func (m *StartRequest) Reset() { *m = StartRequest{} } -func (*StartRequest) ProtoMessage() {} -func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{15} } +var xxx_messageInfo_StartRequest proto.InternalMessageInfo type StartResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartResponse) Reset() { *m = StartResponse{} } +func (*StartResponse) ProtoMessage() {} +func (*StartResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{16} +} +func (m *StartResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StartResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartResponse.Merge(m, src) +} +func (m *StartResponse) XXX_Size() int { + return m.Size() +} +func (m *StartResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartResponse.DiscardUnknown(m) } -func (m *StartResponse) Reset() { *m = StartResponse{} } -func (*StartResponse) ProtoMessage() {} -func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{16} } +var xxx_messageInfo_StartResponse 
proto.InternalMessageInfo type WaitRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WaitRequest) Reset() { *m = WaitRequest{} } +func (*WaitRequest) ProtoMessage() {} +func (*WaitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{17} +} +func (m *WaitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WaitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WaitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WaitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WaitRequest.Merge(m, src) +} +func (m *WaitRequest) XXX_Size() int { + return m.Size() +} +func (m *WaitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WaitRequest.DiscardUnknown(m) } -func (m *WaitRequest) Reset() { *m = WaitRequest{} } -func (*WaitRequest) ProtoMessage() {} -func (*WaitRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{17} } +var xxx_messageInfo_WaitRequest proto.InternalMessageInfo type WaitResponse struct { - ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WaitResponse) Reset() { *m = WaitResponse{} } +func (*WaitResponse) ProtoMessage() {} +func (*WaitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{18} +} +func (m *WaitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WaitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WaitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WaitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WaitResponse.Merge(m, src) +} +func (m *WaitResponse) XXX_Size() int { + return m.Size() +} +func (m *WaitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WaitResponse.DiscardUnknown(m) } -func (m *WaitResponse) Reset() { *m = WaitResponse{} } -func (*WaitResponse) ProtoMessage() {} -func (*WaitResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{18} } +var xxx_messageInfo_WaitResponse proto.InternalMessageInfo type StatsRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func 
(m *StatsRequest) Reset() { *m = StatsRequest{} } +func (*StatsRequest) ProtoMessage() {} +func (*StatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{19} +} +func (m *StatsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatsRequest.Merge(m, src) +} +func (m *StatsRequest) XXX_Size() int { + return m.Size() +} +func (m *StatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatsRequest.DiscardUnknown(m) } -func (m *StatsRequest) Reset() { *m = StatsRequest{} } -func (*StatsRequest) ProtoMessage() {} -func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{19} } +var xxx_messageInfo_StatsRequest proto.InternalMessageInfo type StatsResponse struct { - Stats *google_protobuf.Any `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` + Stats *types1.Any `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatsResponse) Reset() { *m = StatsResponse{} } +func (*StatsResponse) ProtoMessage() {} +func (*StatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{20} +} +func (m *StatsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatsResponse.Merge(m, src) +} +func (m *StatsResponse) XXX_Size() int { + return m.Size() +} +func (m *StatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatsResponse.DiscardUnknown(m) } -func (m *StatsResponse) Reset() { *m = StatsResponse{} } -func (*StatsResponse) ProtoMessage() {} -func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{20} } +var xxx_messageInfo_StatsResponse proto.InternalMessageInfo type ConnectRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } +func (*ConnectRequest) ProtoMessage() {} +func (*ConnectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{21} +} +func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectRequest.Merge(m, src) +} +func (m *ConnectRequest) XXX_Size() int { + 
return m.Size() +} +func (m *ConnectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectRequest.DiscardUnknown(m) } -func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } -func (*ConnectRequest) ProtoMessage() {} -func (*ConnectRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{21} } +var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo type ConnectResponse struct { - ShimPid uint32 `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"` - TaskPid uint32 `protobuf:"varint,2,opt,name=task_pid,json=taskPid,proto3" json:"task_pid,omitempty"` - Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + ShimPid uint32 `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"` + TaskPid uint32 `protobuf:"varint,2,opt,name=task_pid,json=taskPid,proto3" json:"task_pid,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectResponse) Reset() { *m = ConnectResponse{} } +func (*ConnectResponse) ProtoMessage() {} +func (*ConnectResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{22} +} +func (m *ConnectResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectResponse.Merge(m, src) +} +func (m *ConnectResponse) XXX_Size() int { + return m.Size() +} +func (m *ConnectResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectResponse.DiscardUnknown(m) } -func (m *ConnectResponse) Reset() { *m = ConnectResponse{} } -func (*ConnectResponse) ProtoMessage() {} -func (*ConnectResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{22} } +var xxx_messageInfo_ConnectResponse proto.InternalMessageInfo type ShutdownRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Now bool `protobuf:"varint,2,opt,name=now,proto3" json:"now,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Now bool `protobuf:"varint,2,opt,name=now,proto3" json:"now,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} } +func (*ShutdownRequest) ProtoMessage() {} +func (*ShutdownRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{23} +} +func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShutdownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ShutdownRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ShutdownRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutdownRequest.Merge(m, src) +} +func (m *ShutdownRequest) XXX_Size() int { + return m.Size() +} +func (m *ShutdownRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_ShutdownRequest.DiscardUnknown(m) } -func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} } -func (*ShutdownRequest) ProtoMessage() {} -func (*ShutdownRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{23} } +var xxx_messageInfo_ShutdownRequest proto.InternalMessageInfo type PauseRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PauseRequest) Reset() { *m = PauseRequest{} } +func (*PauseRequest) ProtoMessage() {} +func (*PauseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{24} +} +func (m *PauseRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PauseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PauseRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PauseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PauseRequest.Merge(m, src) +} +func (m *PauseRequest) XXX_Size() int { + return m.Size() +} +func (m *PauseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PauseRequest.DiscardUnknown(m) } -func (m *PauseRequest) Reset() { *m = PauseRequest{} } -func (*PauseRequest) ProtoMessage() {} -func (*PauseRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{24} } +var xxx_messageInfo_PauseRequest proto.InternalMessageInfo type ResumeRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResumeRequest) Reset() { *m = ResumeRequest{} } +func (*ResumeRequest) ProtoMessage() {} +func (*ResumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9202ee34bc3ad8ca, []int{25} +} +func (m *ResumeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResumeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResumeRequest.Merge(m, src) +} +func (m *ResumeRequest) XXX_Size() int { + return m.Size() +} +func (m *ResumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResumeRequest.DiscardUnknown(m) } -func (m *ResumeRequest) Reset() { *m = ResumeRequest{} } -func (*ResumeRequest) ProtoMessage() {} -func (*ResumeRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{25} } +var xxx_messageInfo_ResumeRequest proto.InternalMessageInfo func init() { proto.RegisterType((*CreateTaskRequest)(nil), "containerd.task.v2.CreateTaskRequest") @@ -352,6 +1118,93 @@ func init() { proto.RegisterType((*PauseRequest)(nil), "containerd.task.v2.PauseRequest") proto.RegisterType((*ResumeRequest)(nil), "containerd.task.v2.ResumeRequest") } + +func init() { + proto.RegisterFile("github.com/containerd/containerd/runtime/v2/task/shim.proto", fileDescriptor_9202ee34bc3ad8ca) +} + +var 
fileDescriptor_9202ee34bc3ad8ca = []byte{ + // 1246 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0x5d, 0x6f, 0x1b, 0x45, + 0x17, 0xee, 0xfa, 0x63, 0x6d, 0x1f, 0xd7, 0x69, 0x3a, 0x6f, 0x9a, 0x77, 0xeb, 0x4a, 0xb6, 0xbb, + 0xa5, 0xc1, 0x80, 0x64, 0x0b, 0x57, 0x70, 0x41, 0x24, 0x50, 0xbe, 0xa8, 0x4c, 0x0b, 0x89, 0xb6, + 0x45, 0x45, 0xdc, 0x58, 0x1b, 0xef, 0xc4, 0x5e, 0xc5, 0xde, 0x59, 0x76, 0x66, 0xf3, 0x81, 0x84, + 0xc4, 0x15, 0x17, 0x5c, 0xf1, 0xb3, 0x72, 0x89, 0xc4, 0x0d, 0x37, 0x04, 0xea, 0x7f, 0xc0, 0x3f, + 0x40, 0xf3, 0xe1, 0x78, 0xed, 0xec, 0xda, 0x49, 0xe5, 0x9b, 0x68, 0xce, 0xce, 0x33, 0x67, 0xe6, + 0x9c, 0x79, 0xce, 0x73, 0x26, 0x86, 0xcd, 0x9e, 0xcb, 0xfa, 0xe1, 0x61, 0xa3, 0x4b, 0x86, 0xcd, + 0x2e, 0xf1, 0x98, 0xed, 0x7a, 0x38, 0x70, 0xa2, 0xc3, 0x20, 0xf4, 0x98, 0x3b, 0xc4, 0xcd, 0x93, + 0x56, 0x93, 0xd9, 0xf4, 0xb8, 0x49, 0xfb, 0xee, 0xb0, 0xe1, 0x07, 0x84, 0x11, 0x84, 0x26, 0xb0, + 0x06, 0x9f, 0x6b, 0x9c, 0xb4, 0xca, 0x0f, 0x7b, 0x84, 0xf4, 0x06, 0xb8, 0x29, 0x10, 0x87, 0xe1, + 0x51, 0xd3, 0xf6, 0xce, 0x25, 0xbc, 0xfc, 0x68, 0x76, 0x0a, 0x0f, 0x7d, 0x36, 0x9e, 0x5c, 0xeb, + 0x91, 0x1e, 0x11, 0xc3, 0x26, 0x1f, 0xa9, 0xaf, 0xd5, 0xd9, 0x25, 0xfc, 0x28, 0x94, 0xd9, 0x43, + 0x5f, 0x01, 0x3e, 0x5d, 0x78, 0x7e, 0xdb, 0x77, 0x9b, 0xec, 0xdc, 0xc7, 0xb4, 0x39, 0x24, 0xa1, + 0xc7, 0xd4, 0xba, 0xcf, 0x6e, 0xb1, 0x4e, 0x84, 0x2d, 0xe2, 0x13, 0x6b, 0xcd, 0x3f, 0x52, 0x70, + 0x7f, 0x27, 0xc0, 0x36, 0xc3, 0xaf, 0x6d, 0x7a, 0x6c, 0xe1, 0x1f, 0x42, 0x4c, 0x19, 0x5a, 0x87, + 0x94, 0xeb, 0x18, 0x5a, 0x4d, 0xab, 0x17, 0xb6, 0xf5, 0xd1, 0x65, 0x35, 0xd5, 0xde, 0xb5, 0x52, + 0xae, 0x83, 0xd6, 0x41, 0x3f, 0x0c, 0x3d, 0x67, 0x80, 0x8d, 0x14, 0x9f, 0xb3, 0x94, 0x85, 0x9a, + 0xa0, 0x07, 0x84, 0xb0, 0x23, 0x6a, 0xa4, 0x6b, 0xe9, 0x7a, 0xb1, 0xf5, 0xff, 0x46, 0x34, 0x9b, + 0x7c, 0xe3, 0xc6, 0xd7, 0xfc, 0xc0, 0x96, 0x82, 0xa1, 0x32, 0xe4, 0x19, 0x0e, 0x86, 0xae, 0x67, + 0x0f, 0x8c, 0x4c, 0x4d, 0xab, 0xe7, 0xad, 0x2b, 0x1b, 0xad, 0x41, 0x96, 0x32, 0xc7, 0xf5, 0x8c, + 0xac, 0xd8, 0x43, 0x1a, 0x7c, 0x6b, 0xca, 0x1c, 0x12, 0x32, 0x43, 0x97, 0x5b, 0x4b, 0x4b, 0x7d, + 0xc7, 0x41, 0x60, 0xe4, 0xae, 0xbe, 0xe3, 0x20, 0x40, 0x15, 0x80, 0x6e, 0x1f, 0x77, 0x8f, 0x7d, + 0xe2, 0x7a, 0xcc, 0xc8, 0x8b, 0xb9, 0xc8, 0x17, 0xf4, 0x11, 0xdc, 0xf7, 0xed, 0x00, 0x7b, 0xac, + 0x13, 0x81, 0x15, 0x04, 0x6c, 0x55, 0x4e, 0xec, 0x4c, 0xc0, 0x0d, 0xc8, 0x11, 0x9f, 0xb9, 0xc4, + 0xa3, 0x06, 0xd4, 0xb4, 0x7a, 0xb1, 0xb5, 0xd6, 0x90, 0x97, 0xd9, 0x18, 0x5f, 0x66, 0x63, 0xcb, + 0x3b, 0xb7, 0xc6, 0x20, 0x73, 0x03, 0x50, 0x34, 0xa9, 0xd4, 0x27, 0x1e, 0xc5, 0x68, 0x15, 0xd2, + 0xbe, 0x4a, 0x6b, 0xc9, 0xe2, 0x43, 0xf3, 0x25, 0x94, 0x76, 0xf1, 0x00, 0x33, 0xbc, 0x28, 0xf1, + 0x4f, 0x20, 0x87, 0xcf, 0x70, 0xb7, 0xe3, 0x3a, 0x32, 0xf3, 0xdb, 0x30, 0xba, 0xac, 0xea, 0x7b, + 0x67, 0xb8, 0xdb, 0xde, 0xb5, 0x74, 0x3e, 0xd5, 0x76, 0xcc, 0x5f, 0x34, 0x58, 0x19, 0xbb, 0x4b, + 0xda, 0x12, 0x55, 0xa1, 0x88, 0xcf, 0x5c, 0xd6, 0xa1, 0xcc, 0x66, 0x21, 0x15, 0xde, 0x4a, 0x16, + 0xf0, 0x4f, 0xaf, 0xc4, 0x17, 0xb4, 0x05, 0x05, 0x6e, 0x61, 0xa7, 0x63, 0x33, 0x23, 0x2d, 0xa2, + 0x2d, 0x5f, 0x8b, 0xf6, 0xf5, 0x98, 0xba, 0xdb, 0xf9, 0x8b, 0xcb, 0xea, 0x9d, 0xdf, 0xfe, 0xae, + 0x6a, 0x56, 0x5e, 0x2e, 0xdb, 0x62, 0xe6, 0x5f, 0x1a, 0x20, 0x7e, 0xb6, 0x83, 0x80, 0x74, 0x31, + 0xa5, 0xcb, 0x08, 0x6e, 0x8a, 0x31, 0xe9, 0x24, 0xc6, 0x64, 0xe2, 0x19, 0x93, 0x4d, 0x60, 0x8c, + 0x3e, 0xc5, 0x98, 0x3a, 0x64, 0xa8, 0x8f, 0xbb, 0x82, 0x47, 0x49, 0x37, 0x2c, 0x10, 0xe6, 0x03, + 0xf8, 0xdf, 0x54, 0x78, 0x32, 
0xd9, 0xe6, 0x4f, 0xb0, 0x6a, 0x61, 0xea, 0xfe, 0x88, 0x0f, 0xd8, + 0xf9, 0x52, 0x62, 0x5e, 0x83, 0xec, 0xa9, 0xeb, 0xb0, 0xbe, 0x08, 0xb8, 0x64, 0x49, 0x83, 0x9f, + 0xbf, 0x8f, 0xdd, 0x5e, 0x9f, 0x89, 0x70, 0x4b, 0x96, 0xb2, 0xcc, 0x17, 0x70, 0x97, 0x5f, 0xe1, + 0x72, 0xb8, 0xf4, 0x6f, 0x0a, 0x4a, 0xca, 0x9b, 0xa2, 0xd2, 0x6d, 0x35, 0x41, 0x51, 0x2f, 0x3d, + 0xa1, 0xde, 0x33, 0x9e, 0x78, 0xc1, 0x3a, 0x7e, 0xf0, 0x95, 0xd6, 0xa3, 0xa8, 0x4a, 0x9c, 0x7c, + 0xac, 0x84, 0x42, 0xd2, 0xd0, 0x52, 0xd0, 0x25, 0xa9, 0x41, 0x94, 0x3d, 0xf9, 0x19, 0xf6, 0xcc, + 0x54, 0x44, 0x61, 0x7e, 0x45, 0xc0, 0xbb, 0x54, 0x44, 0x34, 0xe7, 0xc5, 0xc4, 0x9c, 0x33, 0x28, + 0xbe, 0x70, 0x07, 0x83, 0xa5, 0x50, 0x87, 0x27, 0xc2, 0xed, 0x8d, 0x8b, 0xa5, 0x64, 0x29, 0x8b, + 0xdf, 0x8a, 0x3d, 0x18, 0x6b, 0x2e, 0x1f, 0x9a, 0x5d, 0x58, 0xd9, 0x19, 0x10, 0x8a, 0xdb, 0xfb, + 0xcb, 0xe2, 0xac, 0xbc, 0x2f, 0x59, 0xa4, 0xd2, 0x30, 0x9f, 0x42, 0xf1, 0xc0, 0x75, 0x16, 0x29, + 0x81, 0xf9, 0x0d, 0xdc, 0x95, 0x30, 0xc5, 0xb9, 0xcf, 0xa1, 0xe0, 0xcb, 0x22, 0xc3, 0xd4, 0xd0, + 0x44, 0x6b, 0xa9, 0xc5, 0x92, 0x46, 0x95, 0x62, 0xdb, 0x3b, 0x22, 0xd6, 0x64, 0x89, 0x49, 0xe1, + 0xc1, 0x44, 0xc5, 0x6f, 0xd2, 0xe0, 0x10, 0x64, 0x7c, 0x9b, 0xf5, 0x15, 0x95, 0xc5, 0x38, 0x2a, + 0xfe, 0xe9, 0x9b, 0x88, 0x7f, 0x07, 0xee, 0x7f, 0xeb, 0x3b, 0x37, 0xec, 0xa8, 0x2d, 0x28, 0x04, + 0x98, 0x92, 0x30, 0xe8, 0x62, 0x29, 0xc6, 0x49, 0xee, 0x27, 0x30, 0x55, 0xe8, 0x01, 0x5b, 0x4a, + 0xa1, 0x3f, 0x16, 0x75, 0xce, 0x9d, 0x25, 0x76, 0xa9, 0xaf, 0xa0, 0xf8, 0xc6, 0x76, 0x97, 0xb3, + 0x5d, 0x00, 0x77, 0xa5, 0x2f, 0xb5, 0xdb, 0x4c, 0xf1, 0x69, 0xf3, 0x8b, 0x2f, 0xf5, 0x4e, 0xed, + 0x68, 0x43, 0x0a, 0xe3, 0x42, 0xf6, 0x6d, 0x4a, 0xc9, 0x9b, 0xd0, 0xef, 0x43, 0xce, 0x65, 0x9b, + 0xc9, 0x63, 0x25, 0x5d, 0x8c, 0x84, 0x98, 0x75, 0x58, 0xd9, 0x21, 0x9e, 0x87, 0xbb, 0x8b, 0xf2, + 0x64, 0xda, 0x70, 0xef, 0x0a, 0xa9, 0x36, 0x7a, 0x08, 0x79, 0xfe, 0x14, 0xed, 0x4c, 0x12, 0x9f, + 0xe3, 0xf6, 0x81, 0xeb, 0xf0, 0x29, 0xfe, 0x5c, 0x13, 0x53, 0xb2, 0x59, 0xe7, 0xb8, 0xcd, 0xa7, + 0x0c, 0xc8, 0x9d, 0xe0, 0x80, 0xba, 0x44, 0x16, 0x5b, 0xc1, 0x1a, 0x9b, 0xe6, 0x26, 0xdc, 0x7b, + 0xd5, 0x0f, 0x99, 0x43, 0x4e, 0xbd, 0x45, 0xb7, 0xb6, 0x0a, 0x69, 0x8f, 0x9c, 0x0a, 0xd7, 0x79, + 0x8b, 0x0f, 0x79, 0xba, 0x0e, 0xec, 0x90, 0x2e, 0xea, 0x23, 0xe6, 0xfb, 0x50, 0xb2, 0x30, 0x0d, + 0x87, 0x8b, 0x80, 0xad, 0x5f, 0x01, 0x32, 0xbc, 0x16, 0xd0, 0x4b, 0xc8, 0x8a, 0x9e, 0x82, 0xa6, + 0x8a, 0x58, 0xbd, 0xb6, 0x1b, 0xd1, 0xe6, 0x55, 0x7e, 0x3c, 0x07, 0xa1, 0x92, 0xf6, 0x06, 0x74, + 0xf9, 0xc8, 0x42, 0x4f, 0xe3, 0xc0, 0xd7, 0x5e, 0xb5, 0xe5, 0x8d, 0x45, 0x30, 0xe5, 0x58, 0x1e, + 0x33, 0x60, 0x89, 0xc7, 0xbc, 0x2a, 0xbd, 0xc4, 0x63, 0x46, 0xea, 0x69, 0x1f, 0x74, 0xf9, 0x28, + 0x43, 0xb1, 0xe0, 0xa9, 0xf7, 0x5f, 0xd9, 0x9c, 0x07, 0x51, 0x0e, 0xdb, 0x90, 0xe1, 0x22, 0x89, + 0xaa, 0x71, 0xd8, 0x88, 0xca, 0x96, 0x6b, 0xc9, 0x00, 0xe5, 0x6a, 0x0b, 0xb2, 0xe2, 0xaa, 0xe3, + 0x23, 0x8d, 0xb2, 0xa0, 0xbc, 0x7e, 0x8d, 0xfc, 0x7b, 0xfc, 0x3f, 0x1e, 0xb4, 0x03, 0xba, 0x64, + 0x41, 0x7c, 0x78, 0x53, 0x0c, 0x49, 0x74, 0xb2, 0x0f, 0x10, 0x79, 0x6d, 0x7f, 0x10, 0x7b, 0x4f, + 0x71, 0x3a, 0x9e, 0xe8, 0xf0, 0x0b, 0xc8, 0xf0, 0x56, 0x1a, 0x9f, 0xa3, 0x48, 0x93, 0x4d, 0x74, + 0xf0, 0x25, 0x64, 0xb8, 0x72, 0xa1, 0x58, 0xce, 0x5c, 0x7f, 0xdb, 0x26, 0xfa, 0x69, 0x43, 0xe1, + 0xea, 0x4d, 0x88, 0xde, 0x4b, 0xc8, 0xd0, 0xd4, 0x93, 0x31, 0xd1, 0xd5, 0x1e, 0xe4, 0x54, 0xa3, + 0x46, 0xb1, 0x34, 0x99, 0xee, 0xe2, 0x89, 0x6e, 0x9e, 0x83, 0x2e, 0xdb, 0x53, 0x7c, 0xd9, 0x5c, + 0x6b, 0x5d, 0x73, 0x42, 0xcb, 0x70, 0x29, 0x8f, 0xcf, 
0x71, 0xa4, 0x61, 0xc4, 0xf3, 0x70, 0xaa, + 0x0b, 0x28, 0x61, 0xa0, 0xc9, 0xc2, 0x40, 0x17, 0x0a, 0xc3, 0x84, 0xd5, 0x16, 0xe4, 0x94, 0xc0, + 0x26, 0x24, 0x6a, 0x4a, 0xa7, 0xcb, 0x4f, 0xe6, 0x62, 0x94, 0xcf, 0xe7, 0x90, 0x1f, 0x2b, 0x2a, + 0x8a, 0x5d, 0x30, 0xa3, 0xb7, 0x49, 0x59, 0xdb, 0xde, 0xbf, 0x78, 0x5b, 0xb9, 0xf3, 0xe7, 0xdb, + 0xca, 0x9d, 0x9f, 0x47, 0x15, 0xed, 0x62, 0x54, 0xd1, 0x7e, 0x1f, 0x55, 0xb4, 0x7f, 0x46, 0x15, + 0xed, 0xfb, 0x4f, 0x6e, 0xfb, 0xf3, 0xc5, 0x26, 0xff, 0xf3, 0x5d, 0xea, 0x50, 0x17, 0x5b, 0x3c, + 0xfb, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x19, 0x17, 0x87, 0x57, 0x00, 0x11, 0x00, 0x00, +} + func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -441,6 +1294,9 @@ func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) { } i += n1 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -464,6 +1320,9 @@ func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintShim(dAtA, i, uint64(m.Pid)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -494,6 +1353,9 @@ func (m *DeleteRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) i += copy(dAtA[i:], m.ExecID) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -524,12 +1386,15 @@ func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) { } dAtA[i] = 0x1a i++ - i = encodeVarintShim(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt))) - n2, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt))) + n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) if err != nil { return 0, err } i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -598,6 +1463,9 @@ func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) { } i += n3 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -616,6 +1484,9 @@ func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -656,6 +1527,9 @@ func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintShim(dAtA, i, uint64(m.Height)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -686,6 +1560,9 @@ func (m *StateRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) i += copy(dAtA[i:], m.ExecID) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -761,12 +1638,21 @@ func (m *StateResponse) MarshalTo(dAtA []byte) (int, error) { } dAtA[i] = 0x52 i++ - i = encodeVarintShim(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt))) - n4, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt))) + n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) if err != nil { return 0, err } i += n4 + if len(m.ExecID) > 0 { + dAtA[i] = 0x5a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } 
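// The MarshalTo bodies in the hunks around here all funnel through the
// generated helpers sovShim (varint size) and encodeVarintShim (varint
// write). As an illustration only, here is a minimal standalone sketch of
// that base-128 varint scheme; the names encodeVarint and sizeOfVarint are
// hypothetical, not the generated helpers themselves.
package main

import "fmt"

// sizeOfVarint reports how many bytes encodeVarint will emit for v:
// one byte per 7 bits of payload.
func sizeOfVarint(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

// encodeVarint writes v at dAtA[offset:], least-significant 7-bit group
// first, setting the high bit on every byte except the last, and returns
// the offset just past the encoded value.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, sizeOfVarint(300))
	end := encodeVarint(buf, 0, 300)
	fmt.Printf("300 encodes to % x (%d bytes)\n", buf[:end], end) // ac 02
}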
return i, nil } @@ -812,6 +1698,9 @@ func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -852,6 +1741,9 @@ func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -876,6 +1768,9 @@ func (m *PidsRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) i += copy(dAtA[i:], m.ID) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -906,6 +1801,9 @@ func (m *PidsResponse) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -946,6 +1844,9 @@ func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) { } i += n5 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -980,6 +1881,9 @@ func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) { } i += n6 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1010,6 +1914,9 @@ func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) i += copy(dAtA[i:], m.ExecID) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1033,6 +1940,9 @@ func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintShim(dAtA, i, uint64(m.Pid)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1063,6 +1973,9 @@ func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) i += copy(dAtA[i:], m.ExecID) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1088,12 +2001,15 @@ func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) { } dAtA[i] = 0x12 i++ - i = encodeVarintShim(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt))) - n7, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt))) + n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) if err != nil { return 0, err } i += n7 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1118,6 +2034,9 @@ func (m *StatsRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) i += copy(dAtA[i:], m.ID) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1146,6 +2065,9 @@ func (m *StatsResponse) MarshalTo(dAtA []byte) (int, error) { } i += n8 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1170,6 +2092,9 @@ func (m *ConnectRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) i += copy(dAtA[i:], m.ID) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1204,6 +2129,9 @@ func (m *ConnectResponse) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintShim(dAtA, i, uint64(len(m.Version))) i += copy(dAtA[i:], m.Version) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1238,6 +2166,9 @@ func (m *ShutdownRequest) 
MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1262,6 +2193,9 @@ func (m *PauseRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) i += copy(dAtA[i:], m.ID) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1286,6 +2220,9 @@ func (m *ResumeRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) i += copy(dAtA[i:], m.ID) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1299,6 +2236,9 @@ func encodeVarintShim(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *CreateTaskRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1342,19 +2282,31 @@ func (m *CreateTaskRequest) Size() (n int) { l = m.Options.Size() n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *CreateTaskResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Pid != 0 { n += 1 + sovShim(uint64(m.Pid)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *DeleteRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1365,10 +2317,16 @@ func (m *DeleteRequest) Size() (n int) { if l > 0 { n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *DeleteResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Pid != 0 { @@ -1377,12 +2335,18 @@ func (m *DeleteResponse) Size() (n int) { if m.ExitStatus != 0 { n += 1 + sovShim(uint64(m.ExitStatus)) } - l = types.SizeOfStdTime(m.ExitedAt) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) n += 1 + l + sovShim(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ExecProcessRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1412,16 +2376,28 @@ func (m *ExecProcessRequest) Size() (n int) { l = m.Spec.Size() n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ExecProcessResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ResizePtyRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1438,10 +2414,16 @@ func (m *ResizePtyRequest) Size() (n int) { if m.Height != 0 { n += 1 + sovShim(uint64(m.Height)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *StateRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1452,10 +2434,16 @@ func (m *StateRequest) Size() (n int) { if l > 0 { n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *StateResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1490,12 +2478,22 @@ func (m *StateResponse) Size() (n int) { if m.ExitStatus != 0 { n += 1 + sovShim(uint64(m.ExitStatus)) } - l = types.SizeOfStdTime(m.ExitedAt) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) n += 1 + l + sovShim(uint64(l)) + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if 
m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *KillRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1512,10 +2510,16 @@ func (m *KillRequest) Size() (n int) { if m.All { n += 2 } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *CloseIORequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1529,20 +2533,32 @@ func (m *CloseIORequest) Size() (n int) { if m.Stdin { n += 2 } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *PidsRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) if l > 0 { n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *PidsResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Processes) > 0 { @@ -1551,10 +2567,16 @@ func (m *PidsResponse) Size() (n int) { n += 1 + l + sovShim(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *CheckpointTaskRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1569,10 +2591,16 @@ func (m *CheckpointTaskRequest) Size() (n int) { l = m.Options.Size() n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *UpdateTaskRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1583,10 +2611,16 @@ func (m *UpdateTaskRequest) Size() (n int) { l = m.Resources.Size() n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *StartRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1597,19 +2631,31 @@ func (m *StartRequest) Size() (n int) { if l > 0 { n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *StartResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Pid != 0 { n += 1 + sovShim(uint64(m.Pid)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *WaitRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1620,51 +2666,81 @@ func (m *WaitRequest) Size() (n int) { if l > 0 { n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *WaitResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ExitStatus != 0 { n += 1 + sovShim(uint64(m.ExitStatus)) } - l = types.SizeOfStdTime(m.ExitedAt) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) n += 1 + l + sovShim(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *StatsRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) if l > 0 { n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *StatsResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Stats != nil { l = m.Stats.Size() n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ConnectRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) if l > 0 { n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + 
n += len(m.XXX_unrecognized) + } return n } func (m *ConnectResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ShimPid != 0 { @@ -1677,10 +2753,16 @@ func (m *ConnectResponse) Size() (n int) { if l > 0 { n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ShutdownRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -1690,26 +2772,41 @@ func (m *ShutdownRequest) Size() (n int) { if m.Now { n += 2 } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *PauseRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) if l > 0 { n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ResumeRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) if l > 0 { n += 1 + l + sovShim(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1733,14 +2830,15 @@ func (this *CreateTaskRequest) String() string { s := strings.Join([]string{`&CreateTaskRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, - `Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`, + `Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "types.Mount", 1) + `,`, `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, `ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1751,6 +2849,7 @@ func (this *CreateTaskResponse) String() string { } s := strings.Join([]string{`&CreateTaskResponse{`, `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1762,6 +2861,7 @@ func (this *DeleteRequest) String() string { s := strings.Join([]string{`&DeleteRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1773,7 +2873,8 @@ func (this *DeleteResponse) String() string { s := strings.Join([]string{`&DeleteResponse{`, `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1789,7 +2890,8 @@ func (this *ExecProcessRequest) String() string { `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf.Any", 1) + 
`,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "types1.Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1799,6 +2901,7 @@ func (this *ExecProcessResponse) String() string { return "nil" } s := strings.Join([]string{`&ExecProcessResponse{`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1812,6 +2915,7 @@ func (this *ResizePtyRequest) String() string { `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, `Width:` + fmt.Sprintf("%v", this.Width) + `,`, `Height:` + fmt.Sprintf("%v", this.Height) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1823,6 +2927,7 @@ func (this *StateRequest) String() string { s := strings.Join([]string{`&StateRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1841,7 +2946,9 @@ func (this *StateResponse) String() string { `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1855,6 +2962,7 @@ func (this *KillRequest) String() string { `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, `All:` + fmt.Sprintf("%v", this.All) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1867,6 +2975,7 @@ func (this *CloseIORequest) String() string { `ID:` + fmt.Sprintf("%v", this.ID) + `,`, `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1877,6 +2986,7 @@ func (this *PidsRequest) String() string { } s := strings.Join([]string{`&PidsRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1886,7 +2996,8 @@ func (this *PidsResponse) String() string { return "nil" } s := strings.Join([]string{`&PidsResponse{`, - `Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "containerd_v1_types.ProcessInfo", 1) + `,`, + `Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "task.ProcessInfo", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1898,7 +3009,8 @@ func (this *CheckpointTaskRequest) String() string { s := strings.Join([]string{`&CheckpointTaskRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1909,7 +3021,8 @@ func (this *UpdateTaskRequest) String() string 
{ } s := strings.Join([]string{`&UpdateTaskRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "google_protobuf.Any", 1) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "types1.Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1921,6 +3034,7 @@ func (this *StartRequest) String() string { s := strings.Join([]string{`&StartRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1931,6 +3045,7 @@ func (this *StartResponse) String() string { } s := strings.Join([]string{`&StartResponse{`, `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1942,6 +3057,7 @@ func (this *WaitRequest) String() string { s := strings.Join([]string{`&WaitRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1952,7 +3068,8 @@ func (this *WaitResponse) String() string { } s := strings.Join([]string{`&WaitResponse{`, `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1963,6 +3080,7 @@ func (this *StatsRequest) String() string { } s := strings.Join([]string{`&StatsRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1972,7 +3090,8 @@ func (this *StatsResponse) String() string { return "nil" } s := strings.Join([]string{`&StatsResponse{`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Any", "google_protobuf.Any", 1) + `,`, + `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Any", "types1.Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1983,6 +3102,7 @@ func (this *ConnectRequest) String() string { } s := strings.Join([]string{`&ConnectRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1995,6 +3115,7 @@ func (this *ConnectResponse) String() string { `ShimPid:` + fmt.Sprintf("%v", this.ShimPid) + `,`, `TaskPid:` + fmt.Sprintf("%v", this.TaskPid) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -2006,6 +3127,7 @@ func (this *ShutdownRequest) String() string { s := strings.Join([]string{`&ShutdownRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, `Now:` + fmt.Sprintf("%v", this.Now) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -2016,6 +3138,7 @@ func (this *PauseRequest) String() string { } s := strings.Join([]string{`&PauseRequest{`, `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -2026,6 
+3149,7 @@ func (this *ResumeRequest) String() string {
 	}
 	s := strings.Join([]string{`&ResumeRequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2045,22 +3169,22 @@ type TaskService interface {
 	Start(ctx context.Context, req *StartRequest) (*StartResponse, error)
 	Delete(ctx context.Context, req *DeleteRequest) (*DeleteResponse, error)
 	Pids(ctx context.Context, req *PidsRequest) (*PidsResponse, error)
-	Pause(ctx context.Context, req *PauseRequest) (*google_protobuf1.Empty, error)
-	Resume(ctx context.Context, req *ResumeRequest) (*google_protobuf1.Empty, error)
-	Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error)
-	Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error)
-	Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error)
-	ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error)
-	CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error)
-	Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error)
+	Pause(ctx context.Context, req *PauseRequest) (*types1.Empty, error)
+	Resume(ctx context.Context, req *ResumeRequest) (*types1.Empty, error)
+	Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*types1.Empty, error)
+	Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error)
+	Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error)
+	ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error)
+	CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error)
+	Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error)
 	Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error)
 	Stats(ctx context.Context, req *StatsRequest) (*StatsResponse, error)
 	Connect(ctx context.Context, req *ConnectRequest) (*ConnectResponse, error)
-	Shutdown(ctx context.Context, req *ShutdownRequest) (*google_protobuf1.Empty, error)
+	Shutdown(ctx context.Context, req *ShutdownRequest) (*types1.Empty, error)
 }
-func RegisterTaskService(srv *ttrpc.Server, svc TaskService) {
-	srv.Register("containerd.task.v2.Task", map[string]ttrpc.Method{
+func RegisterTaskService(srv *github_com_containerd_ttrpc.Server, svc TaskService) {
+	srv.Register("containerd.task.v2.Task", map[string]github_com_containerd_ttrpc.Method{
 		"State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
 			var req StateRequest
 			if err := unmarshal(&req); err != nil {
@@ -2184,10 +3308,10 @@ func RegisterTaskService(srv *ttrpc.Server, svc TaskService) {
 }
 type taskClient struct {
-	client *ttrpc.Client
+	client *github_com_containerd_ttrpc.Client
 }
-func NewTaskClient(client *ttrpc.Client) TaskService {
+func NewTaskClient(client *github_com_containerd_ttrpc.Client) TaskService {
 	return &taskClient{
 		client: client,
 	}
 }
@@ -2233,64 +3357,64 @@ func (c *taskClient) Pids(ctx context.Context, req *PidsRequest) (*PidsResponse,
 	return &resp, nil
 }
-func (c *taskClient) Pause(ctx context.Context, req *PauseRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *taskClient) Pause(ctx context.Context, req *PauseRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pause", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
-func (c *taskClient) Resume(ctx context.Context, req *ResumeRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *taskClient) Resume(ctx context.Context, req *ResumeRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.task.v2.Task", "Resume", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
-func (c *taskClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *taskClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.task.v2.Task", "Checkpoint", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
-func (c *taskClient) Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *taskClient) Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.task.v2.Task", "Kill", req, &resp); err != nil {
 		return nil, err
	}
 	return &resp, nil
 }
-func (c *taskClient) Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *taskClient) Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.task.v2.Task", "Exec", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
-func (c *taskClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *taskClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.task.v2.Task", "ResizePty", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
-func (c *taskClient) CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *taskClient) CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.task.v2.Task", "CloseIO", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
-func (c *taskClient) Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *taskClient) Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.task.v2.Task", "Update", req, &resp); err != nil {
 		return nil, err
 	}
@@ -2321,8 +3445,8 @@ func (c *taskClient) Connect(ctx context.Context, req *ConnectRequest) (*Connect
 	return &resp, nil
 }
-func (c *taskClient) Shutdown(ctx context.Context, req *ShutdownRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *taskClient) Shutdown(ctx context.Context, req *ShutdownRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.task.v2.Task", "Shutdown", req, &resp); err != nil {
 		return nil, err
 	}
@@ -2343,7 +3467,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error {
 		}
 		b := dAtA[iNdEx]
 		iNdEx++
-		wire |= (uint64(b) & 0x7F) << shift
+		wire |= uint64(b&0x7F) << shift
 		if b < 0x80 {
 			break
 		}
@@ -2371,7 +3495,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error {
 		}
 		b := dAtA[iNdEx]
 		iNdEx++
-		stringLen |= (uint64(b) & 0x7F) << shift
+
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2381,6 +3505,9 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2400,7 +3527,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2410,6 +3537,9 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2429,7 +3559,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2438,10 +3568,13 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rootfs = append(m.Rootfs, &containerd_types.Mount{}) + m.Rootfs = append(m.Rootfs, &types.Mount{}) if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2460,7 +3593,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2480,7 +3613,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2490,6 +3623,9 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2509,7 +3645,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2519,6 +3655,9 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2538,7 +3677,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2548,6 +3687,9 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2567,7 +3709,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2577,6 +3719,9 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2596,7 +3741,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2606,6 +3751,9 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2625,7 +3773,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2634,11 +3782,14 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Options == nil { - m.Options = &google_protobuf.Any{} + m.Options = &types1.Any{} } if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2653,9 +3804,13 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2680,7 +3835,7 @@ func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2708,7 +3863,7 @@ func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Pid |= (uint32(b) & 0x7F) << shift + m.Pid |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -2722,9 +3877,13 @@ func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2749,7 +3908,7 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2777,7 +3936,7 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2787,6 +3946,9 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2806,7 +3968,7 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2816,6 +3978,9 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2830,9 +3995,13 @@ func (m *DeleteRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
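// The Unmarshal loops in these hunks reassemble varints with the
// regenerated form `stringLen |= uint64(b&0x7F) << shift` — masking the
// byte before widening rather than widening before masking; same value,
// one fewer conversion. A minimal standalone sketch of that decode loop;
// the name decodeVarint is hypothetical, not the generated code's helper.
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a base-128 varint from dAtA starting at iNdEx and
// returns the value plus the index of the first byte after it.
func decodeVarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			// mirrors the generated ErrIntOverflowShim guard
			return 0, 0, errors.New("varint overflows uint64")
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift // low 7 bits carry the payload
		if b < 0x80 {                // a clear high bit marks the last byte
			return v, iNdEx, nil
		}
	}
}

func main() {
	v, next, err := decodeVarint([]byte{0xac, 0x02, 0xff}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}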
iNdEx += skippy } } @@ -2857,7 +4026,7 @@ func (m *DeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2885,7 +4054,7 @@ func (m *DeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Pid |= (uint32(b) & 0x7F) << shift + m.Pid |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -2904,7 +4073,7 @@ func (m *DeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExitStatus |= (uint32(b) & 0x7F) << shift + m.ExitStatus |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -2923,7 +4092,7 @@ func (m *DeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2932,10 +4101,13 @@ func (m *DeleteResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2948,9 +4120,13 @@ func (m *DeleteResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2975,7 +4151,7 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3003,7 +4179,7 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3013,6 +4189,9 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3032,7 +4211,7 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3042,6 +4221,9 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3061,7 +4243,7 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3081,7 +4263,7 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3091,6 +4273,9 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3110,7 +4295,7 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) 
& 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3120,6 +4305,9 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3139,7 +4327,7 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3149,6 +4337,9 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3168,7 +4359,7 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3177,11 +4368,14 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Spec == nil { - m.Spec = &google_protobuf.Any{} + m.Spec = &types1.Any{} } if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3196,9 +4390,13 @@ func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -3223,7 +4421,7 @@ func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3246,9 +4444,13 @@ func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
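// The guards added just above — `if (iNdEx + skippy) < 0` and the
// recurring `if postIndex < 0 { return ErrInvalidLengthShim }` — exist
// because a hostile varint length can make the signed addition wrap
// negative, which would otherwise slip past the `> l` bounds check. A
// small sketch of that failure mode under hypothetical names (checkedEnd
// is not part of the generated code).
package main

import (
	"errors"
	"fmt"
)

var errInvalidLength = errors.New("proto: invalid length")

// checkedEnd validates that start+length stays inside buf without
// overflowing, mirroring the order of checks in the regenerated Unmarshal.
func checkedEnd(buf []byte, start, length int) (int, error) {
	if length < 0 {
		return 0, errInvalidLength
	}
	end := start + length
	if end < 0 { // signed overflow wrapped negative
		return 0, errInvalidLength
	}
	if end > len(buf) {
		return 0, errors.New("unexpected EOF")
	}
	return end, nil
}

func main() {
	buf := make([]byte, 8)
	maxInt := int(^uint(0) >> 1)            // portable math.MaxInt
	_, err := checkedEnd(buf, 4, maxInt)    // wraps without the guard
	fmt.Println(err)                        // proto: invalid length
}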
iNdEx += skippy } } @@ -3273,7 +4475,7 @@ func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3301,7 +4503,7 @@ func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3311,6 +4513,9 @@ func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3330,7 +4535,7 @@ func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3340,6 +4545,9 @@ func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3359,7 +4567,7 @@ func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Width |= (uint32(b) & 0x7F) << shift + m.Width |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -3378,7 +4586,7 @@ func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= (uint32(b) & 0x7F) << shift + m.Height |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -3392,9 +4600,13 @@ func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -3419,7 +4631,7 @@ func (m *StateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3447,7 +4659,7 @@ func (m *StateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3457,6 +4669,9 @@ func (m *StateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3476,7 +4691,7 @@ func (m *StateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3486,6 +4701,9 @@ func (m *StateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3500,9 +4718,13 @@ func (m *StateRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
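// The other repeated addition: each default case now appends the skipped
// bytes to m.XXX_unrecognized instead of discarding them, so fields this
// version of the message does not know about survive a decode/re-encode
// round trip (the standard unknown-field retention behavior of
// regenerated gogo/protobuf code).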
iNdEx += skippy } } @@ -3527,7 +4749,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3555,7 +4777,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3565,6 +4787,9 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3584,7 +4809,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3594,6 +4819,9 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3613,7 +4841,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Pid |= (uint32(b) & 0x7F) << shift + m.Pid |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -3632,7 +4860,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Status |= (containerd_v1_types.Status(b) & 0x7F) << shift + m.Status |= task.Status(b&0x7F) << shift if b < 0x80 { break } @@ -3651,7 +4879,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3661,6 +4889,9 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3680,7 +4911,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3690,6 +4921,9 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3709,7 +4943,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3719,6 +4953,9 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3738,7 +4975,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3758,7 +4995,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExitStatus |= (uint32(b) & 0x7F) << shift + m.ExitStatus |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -3777,7 +5014,7 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3786,13 +5023,48 @@ func (m *StateResponse) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipShim(dAtA[iNdEx:]) @@ -3802,9 +5074,13 @@ func (m *StateResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -3829,7 +5105,7 @@ func (m *KillRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3857,7 +5133,7 @@ func (m *KillRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3867,6 +5143,9 @@ func (m *KillRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3886,7 +5165,7 @@ func (m *KillRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3896,6 +5175,9 @@ func (m *KillRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3915,7 +5197,7 @@ func (m *KillRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Signal |= (uint32(b) & 0x7F) << shift + m.Signal |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -3934,7 +5216,7 @@ func (m *KillRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3949,9 +5231,13 @@ func (m *KillRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
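// The `case 11` added above in StateResponse.Unmarshal decodes the new
// ExecID string; it pairs with the `string exec_id = 11;` field that this
// bump adds to runtime/v2/task/shim.proto later in the diff.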
iNdEx += skippy } } @@ -3976,7 +5262,7 @@ func (m *CloseIORequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4004,7 +5290,7 @@ func (m *CloseIORequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4014,6 +5300,9 @@ func (m *CloseIORequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4033,7 +5322,7 @@ func (m *CloseIORequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4043,6 +5332,9 @@ func (m *CloseIORequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4062,7 +5354,7 @@ func (m *CloseIORequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4077,9 +5369,13 @@ func (m *CloseIORequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -4104,7 +5400,7 @@ func (m *PidsRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4132,7 +5428,7 @@ func (m *PidsRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4142,6 +5438,9 @@ func (m *PidsRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4156,9 +5455,13 @@ func (m *PidsRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -4183,7 +5486,7 @@ func (m *PidsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4211,7 +5514,7 @@ func (m *PidsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4220,10 +5523,13 @@ func (m *PidsResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Processes = append(m.Processes, &containerd_v1_types.ProcessInfo{}) + m.Processes = append(m.Processes, &task.ProcessInfo{}) if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -4237,9 +5543,13 @@ func (m *PidsResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -4264,7 +5574,7 @@ func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4292,7 +5602,7 @@ func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4302,6 +5612,9 @@ func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4321,7 +5634,7 @@ func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4331,6 +5644,9 @@ func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4350,7 +5666,7 @@ func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4359,11 +5675,14 @@ func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Options == nil { - m.Options = &google_protobuf.Any{} + m.Options = &types1.Any{} } if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4378,9 +5697,13 @@ func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
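// `google_protobuf.Any` -> `types1.Any` (and `containerd_v1_types.Status`
// -> `task.Status` earlier) are import-alias renames only: here `types1`
// appears to alias github.com/gogo/protobuf/types, the plain `types`
// alias being taken by containerd/api/types, while `task` aliases
// containerd/api/types/task. The underlying message types are unchanged.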
iNdEx += skippy } } @@ -4405,7 +5728,7 @@ func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4433,7 +5756,7 @@ func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4443,6 +5766,9 @@ func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4462,7 +5788,7 @@ func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4471,11 +5797,14 @@ func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Resources == nil { - m.Resources = &google_protobuf.Any{} + m.Resources = &types1.Any{} } if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4490,9 +5819,13 @@ func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -4517,7 +5850,7 @@ func (m *StartRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4545,7 +5878,7 @@ func (m *StartRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4555,6 +5888,9 @@ func (m *StartRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4574,7 +5910,7 @@ func (m *StartRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4584,6 +5920,9 @@ func (m *StartRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4598,9 +5937,13 @@ func (m *StartRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -4625,7 +5968,7 @@ func (m *StartResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4653,7 +5996,7 @@ func (m *StartResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Pid |= (uint32(b) & 0x7F) << shift + m.Pid |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -4667,9 +6010,13 @@ func (m *StartResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -4694,7 +6041,7 @@ func (m *WaitRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4722,7 +6069,7 @@ func (m *WaitRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4732,6 +6079,9 @@ func (m *WaitRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4751,7 +6101,7 @@ func (m *WaitRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4761,6 +6111,9 @@ func (m *WaitRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4775,9 +6128,13 @@ func (m *WaitRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -4802,7 +6159,7 @@ func (m *WaitResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4830,7 +6187,7 @@ func (m *WaitResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExitStatus |= (uint32(b) & 0x7F) << shift + m.ExitStatus |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -4849,7 +6206,7 @@ func (m *WaitResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4858,10 +6215,13 @@ func (m *WaitResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4874,9 +6234,13 @@ func (m *WaitResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -4901,7 +6265,7 @@ func (m *StatsRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4929,7 +6293,7 @@ func (m *StatsRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4939,6 +6303,9 @@ func (m *StatsRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4953,9 +6320,13 @@ func (m *StatsRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -4980,7 +6351,7 @@ func (m *StatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5008,7 +6379,7 @@ func (m *StatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5017,11 +6388,14 @@ func (m *StatsResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Stats == nil { - m.Stats = &google_protobuf.Any{} + m.Stats = &types1.Any{} } if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5036,9 +6410,13 @@ func (m *StatsResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
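// Likewise, `types.StdTimeUnmarshal` ->
// `github_com_gogo_protobuf_types.StdTimeUnmarshal` is the same aliasing
// story for the non-nullable stdtime fields (ExitedAt): the helper that
// turns the wire Timestamp into a time.Time is now reached through the
// fully qualified gogo/protobuf/types alias.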
iNdEx += skippy } } @@ -5063,7 +6441,7 @@ func (m *ConnectRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5091,7 +6469,7 @@ func (m *ConnectRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5101,6 +6479,9 @@ func (m *ConnectRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5115,9 +6496,13 @@ func (m *ConnectRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -5142,7 +6527,7 @@ func (m *ConnectResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5170,7 +6555,7 @@ func (m *ConnectResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ShimPid |= (uint32(b) & 0x7F) << shift + m.ShimPid |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -5189,7 +6574,7 @@ func (m *ConnectResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TaskPid |= (uint32(b) & 0x7F) << shift + m.TaskPid |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -5208,7 +6593,7 @@ func (m *ConnectResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5218,6 +6603,9 @@ func (m *ConnectResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5232,9 +6620,13 @@ func (m *ConnectResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -5259,7 +6651,7 @@ func (m *ShutdownRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5287,7 +6679,7 @@ func (m *ShutdownRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5297,6 +6689,9 @@ func (m *ShutdownRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5316,7 +6711,7 @@ func (m *ShutdownRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5331,9 +6726,13 @@ func (m *ShutdownRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -5358,7 +6757,7 @@ func (m *PauseRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5386,7 +6785,7 @@ func (m *PauseRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5396,6 +6795,9 @@ func (m *PauseRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5410,9 +6812,13 @@ func (m *PauseRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -5437,7 +6843,7 @@ func (m *ResumeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5465,7 +6871,7 @@ func (m *ResumeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5475,6 +6881,9 @@ func (m *ResumeRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthShim } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthShim + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5489,9 +6898,13 @@ func (m *ResumeRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthShim } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthShim + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
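// skipShim below gets the same hardening: the decoded length is validated
// before iNdEx is advanced rather than after, and the advanced index
// (including the group-skipping path) is re-checked for wraparound,
// mirroring the per-field postIndex checks above.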
iNdEx += skippy } } @@ -5555,10 +6968,13 @@ func skipShim(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthShim } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthShim + } return iNdEx, nil case 3: for { @@ -5587,6 +7003,9 @@ func skipShim(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthShim + } } return iNdEx, nil case 4: @@ -5605,89 +7024,3 @@ var ( ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowShim = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("github.com/containerd/containerd/runtime/v2/task/shim.proto", fileDescriptorShim) -} - -var fileDescriptorShim = []byte{ - // 1240 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xdb, 0x6f, 0x1b, 0xc5, - 0x17, 0xee, 0xfa, 0xb2, 0xb6, 0x8f, 0xeb, 0x34, 0x9d, 0x5f, 0x9a, 0xdf, 0xd6, 0x95, 0x6c, 0x77, - 0x4b, 0x83, 0x01, 0xc9, 0x16, 0xae, 0xe0, 0x81, 0x48, 0xa0, 0xdc, 0xa8, 0x4c, 0x0b, 0x89, 0xb6, - 0x45, 0x45, 0xbc, 0x58, 0x1b, 0xef, 0xc4, 0x5e, 0xc5, 0xde, 0x59, 0x76, 0x66, 0x73, 0x41, 0x42, - 0xe2, 0x89, 0x07, 0x9e, 0xf8, 0xb3, 0xf2, 0xc0, 0x03, 0x12, 0x2f, 0xbc, 0x10, 0xa8, 0xff, 0x12, - 0x34, 0x17, 0xc7, 0x6b, 0x67, 0xd7, 0x4e, 0x2a, 0xbf, 0x44, 0x73, 0x76, 0xbe, 0x39, 0x33, 0xe7, - 0xcc, 0x77, 0xbe, 0x33, 0x31, 0x6c, 0xf6, 0x5c, 0xd6, 0x0f, 0x0f, 0x1b, 0x5d, 0x32, 0x6c, 0x76, - 0x89, 0xc7, 0x6c, 0xd7, 0xc3, 0x81, 0x13, 0x1d, 0x06, 0xa1, 0xc7, 0xdc, 0x21, 0x6e, 0x9e, 0xb4, - 0x9a, 0xcc, 0xa6, 0xc7, 0x4d, 0xda, 0x77, 0x87, 0x0d, 0x3f, 0x20, 0x8c, 0x20, 0x34, 0x81, 0x35, - 0xf8, 0x5c, 0xe3, 0xa4, 0x55, 0x7e, 0xd8, 0x23, 0xa4, 0x37, 0xc0, 0x4d, 0x81, 0x38, 0x0c, 0x8f, - 0x9a, 0xb6, 0x77, 0x2e, 0xe1, 0xe5, 0x47, 0xb3, 0x53, 0x78, 0xe8, 0xb3, 0xf1, 0xe4, 0x5a, 0x8f, - 0xf4, 0x88, 0x18, 0x36, 0xf9, 0x48, 0x7d, 0xad, 0xce, 0x2e, 0xe1, 0x47, 0xa1, 0xcc, 0x1e, 0xfa, - 0x0a, 0xf0, 0xe9, 0xc2, 0xf3, 0xdb, 0xbe, 0xdb, 0x64, 0xe7, 0x3e, 0xa6, 0xcd, 0x21, 0x09, 0x3d, - 0xa6, 0xd6, 0x7d, 0x76, 0x8b, 0x75, 0x22, 0x6c, 0x11, 0x9f, 0x58, 0x6b, 0xfe, 0x99, 0x82, 0xfb, - 0x3b, 0x01, 0xb6, 0x19, 0x7e, 0x6d, 0xd3, 0x63, 0x0b, 0xff, 0x10, 0x62, 0xca, 0xd0, 0x3a, 0xa4, - 0x5c, 0xc7, 0xd0, 0x6a, 0x5a, 0xbd, 0xb0, 0xad, 0x8f, 0x2e, 0xab, 0xa9, 0xf6, 0xae, 0x95, 0x72, - 0x1d, 0xb4, 0x0e, 0xfa, 0x61, 0xe8, 0x39, 0x03, 0x6c, 0xa4, 0xf8, 0x9c, 0xa5, 0x2c, 0xd4, 0x04, - 0x3d, 0x20, 0x84, 0x1d, 0x51, 0x23, 0x5d, 0x4b, 0xd7, 0x8b, 0xad, 0xff, 0x37, 0xa2, 0xd9, 0xe4, - 0x1b, 0x37, 0xbe, 0xe6, 0x07, 0xb6, 0x14, 0x0c, 0x95, 0x21, 0xcf, 0x70, 0x30, 0x74, 0x3d, 0x7b, - 0x60, 0x64, 0x6a, 0x5a, 0x3d, 0x6f, 0x5d, 0xd9, 0x68, 0x0d, 0xb2, 0x94, 0x39, 0xae, 0x67, 0x64, - 0xc5, 0x1e, 0xd2, 0xe0, 0x5b, 0x53, 0xe6, 0x90, 0x90, 0x19, 0xba, 0xdc, 0x5a, 0x5a, 0xea, 0x3b, - 0x0e, 0x02, 0x23, 0x77, 0xf5, 0x1d, 0x07, 0x01, 0xaa, 0x00, 0x74, 0xfb, 0xb8, 0x7b, 0xec, 0x13, - 0xd7, 0x63, 0x46, 0x5e, 0xcc, 0x45, 0xbe, 0xa0, 0x8f, 0xe0, 0xbe, 0x6f, 0x07, 0xd8, 0x63, 0x9d, - 0x08, 0xac, 0x20, 0x60, 0xab, 0x72, 0x62, 0x67, 0x02, 0x6e, 0x40, 0x8e, 0xf8, 0xcc, 0x25, 0x1e, - 0x35, 0xa0, 0xa6, 0xd5, 0x8b, 0xad, 0xb5, 0x86, 0xbc, 0xcc, 0xc6, 0xf8, 0x32, 0x1b, 0x5b, 0xde, - 0xb9, 0x35, 0x06, 0x99, 0x1b, 0x80, 0xa2, 0x49, 0xa5, 0x3e, 0xf1, 0x28, 0x46, 0xab, 0x90, 0xf6, - 0x55, 0x5a, 0x4b, 0x16, 0x1f, 0x9a, 0x2f, 0xa1, 0xb4, 0x8b, 0x07, 0x98, 0xe1, 0x45, 0x89, 0x7f, - 0x02, 0x39, 0x7c, 0x86, 0xbb, 0x1d, 0xd7, 0x91, 0x99, 0xdf, 
0x86, 0xd1, 0x65, 0x55, 0xdf, 0x3b, - 0xc3, 0xdd, 0xf6, 0xae, 0xa5, 0xf3, 0xa9, 0xb6, 0x63, 0xfe, 0xa2, 0xc1, 0xca, 0xd8, 0x5d, 0xd2, - 0x96, 0xa8, 0x0a, 0x45, 0x7c, 0xe6, 0xb2, 0x0e, 0x65, 0x36, 0x0b, 0xa9, 0xf0, 0x56, 0xb2, 0x80, - 0x7f, 0x7a, 0x25, 0xbe, 0xa0, 0x2d, 0x28, 0x70, 0x0b, 0x3b, 0x1d, 0x9b, 0x19, 0x69, 0x11, 0x6d, - 0xf9, 0x5a, 0xb4, 0xaf, 0xc7, 0xd4, 0xdd, 0xce, 0x5f, 0x5c, 0x56, 0xef, 0xfc, 0xf6, 0x4f, 0x55, - 0xb3, 0xf2, 0x72, 0xd9, 0x16, 0x33, 0xff, 0xd6, 0x00, 0xf1, 0xb3, 0x1d, 0x04, 0xa4, 0x8b, 0x29, - 0x5d, 0x46, 0x70, 0x53, 0x8c, 0x49, 0x27, 0x31, 0x26, 0x13, 0xcf, 0x98, 0x6c, 0x02, 0x63, 0xf4, - 0x29, 0xc6, 0xd4, 0x21, 0x43, 0x7d, 0xdc, 0x15, 0x3c, 0x4a, 0xba, 0x61, 0x81, 0x30, 0x1f, 0xc0, - 0xff, 0xa6, 0xc2, 0x93, 0xc9, 0x36, 0x7f, 0x82, 0x55, 0x0b, 0x53, 0xf7, 0x47, 0x7c, 0xc0, 0xce, - 0x97, 0x12, 0xf3, 0x1a, 0x64, 0x4f, 0x5d, 0x87, 0xf5, 0x45, 0xc0, 0x25, 0x4b, 0x1a, 0xfc, 0xfc, - 0x7d, 0xec, 0xf6, 0xfa, 0x4c, 0x84, 0x5b, 0xb2, 0x94, 0x65, 0xbe, 0x80, 0xbb, 0xfc, 0x0a, 0x97, - 0xc3, 0xa5, 0xdf, 0x53, 0x50, 0x52, 0xde, 0x14, 0x95, 0x6e, 0xab, 0x09, 0x8a, 0x7a, 0xe9, 0x09, - 0xf5, 0x9e, 0xf1, 0xc4, 0x0b, 0xd6, 0xf1, 0x83, 0xaf, 0xb4, 0x1e, 0x45, 0x55, 0xe2, 0xe4, 0x63, - 0x25, 0x14, 0x92, 0x86, 0x96, 0x82, 0x2e, 0x49, 0x0d, 0xa2, 0xec, 0xc9, 0xcf, 0xb0, 0x67, 0xa6, - 0x22, 0x0a, 0xf3, 0x2b, 0x02, 0xde, 0xa9, 0x22, 0x18, 0x14, 0x5f, 0xb8, 0x83, 0xc1, 0x52, 0x58, - 0xc1, 0x63, 0x74, 0x7b, 0xe3, 0x3a, 0x28, 0x59, 0xca, 0xe2, 0x09, 0xb7, 0x07, 0x63, 0x39, 0xe5, - 0x43, 0xb3, 0x0b, 0x2b, 0x3b, 0x03, 0x42, 0x71, 0x7b, 0x7f, 0x59, 0x74, 0x94, 0x57, 0x21, 0xeb, - 0x4f, 0x1a, 0xe6, 0x53, 0x28, 0x1e, 0xb8, 0xce, 0xa2, 0x22, 0x37, 0xbf, 0x81, 0xbb, 0x12, 0xa6, - 0xe8, 0xf4, 0x39, 0x14, 0x7c, 0x59, 0x3f, 0x98, 0x1a, 0x9a, 0xe8, 0x1a, 0xb5, 0x58, 0x3e, 0xa8, - 0x2a, 0x6b, 0x7b, 0x47, 0xc4, 0x9a, 0x2c, 0x31, 0x29, 0x3c, 0x98, 0x08, 0xf4, 0x4d, 0x7a, 0x17, - 0x82, 0x8c, 0x6f, 0xb3, 0xbe, 0x62, 0xa9, 0x18, 0x47, 0x75, 0x3d, 0x7d, 0x13, 0x5d, 0xef, 0xc0, - 0xfd, 0x6f, 0x7d, 0xe7, 0x86, 0xcd, 0xb2, 0x05, 0x85, 0x00, 0x53, 0x12, 0x06, 0x5d, 0x2c, 0x75, - 0x36, 0xc9, 0xfd, 0x04, 0xa6, 0x6a, 0x38, 0x60, 0x4b, 0xa9, 0xe1, 0xc7, 0xa2, 0x84, 0xb9, 0xb3, - 0xc4, 0x06, 0xf4, 0x15, 0x14, 0xdf, 0xd8, 0xee, 0x72, 0xb6, 0x0b, 0xe0, 0xae, 0xf4, 0xa5, 0x76, - 0x9b, 0xa9, 0x2b, 0x6d, 0x7e, 0x5d, 0xa5, 0xde, 0xa9, 0xae, 0x36, 0xa4, 0xe6, 0x2d, 0x64, 0xdf, - 0xa6, 0x54, 0xb3, 0x09, 0xfd, 0x3e, 0xe4, 0x5c, 0xb6, 0x99, 0x3c, 0x56, 0xd2, 0xc5, 0x48, 0x88, - 0x59, 0x87, 0x95, 0x1d, 0xe2, 0x79, 0xb8, 0xbb, 0x28, 0x4f, 0xa6, 0x0d, 0xf7, 0xae, 0x90, 0x6a, - 0xa3, 0x87, 0x90, 0xe7, 0xaf, 0xcc, 0xce, 0x24, 0xf1, 0x39, 0x6e, 0x1f, 0xb8, 0x0e, 0x9f, 0xe2, - 0x2f, 0x31, 0x31, 0x25, 0xfb, 0x70, 0x8e, 0xdb, 0x7c, 0xca, 0x80, 0xdc, 0x09, 0x0e, 0xa8, 0x4b, - 0x64, 0xb1, 0x15, 0xac, 0xb1, 0x69, 0x6e, 0xc2, 0xbd, 0x57, 0xfd, 0x90, 0x39, 0xe4, 0xd4, 0x5b, - 0x74, 0x6b, 0xab, 0x90, 0xf6, 0xc8, 0xa9, 0x70, 0x9d, 0xb7, 0xf8, 0x90, 0xa7, 0xeb, 0xc0, 0x0e, - 0xe9, 0xa2, 0x16, 0x61, 0xbe, 0x0f, 0x25, 0x0b, 0xd3, 0x70, 0xb8, 0x08, 0xd8, 0xfa, 0x15, 0x20, - 0xc3, 0x6b, 0x01, 0xbd, 0x84, 0xac, 0x68, 0x17, 0x68, 0xaa, 0x88, 0xd5, 0x43, 0xba, 0x11, 0xed, - 0x4b, 0xe5, 0xc7, 0x73, 0x10, 0x2a, 0x69, 0x6f, 0x40, 0x97, 0xef, 0x27, 0xf4, 0x34, 0x0e, 0x7c, - 0xed, 0xc1, 0x5a, 0xde, 0x58, 0x04, 0x53, 0x8e, 0xe5, 0x31, 0x03, 0x96, 0x78, 0xcc, 0xab, 0xd2, - 0x4b, 0x3c, 0x66, 0xa4, 0x9e, 0xf6, 0x41, 0x97, 0xef, 0x2d, 0x14, 0x0b, 0x9e, 0x7a, 0xda, 0x95, - 0xcd, 0x79, 0x10, 0xe5, 0xb0, 0x0d, 0x19, 0x2e, 0x92, 0xa8, 0x1a, 0x87, 0x8d, 0xa8, 
0x6c, 0xb9, - 0x96, 0x0c, 0x50, 0xae, 0xb6, 0x20, 0x2b, 0xae, 0x3a, 0x3e, 0xd2, 0x28, 0x0b, 0xca, 0xeb, 0xd7, - 0xc8, 0xbf, 0xc7, 0xff, 0x99, 0x41, 0x3b, 0xa0, 0x4b, 0x16, 0xc4, 0x87, 0x37, 0xc5, 0x90, 0x44, - 0x27, 0xfb, 0x00, 0x91, 0x87, 0xf4, 0x07, 0xb1, 0xf7, 0x14, 0xa7, 0xe3, 0x89, 0x0e, 0xbf, 0x80, - 0x0c, 0x6f, 0xa5, 0xf1, 0x39, 0x8a, 0x34, 0xd9, 0x44, 0x07, 0x5f, 0x42, 0x86, 0x2b, 0x17, 0x8a, - 0xe5, 0xcc, 0xf5, 0x67, 0x6b, 0xa2, 0x9f, 0x36, 0x14, 0xae, 0x9e, 0x7b, 0xe8, 0xbd, 0x84, 0x0c, - 0x4d, 0xbd, 0x06, 0x13, 0x5d, 0xed, 0x41, 0x4e, 0x35, 0x6a, 0x14, 0x4b, 0x93, 0xe9, 0x2e, 0x9e, - 0xe8, 0xe6, 0x39, 0xe8, 0xb2, 0x3d, 0xc5, 0x97, 0xcd, 0xb5, 0xd6, 0x35, 0x27, 0xb4, 0x0c, 0x97, - 0xf2, 0xf8, 0x1c, 0x47, 0x1a, 0x46, 0x3c, 0x0f, 0xa7, 0xba, 0x80, 0x12, 0x06, 0x9a, 0x2c, 0x0c, - 0x74, 0xa1, 0x30, 0x4c, 0x58, 0x6d, 0x41, 0x4e, 0x09, 0x6c, 0x42, 0xa2, 0xa6, 0x74, 0xba, 0xfc, - 0x64, 0x2e, 0x46, 0xf9, 0x7c, 0x0e, 0xf9, 0xb1, 0xa2, 0xa2, 0xd8, 0x05, 0x33, 0x7a, 0x9b, 0x94, - 0xb5, 0xed, 0xfd, 0x8b, 0xb7, 0x95, 0x3b, 0x7f, 0xbd, 0xad, 0xdc, 0xf9, 0x79, 0x54, 0xd1, 0x2e, - 0x46, 0x15, 0xed, 0x8f, 0x51, 0x45, 0xfb, 0x77, 0x54, 0xd1, 0xbe, 0xff, 0xe4, 0xb6, 0xbf, 0x4c, - 0x6c, 0xf2, 0x3f, 0xdf, 0xa5, 0x0e, 0x75, 0xb1, 0xc5, 0xb3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, - 0x11, 0x4c, 0x17, 0x02, 0xdb, 0x10, 0x00, 0x00, -} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto index 5521ff7993..d390462dc4 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto +++ b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto @@ -99,6 +99,7 @@ message StateResponse { bool terminal = 8; uint32 exit_status = 9; google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + string exec_id = 11; } message KillRequest { diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go index 700f44efa9..d8329af9fb 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_unix.go @@ -24,3 +24,8 @@ import "os" func ForceRemoveAll(path string) error { return os.RemoveAll(path) } + +// MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems. +func MkdirAllWithACL(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go index dc880c3427..1bdc5317a9 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go @@ -30,6 +30,11 @@ import ( "github.com/Microsoft/hcsshim" ) +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory // ACL'd for Builtin Administrators and Local System. func MkdirAllWithACL(path string, perm os.FileMode) error { @@ -78,7 +83,7 @@ func mkdirall(path string, adminAndLocalSystem bool) error { if j > 1 { // Create parent - err = mkdirall(path[0:j-1], false) + err = mkdirall(path[0:j-1], adminAndLocalSystem) if err != nil { return err } @@ -112,8 +117,7 @@ func mkdirall(path string, adminAndLocalSystem bool) error { // and Local System. 
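// The SDDL string is now the exported constant
// SddlAdministratorsLocalSystem defined above: D:P marks the DACL as
// protected, and each ACE grants GENERIC_ALL (GA) with object and
// container inherit (OICI) to Builtin Administrators (BA) and Local
// System (SY). Note also the behavior fix in mkdirall above: parent
// directories are now created with the caller's adminAndLocalSystem flag
// instead of a hard-coded false, so the ACL applies to the whole created
// chain.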
func mkdirWithACL(name string) error { sa := syscall.SecurityAttributes{Length: 0} - sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" - sd, err := winio.SddlToSecurityDescriptor(sddl) + sd, err := winio.SddlToSecurityDescriptor(SddlAdministratorsLocalSystem) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} } diff --git a/vendor/github.com/containerd/containerd/sys/oom_unix.go b/vendor/github.com/containerd/containerd/sys/oom_unix.go index 7192efec1c..54412e9c3c 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_unix.go +++ b/vendor/github.com/containerd/containerd/sys/oom_unix.go @@ -20,8 +20,10 @@ package sys import ( "fmt" + "io/ioutil" "os" "strconv" + "strings" "github.com/opencontainers/runc/libcontainer/system" ) @@ -45,3 +47,13 @@ func SetOOMScore(pid, score int) error { } return nil } + +// GetOOMScoreAdj gets the oom score for a process +func GetOOMScoreAdj(pid int) (int, error) { + path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.Atoi(strings.TrimSpace(string(data))) +} diff --git a/vendor/github.com/containerd/containerd/sys/oom_windows.go b/vendor/github.com/containerd/containerd/sys/oom_windows.go index f44bcebd1e..a917ba635b 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_windows.go +++ b/vendor/github.com/containerd/containerd/sys/oom_windows.go @@ -22,3 +22,10 @@ package sys func SetOOMScore(pid, score int) error { return nil } + +// GetOOMScoreAdj gets the oom score for a process +// +// Not implemented on Windows +func GetOOMScoreAdj(pid int) (int, error) { + return 0, nil +} diff --git a/vendor/github.com/containerd/continuity/fs/copy.go b/vendor/github.com/containerd/continuity/fs/copy.go new file mode 100644 index 0000000000..97053d7e5c --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy.go @@ -0,0 +1,172 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/pkg/errors" +) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32*1024) + return &buffer + }, +} + +// XAttrErrorHandlers transform a non-nil xattr error. +// Return nil to ignore an error. +// xattrKey can be empty for listxattr operation. +type XAttrErrorHandler func(dst, src, xattrKey string, err error) error + +type copyDirOpts struct { + xeh XAttrErrorHandler +} + +type CopyDirOpt func(*copyDirOpts) error + +// WithXAttrErrorHandler allows specifying XAttrErrorHandler +// If nil XAttrErrorHandler is specified (default), CopyDir stops +// on a non-nil xattr error. +func WithXAttrErrorHandler(xeh XAttrErrorHandler) CopyDirOpt { + return func(o *copyDirOpts) error { + o.xeh = xeh + return nil + } +} + +// WithAllowXAttrErrors allows ignoring xattr errors. 
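+// A sketch of a call site (paths hypothetical):
+//
+//	if err := fs.CopyDir("/var/lib/dst", "/var/lib/src", fs.WithAllowXAttrErrors()); err != nil {
+//		return err
+//	}
+//
+// useful when the destination filesystem rejects xattrs that exist on the
+// source (tmpfs on older kernels, for example, has no user.* support).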
+func WithAllowXAttrErrors() CopyDirOpt { + xeh := func(dst, src, xattrKey string, err error) error { + return nil + } + return WithXAttrErrorHandler(xeh) +} + +// CopyDir copies the directory from src to dst. +// Most efficient copy of files is attempted. +func CopyDir(dst, src string, opts ...CopyDirOpt) error { + var o copyDirOpts + for _, opt := range opts { + if err := opt(&o); err != nil { + return err + } + } + inodes := map[uint64]string{} + return copyDirectory(dst, src, inodes, &o) +} + +func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) error { + stat, err := os.Stat(src) + if err != nil { + return errors.Wrapf(err, "failed to stat %s", src) + } + if !stat.IsDir() { + return errors.Errorf("source %s is not directory", src) + } + + if st, err := os.Stat(dst); err != nil { + if err := os.Mkdir(dst, stat.Mode()); err != nil { + return errors.Wrapf(err, "failed to mkdir %s", dst) + } + } else if !st.IsDir() { + return errors.Errorf("cannot copy to non-directory: %s", dst) + } else { + if err := os.Chmod(dst, stat.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod on %s", dst) + } + } + + fis, err := ioutil.ReadDir(src) + if err != nil { + return errors.Wrapf(err, "failed to read %s", src) + } + + if err := copyFileInfo(stat, dst); err != nil { + return errors.Wrapf(err, "failed to copy file info for %s", dst) + } + + for _, fi := range fis { + source := filepath.Join(src, fi.Name()) + target := filepath.Join(dst, fi.Name()) + + switch { + case fi.IsDir(): + if err := copyDirectory(target, source, inodes, o); err != nil { + return err + } + continue + case (fi.Mode() & os.ModeType) == 0: + link, err := getLinkSource(target, fi, inodes) + if err != nil { + return errors.Wrap(err, "failed to get hardlink") + } + if link != "" { + if err := os.Link(link, target); err != nil { + return errors.Wrap(err, "failed to create hard link") + } + } else if err := CopyFile(target, source); err != nil { + return errors.Wrap(err, "failed to copy files") + } + case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink: + link, err := os.Readlink(source) + if err != nil { + return errors.Wrapf(err, "failed to read link: %s", source) + } + if err := os.Symlink(link, target); err != nil { + return errors.Wrapf(err, "failed to create symlink: %s", target) + } + case (fi.Mode() & os.ModeDevice) == os.ModeDevice: + if err := copyDevice(target, fi); err != nil { + return errors.Wrapf(err, "failed to create device") + } + default: + // TODO: Support pipes and sockets + return errors.Wrapf(err, "unsupported mode %s", fi.Mode()) + } + if err := copyFileInfo(fi, target); err != nil { + return errors.Wrap(err, "failed to copy file info") + } + + if err := copyXAttrs(target, source, o.xeh); err != nil { + return errors.Wrap(err, "failed to copy xattrs") + } + } + + return nil +} + +// CopyFile copies the source file to the target. +// The most efficient means of copying is used for the platform. 
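+// On Linux that is copy_file_range(2) with a userspace io.CopyBuffer
+// fallback on ENOSYS/EXDEV (see copy_linux.go below); the other platforms
+// go straight to a pooled-buffer copy. Usage sketch:
+//
+//	if err := fs.CopyFile("/tmp/dst.img", "/tmp/src.img"); err != nil {
+//		return err
+//	}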
+func CopyFile(target, source string) error { + src, err := os.Open(source) + if err != nil { + return errors.Wrapf(err, "failed to open source %s", source) + } + defer src.Close() + tgt, err := os.Create(target) + if err != nil { + return errors.Wrapf(err, "failed to open target %s", target) + } + defer tgt.Close() + + return copyFileContent(tgt, src) +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_linux.go b/vendor/github.com/containerd/continuity/fs/copy_linux.go new file mode 100644 index 0000000000..81c71522aa --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_linux.go @@ -0,0 +1,144 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "io" + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { + if os.IsPermission(err) { + // Normally if uid/gid are the same this would be a no-op, but some + // filesystems may still return EPERM... for instance NFS does this. + // In such a case, this is not an error. + if dstStat, err2 := os.Lstat(name); err2 == nil { + st2 := dstStat.Sys().(*syscall.Stat_t) + if st.Uid == st2.Uid && st.Gid == st2.Gid { + err = nil + } + } + } + if err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + } + + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + + return nil +} + +const maxSSizeT = int64(^uint(0) >> 1) + +func copyFileContent(dst, src *os.File) error { + st, err := src.Stat() + if err != nil { + return errors.Wrap(err, "unable to stat source") + } + + size := st.Size() + first := true + srcFd := int(src.Fd()) + dstFd := int(dst.Fd()) + + for size > 0 { + // Ensure that we are never trying to copy more than SSIZE_MAX at a + // time and at the same time avoids overflows when the file is larger + // than 4GB on 32-bit systems. 
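+		// (On 32-bit, maxSSizeT is 1<<31 - 1, so a file just over 2 GiB
+		// needs at least two CopyFileRange calls; on 64-bit the clamp is
+		// effectively a no-op for any realistic file size.)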
+ var copySize int + if size > maxSSizeT { + copySize = int(maxSSizeT) + } else { + copySize = int(size) + } + n, err := unix.CopyFileRange(srcFd, nil, dstFd, nil, copySize, 0) + if err != nil { + if (err != unix.ENOSYS && err != unix.EXDEV) || !first { + return errors.Wrap(err, "copy file range failed") + } + + buf := bufferPool.Get().(*[]byte) + _, err = io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return errors.Wrap(err, "userspace copy failed") + } + + first = false + size -= int64(n) + } + + return nil +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + e := errors.Wrapf(err, "failed to list xattrs on %s", src) + if xeh != nil { + e = xeh(dst, src, "", e) + } + return e + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + } + + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_unix.go b/vendor/github.com/containerd/continuity/fs/copy_unix.go new file mode 100644 index 0000000000..73c01a46dd --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_unix.go @@ -0,0 +1,112 @@ +// +build solaris darwin freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "io" + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { + if os.IsPermission(err) { + // Normally if uid/gid are the same this would be a no-op, but some + // filesystems may still return EPERM... for instance NFS does this. + // In such a case, this is not an error. 
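+			// (Root-squashed NFS is the usual offender: chown(2) to the
+			// file's existing uid/gid can still return EPERM, so compare
+			// against the current owner before treating it as fatal.)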
+ if dstStat, err2 := os.Lstat(name); err2 == nil { + st2 := dstStat.Sys().(*syscall.Stat_t) + if st.Uid == st2.Uid && st.Gid == st2.Gid { + err = nil + } + } + } + if err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + } + + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + timespec := []syscall.Timespec{StatAtime(st), StatMtime(st)} + if err := syscall.UtimesNano(name, timespec); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + + return err +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + e := errors.Wrapf(err, "failed to list xattrs on %s", src) + if xeh != nil { + e = xeh(dst, src, "", e) + } + return e + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + } + + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_windows.go b/vendor/github.com/containerd/continuity/fs/copy_windows.go new file mode 100644 index 0000000000..27c7d7dbb9 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_windows.go @@ -0,0 +1,49 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package fs + +import ( + "io" + "os" + + "github.com/pkg/errors" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + + // TODO: copy windows specific metadata + + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return err +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + return errors.New("device copy not supported") +} diff --git a/vendor/github.com/containerd/continuity/fs/diff.go b/vendor/github.com/containerd/continuity/fs/diff.go new file mode 100644 index 0000000000..e64f9e73d3 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/diff.go @@ -0,0 +1,326 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "context" + "os" + "path/filepath" + "strings" + + "golang.org/x/sync/errgroup" + + "github.com/sirupsen/logrus" +) + +// ChangeKind is the type of modification that +// a change is making. +type ChangeKind int + +const ( + // ChangeKindUnmodified represents an unmodified + // file + ChangeKindUnmodified = iota + + // ChangeKindAdd represents an addition of + // a file + ChangeKindAdd + + // ChangeKindModify represents a change to + // an existing file + ChangeKindModify + + // ChangeKindDelete represents a delete of + // a file + ChangeKindDelete +) + +func (k ChangeKind) String() string { + switch k { + case ChangeKindUnmodified: + return "unmodified" + case ChangeKindAdd: + return "add" + case ChangeKindModify: + return "modify" + case ChangeKindDelete: + return "delete" + default: + return "" + } +} + +// Change represents single change between a diff and its parent. +type Change struct { + Kind ChangeKind + Path string +} + +// ChangeFunc is the type of function called for each change +// computed during a directory changes calculation. +type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error + +// Changes computes changes between two directories calling the +// given change function for each computed change. The first +// directory is intended to the base directory and second +// directory the changed directory. +// +// The change callback is called by the order of path names and +// should be appliable in that order. +// Due to this apply ordering, the following is true +// - Removed directory trees only create a single change for the root +// directory removed. Remaining changes are implied. +// - A directory which is modified to become a file will not have +// delete entries for sub-path items, their removal is implied +// by the removal of the parent directory. +// +// Opaque directories will not be treated specially and each file +// removed from the base directory will show up as a removal. 
+// +// File content comparisons will be done on files which have timestamps +// which may have been truncated. If either of the files being compared +// has a zero value nanosecond value, each byte will be compared for +// differences. If 2 files have the same seconds value but different +// nanosecond values where one of those values is zero, the files will +// be considered unchanged if the content is the same. This behavior +// is to account for timestamp truncation during archiving. +func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error { + if a == "" { + logrus.Debugf("Using single walk diff for %s", b) + return addDirChanges(ctx, changeFn, b) + } else if diffOptions := detectDirDiff(b, a); diffOptions != nil { + logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a) + return diffDirChanges(ctx, changeFn, a, diffOptions) + } + + logrus.Debugf("Using double walk diff for %s from %s", b, a) + return doubleWalkDiff(ctx, changeFn, a, b) +} + +func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error { + return filepath.Walk(root, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(root, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + return changeFn(ChangeKindAdd, path, f, nil) + }) +} + +// diffDirOptions is used when the diff can be directly calculated from +// a diff directory to its base, without walking both trees. +type diffDirOptions struct { + diffDir string + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) +} + +// diffDirChanges walks the diff directory and compares changes against the base. +func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error { + changedDirs := make(map[string]struct{}) + return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(o.diffDir, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + // TODO: handle opaqueness, start new double walker at this + // location to get deletes, and skip tree in single walker + + if o.skipChange != nil { + if skip, err := o.skipChange(path); skip { + return err + } + } + + var kind ChangeKind + + deletedFile, err := o.deleteChange(o.diffDir, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + path = deletedFile + kind = ChangeKindDelete + f = nil + } else { + // Otherwise, the file was added + kind = ChangeKindAdd + + // ...Unless it already existed in a base, in which case, it's a modification + stat, err := os.Stat(filepath.Join(base, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the base, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + kind = ChangeKindModify + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if kind == ChangeKindAdd || kind == ChangeKindDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + pi, err := os.Stat(filepath.Join(o.diffDir, parent)) + if err := changeFn(ChangeKindModify, parent, pi, err); err != nil { + return err + } + changedDirs[parent] = struct{}{} + } + } + + return changeFn(kind, path, f, nil) + }) +} + +// doubleWalkDiff walks both directories to create a diff +func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) { + g, ctx := errgroup.WithContext(ctx) + + var ( + c1 = make(chan *currentPath) + c2 = make(chan *currentPath) + + f1, f2 *currentPath + rmdir string + ) + g.Go(func() error { + defer close(c1) + return pathWalk(ctx, a, c1) + }) + g.Go(func() error { + defer close(c2) + return pathWalk(ctx, b, c2) + }) + g.Go(func() error { + for c1 != nil || c2 != nil { + if f1 == nil && c1 != nil { + f1, err = nextPath(ctx, c1) + if err != nil { + return err + } + if f1 == nil { + c1 = nil + } + } + + if f2 == nil && c2 != nil { + f2, err = nextPath(ctx, c2) + if err != nil { + return err + } + if f2 == nil { + c2 = nil + } + } + if f1 == nil && f2 == nil { + continue + } + + var f os.FileInfo + k, p := pathChange(f1, f2) + switch k { + case ChangeKindAdd: + if rmdir != "" { + rmdir = "" + } + f = f2.f + f2 = nil + case ChangeKindDelete: + // Check if this file is already removed by being + // under of a removed directory + if rmdir != "" && strings.HasPrefix(f1.path, rmdir) { + f1 = nil + continue + } else if f1.f.IsDir() { + rmdir = f1.path + string(os.PathSeparator) + } else if rmdir != "" { + rmdir = "" + } + f1 = nil + case ChangeKindModify: + same, err := sameFile(f1, f2) + if err != nil { + return err + } + if f1.f.IsDir() && !f2.f.IsDir() { + rmdir = f1.path + string(os.PathSeparator) + } else if rmdir != "" { + rmdir = "" + } + f = f2.f + f1 = nil + f2 = nil + if same { + if !isLinked(f) { + continue + } + k = ChangeKindUnmodified + } + } + if err := changeFn(k, p, f, nil); err != nil { + return err + } + } + return nil + }) + + return g.Wait() +} diff --git a/vendor/github.com/containerd/continuity/fs/diff_unix.go b/vendor/github.com/containerd/continuity/fs/diff_unix.go new file mode 100644 index 0000000000..7913af27d9 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/diff_unix.go @@ -0,0 +1,74 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "bytes" + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" +) + +// detectDirDiff returns diff dir options if a directory could +// be found in the mount info for upper which is the direct +// diff with the provided lower directory +func detectDirDiff(upper, lower string) *diffDirOptions { + // TODO: get mount options for upper + // TODO: detect AUFS + // TODO: detect overlay + return nil +} + +// compareSysStat returns whether the stats are equivalent, +// whether the files are considered the same file, and +// an error +func compareSysStat(s1, s2 interface{}) (bool, error) { + ls1, ok := s1.(*syscall.Stat_t) + if !ok { + return false, nil + } + ls2, ok := s2.(*syscall.Stat_t) + if !ok { + return false, nil + } + + return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil +} + +func compareCapabilities(p1, p2 string) (bool, error) { + c1, err := sysx.LGetxattr(p1, "security.capability") + if err != nil && err != sysx.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p1) + } + c2, err := sysx.LGetxattr(p2, "security.capability") + if err != nil && err != sysx.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p2) + } + return bytes.Equal(c1, c2), nil +} + +func isLinked(f os.FileInfo) bool { + s, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return false + } + return !f.IsDir() && s.Nlink > 1 +} diff --git a/vendor/github.com/containerd/continuity/fs/diff_windows.go b/vendor/github.com/containerd/continuity/fs/diff_windows.go new file mode 100644 index 0000000000..4bfa72d3a1 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/diff_windows.go @@ -0,0 +1,48 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package fs + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func detectDirDiff(upper, lower string) *diffDirOptions { + return nil +} + +func compareSysStat(s1, s2 interface{}) (bool, error) { + f1, ok := s1.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + f2, ok := s2.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + return f1.FileAttributes == f2.FileAttributes, nil +} + +func compareCapabilities(p1, p2 string) (bool, error) { + // TODO: Use windows equivalent + return true, nil +} + +func isLinked(os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/containerd/continuity/fs/dtype_linux.go b/vendor/github.com/containerd/continuity/fs/dtype_linux.go new file mode 100644 index 0000000000..10510d8dec --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/dtype_linux.go @@ -0,0 +1,103 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *syscall.Dirent) bool { + visited++ + if ent.Type == syscall.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := syscall.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/containerd/continuity/fs/du.go b/vendor/github.com/containerd/continuity/fs/du.go new file mode 100644 index 0000000000..fccc985dc5 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/du.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "context" + +// Usage of disk information +type Usage struct { + Inodes int64 + Size int64 +} + +// DiskUsage counts the number of inodes and disk usage for the resources under +// path. +func DiskUsage(ctx context.Context, roots ...string) (Usage, error) { + return diskUsage(ctx, roots...) +} + +// DiffUsage counts the numbers of inodes and disk usage in the +// diff between the 2 directories. The first path is intended +// as the base directory and the second as the changed directory. +func DiffUsage(ctx context.Context, a, b string) (Usage, error) { + return diffUsage(ctx, a, b) +} diff --git a/vendor/github.com/containerd/continuity/fs/du_unix.go b/vendor/github.com/containerd/continuity/fs/du_unix.go new file mode 100644 index 0000000000..e22ffbea37 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/du_unix.go @@ -0,0 +1,110 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "context" + "os" + "path/filepath" + "syscall" +) + +type inode struct { + // TODO(stevvooe): Can probably reduce memory usage by not tracking + // device, but we can leave this right for now. + dev, ino uint64 +} + +func newInode(stat *syscall.Stat_t) inode { + return inode{ + // Dev is uint32 on darwin/bsd, uint64 on linux/solaris + dev: uint64(stat.Dev), // nolint: unconvert + // Ino is uint32 on bsd, uint64 on darwin/linux/solaris + ino: uint64(stat.Ino), // nolint: unconvert + } +} + +func diskUsage(ctx context.Context, roots ...string) (Usage, error) { + + var ( + size int64 + inodes = map[inode]struct{}{} // expensive! + ) + + for _, root := range roots { + if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + inoKey := newInode(fi.Sys().(*syscall.Stat_t)) + if _, ok := inodes[inoKey]; !ok { + inodes[inoKey] = struct{}{} + size += fi.Size() + } + + return nil + }); err != nil { + return Usage{}, err + } + } + + return Usage{ + Inodes: int64(len(inodes)), + Size: size, + }, nil +} + +func diffUsage(ctx context.Context, a, b string) (Usage, error) { + var ( + size int64 + inodes = map[inode]struct{}{} // expensive! 
+ ) + + if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if kind == ChangeKindAdd || kind == ChangeKindModify { + inoKey := newInode(fi.Sys().(*syscall.Stat_t)) + if _, ok := inodes[inoKey]; !ok { + inodes[inoKey] = struct{}{} + size += fi.Size() + } + + return nil + + } + return nil + }); err != nil { + return Usage{}, err + } + + return Usage{ + Inodes: int64(len(inodes)), + Size: size, + }, nil +} diff --git a/vendor/github.com/containerd/continuity/fs/du_windows.go b/vendor/github.com/containerd/continuity/fs/du_windows.go new file mode 100644 index 0000000000..8f25ec59c5 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/du_windows.go @@ -0,0 +1,82 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "context" + "os" + "path/filepath" +) + +func diskUsage(ctx context.Context, roots ...string) (Usage, error) { + var ( + size int64 + ) + + // TODO(stevvooe): Support inodes (or equivalent) for windows. + + for _, root := range roots { + if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + size += fi.Size() + return nil + }); err != nil { + return Usage{}, err + } + } + + return Usage{ + Size: size, + }, nil +} + +func diffUsage(ctx context.Context, a, b string) (Usage, error) { + var ( + size int64 + ) + + if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if kind == ChangeKindAdd || kind == ChangeKindModify { + size += fi.Size() + + return nil + + } + return nil + }); err != nil { + return Usage{}, err + } + + return Usage{ + Size: size, + }, nil +} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink.go b/vendor/github.com/containerd/continuity/fs/hardlink.go new file mode 100644 index 0000000000..762aa45e69 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/hardlink.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "os" + +// GetLinkInfo returns an identifier representing the node a hardlink is pointing +// to. If the file is not hard linked then 0 will be returned. 
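GetLinkInfo, defined immediately below, is the exported entry point here. A hedged sketch of one way a caller might use it, grouping the paths of hardlinked files by the inode it reports (inodeGroups is a hypothetical helper; the walk root is an example path):

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/containerd/continuity/fs"
)

// inodeGroups is a hypothetical helper: it walks root and collects the
// paths of hardlinked files keyed by the inode from fs.GetLinkInfo.
func inodeGroups(root string) (map[uint64][]string, error) {
	groups := map[uint64][]string{}
	err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if ino, isHardlink := fs.GetLinkInfo(fi); isHardlink {
			groups[ino] = append(groups[ino], path)
		}
		return nil
	})
	return groups, err
}

func main() {
	groups, err := inodeGroups("/tmp/tree") // example path
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for ino, paths := range groups {
		fmt.Println(ino, paths)
	}
}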
+func GetLinkInfo(fi os.FileInfo) (uint64, bool) { + return getLinkInfo(fi) +} + +// getLinkSource returns a path for the given name and +// file info to its link source in the provided inode +// map. If the given file name is not in the map and +// has other links, it is added to the inode map +// to be a source for other link locations. +func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) { + inode, isHardlink := getLinkInfo(fi) + if !isHardlink { + return "", nil + } + + path, ok := inodes[inode] + if !ok { + inodes[inode] = name + } + return path, nil +} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_unix.go b/vendor/github.com/containerd/continuity/fs/hardlink_unix.go new file mode 100644 index 0000000000..f95f0904c1 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/hardlink_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "os" + "syscall" +) + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return 0, false + } + + // Ino is uint32 on bsd, uint64 on darwin/linux/solaris + return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert +} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_windows.go b/vendor/github.com/containerd/continuity/fs/hardlink_windows.go new file mode 100644 index 0000000000..7485547147 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/hardlink_windows.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "os" + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + return 0, false +} diff --git a/vendor/github.com/containerd/continuity/fs/path.go b/vendor/github.com/containerd/continuity/fs/path.go new file mode 100644 index 0000000000..8863caa9df --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/path.go @@ -0,0 +1,313 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "bytes" + "context" + "io" + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +var ( + errTooManyLinks = errors.New("too many links") +) + +type currentPath struct { + path string + f os.FileInfo + fullPath string +} + +func pathChange(lower, upper *currentPath) (ChangeKind, string) { + if lower == nil { + if upper == nil { + panic("cannot compare nil paths") + } + return ChangeKindAdd, upper.path + } + if upper == nil { + return ChangeKindDelete, lower.path + } + + switch i := directoryCompare(lower.path, upper.path); { + case i < 0: + // File in lower that is not in upper + return ChangeKindDelete, lower.path + case i > 0: + // File in upper that is not in lower + return ChangeKindAdd, upper.path + default: + return ChangeKindModify, upper.path + } +} + +func directoryCompare(a, b string) int { + l := len(a) + if len(b) < l { + l = len(b) + } + for i := 0; i < l; i++ { + c1, c2 := a[i], b[i] + if c1 == filepath.Separator { + c1 = byte(0) + } + if c2 == filepath.Separator { + c2 = byte(0) + } + if c1 < c2 { + return -1 + } + if c1 > c2 { + return +1 + } + } + if len(a) < len(b) { + return -1 + } + if len(a) > len(b) { + return +1 + } + return 0 +} + +func sameFile(f1, f2 *currentPath) (bool, error) { + if os.SameFile(f1.f, f2.f) { + return true, nil + } + + equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys()) + if err != nil || !equalStat { + return equalStat, err + } + + if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq { + return eq, err + } + + // If not a directory also check size, modtime, and content + if !f1.f.IsDir() { + if f1.f.Size() != f2.f.Size() { + return false, nil + } + t1 := f1.f.ModTime() + t2 := f2.f.ModTime() + + if t1.Unix() != t2.Unix() { + return false, nil + } + + // If the timestamp may have been truncated in both of the + // files, check content of file to determine difference + if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 { + var eq bool + if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink { + eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath) + } else if f1.f.Size() > 0 { + eq, err = compareFileContent(f1.fullPath, f2.fullPath) + } + if err != nil || !eq { + return eq, err + } + } else if t1.Nanosecond() != t2.Nanosecond() { + return false, nil + } + } + + return true, nil +} + +func compareSymlinkTarget(p1, p2 string) (bool, error) { + t1, err := os.Readlink(p1) + if err != nil { + return false, err + } + t2, err := os.Readlink(p2) + if err != nil { + return false, err + } + return t1 == t2, nil +} + +const compareChuckSize = 32 * 1024 + +// compareFileContent compares the content of 2 same sized files +// by comparing each byte. 
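Aside on directoryCompare, defined earlier in this file: mapping the path separator to byte zero keeps a directory's subtree contiguous in the merged walk order. A hedged, self-contained demo (cmpPath is an illustrative copy of that comparison, not the vendored function):

package main

import (
	"fmt"
	"sort"
)

// cmpPath mirrors directoryCompare for illustration: the separator
// sorts before every other byte, so children follow their parent
// directly, ahead of lexically similar siblings.
func cmpPath(a, b string) int {
	l := len(a)
	if len(b) < l {
		l = len(b)
	}
	for i := 0; i < l; i++ {
		c1, c2 := a[i], b[i]
		if c1 == '/' {
			c1 = 0
		}
		if c2 == '/' {
			c2 = 0
		}
		if c1 != c2 {
			if c1 < c2 {
				return -1
			}
			return 1
		}
	}
	return len(a) - len(b)
}

func main() {
	paths := []string{"/foo-bar", "/foo/bar", "/foo"}
	sort.Slice(paths, func(i, j int) bool { return cmpPath(paths[i], paths[j]) < 0 })
	fmt.Println(paths) // [/foo /foo/bar /foo-bar]
}

A plain byte-wise sort would instead yield [/foo /foo-bar /foo/bar], splitting the /foo subtree apart, since '-' sorts before '/'.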
+func compareFileContent(p1, p2 string) (bool, error) { + f1, err := os.Open(p1) + if err != nil { + return false, err + } + defer f1.Close() + f2, err := os.Open(p2) + if err != nil { + return false, err + } + defer f2.Close() + + b1 := make([]byte, compareChuckSize) + b2 := make([]byte, compareChuckSize) + for { + n1, err1 := f1.Read(b1) + if err1 != nil && err1 != io.EOF { + return false, err1 + } + n2, err2 := f2.Read(b2) + if err2 != nil && err2 != io.EOF { + return false, err2 + } + if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) { + return false, nil + } + if err1 == io.EOF && err2 == io.EOF { + return true, nil + } + } +} + +func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error { + return filepath.Walk(root, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(root, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + p := ¤tPath{ + path: path, + f: f, + fullPath: filepath.Join(root, path), + } + + select { + case <-ctx.Done(): + return ctx.Err() + case pathC <- p: + return nil + } + }) +} + +func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case p := <-pathC: + return p, nil + } +} + +// RootPath joins a path with a root, evaluating and bounding any +// symlink to the root directory. +func RootPath(root, path string) (string, error) { + if path == "" { + return root, nil + } + var linksWalked int // to protect against cycles + for { + i := linksWalked + newpath, err := walkLinks(root, path, &linksWalked) + if err != nil { + return "", err + } + path = newpath + if i == linksWalked { + newpath = filepath.Join("/", newpath) + if path == newpath { + return filepath.Join(root, newpath), nil + } + path = newpath + } + } +} + +func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) { + if *linksWalked > 255 { + return "", false, errTooManyLinks + } + + path = filepath.Join("/", path) + if path == "/" { + return path, false, nil + } + realPath := filepath.Join(root, path) + + fi, err := os.Lstat(realPath) + if err != nil { + // If path does not yet exist, treat as non-symlink + if os.IsNotExist(err) { + return path, false, nil + } + return "", false, err + } + if fi.Mode()&os.ModeSymlink == 0 { + return path, false, nil + } + newpath, err = os.Readlink(realPath) + if err != nil { + return "", false, err + } + *linksWalked++ + return newpath, true, nil +} + +func walkLinks(root, path string, linksWalked *int) (string, error) { + switch dir, file := filepath.Split(path); { + case dir == "": + newpath, _, err := walkLink(root, file, linksWalked) + return newpath, err + case file == "": + if os.IsPathSeparator(dir[len(dir)-1]) { + if dir == "/" { + return dir, nil + } + return walkLinks(root, dir[:len(dir)-1], linksWalked) + } + newpath, _, err := walkLink(root, dir, linksWalked) + return newpath, err + default: + newdir, err := walkLinks(root, dir, linksWalked) + if err != nil { + return "", err + } + newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked) + if err != nil { + return "", err + } + if !islink { + return newpath, nil + } + if filepath.IsAbs(newpath) { + return newpath, nil + } + return filepath.Join(newdir, newpath), nil + } +} diff --git a/vendor/github.com/containerd/continuity/fs/stat_bsd.go 
b/vendor/github.com/containerd/continuity/fs/stat_bsd.go new file mode 100644 index 0000000000..cb7400a33e --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/stat_bsd.go @@ -0,0 +1,44 @@ +// +build darwin freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "syscall" + "time" +) + +// StatAtime returns the access time from a stat struct +func StatAtime(st *syscall.Stat_t) syscall.Timespec { + return st.Atimespec +} + +// StatCtime returns the created time from a stat struct +func StatCtime(st *syscall.Stat_t) syscall.Timespec { + return st.Ctimespec +} + +// StatMtime returns the modified time from a stat struct +func StatMtime(st *syscall.Stat_t) syscall.Timespec { + return st.Mtimespec +} + +// StatATimeAsTime returns the access time as a time.Time +func StatATimeAsTime(st *syscall.Stat_t) time.Time { + return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert +} diff --git a/vendor/github.com/containerd/continuity/fs/stat_linux.go b/vendor/github.com/containerd/continuity/fs/stat_linux.go new file mode 100644 index 0000000000..4a678dd1fd --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/stat_linux.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "syscall" + "time" +) + +// StatAtime returns the Atim +func StatAtime(st *syscall.Stat_t) syscall.Timespec { + return st.Atim +} + +// StatCtime returns the Ctim +func StatCtime(st *syscall.Stat_t) syscall.Timespec { + return st.Ctim +} + +// StatMtime returns the Mtim +func StatMtime(st *syscall.Stat_t) syscall.Timespec { + return st.Mtim +} + +// StatATimeAsTime returns st.Atim as a time.Time +func StatATimeAsTime(st *syscall.Stat_t) time.Time { + // The int64 conversions ensure the line compiles for 32-bit systems as well. + return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert +} diff --git a/vendor/github.com/containerd/continuity/fs/time.go b/vendor/github.com/containerd/continuity/fs/time.go new file mode 100644 index 0000000000..cde4561233 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/time.go @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "time" + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} diff --git a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go deleted file mode 100644 index b0d5a6b567..0000000000 --- a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pathdriver - -import ( - "path/filepath" -) - -// PathDriver provides all of the path manipulation functions in a common -// interface. The context should call these and never use the `filepath` -// package or any other package to manipulate paths. -type PathDriver interface { - Join(paths ...string) string - IsAbs(path string) bool - Rel(base, target string) (string, error) - Base(path string) string - Dir(path string) string - Clean(path string) string - Split(path string) (dir, file string) - Separator() byte - Abs(path string) (string, error) - Walk(string, filepath.WalkFunc) error - FromSlash(path string) string - ToSlash(path string) string - Match(pattern, name string) (matched bool, err error) -} - -// pathDriver is a simple default implementation calls the filepath package. -type pathDriver struct{} - -// LocalPathDriver is the exported pathDriver struct for convenience. -var LocalPathDriver PathDriver = &pathDriver{} - -func (*pathDriver) Join(paths ...string) string { - return filepath.Join(paths...) 
-} - -func (*pathDriver) IsAbs(path string) bool { - return filepath.IsAbs(path) -} - -func (*pathDriver) Rel(base, target string) (string, error) { - return filepath.Rel(base, target) -} - -func (*pathDriver) Base(path string) string { - return filepath.Base(path) -} - -func (*pathDriver) Dir(path string) string { - return filepath.Dir(path) -} - -func (*pathDriver) Clean(path string) string { - return filepath.Clean(path) -} - -func (*pathDriver) Split(path string) (dir, file string) { - return filepath.Split(path) -} - -func (*pathDriver) Separator() byte { - return filepath.Separator -} - -func (*pathDriver) Abs(path string) (string, error) { - return filepath.Abs(path) -} - -// Note that filepath.Walk calls os.Stat, so if the context wants to -// to call Driver.Stat() for Walk, they need to create a new struct that -// overrides this method. -func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error { - return filepath.Walk(root, walkFn) -} - -func (*pathDriver) FromSlash(path string) string { - return filepath.FromSlash(path) -} - -func (*pathDriver) ToSlash(path string) string { - return filepath.ToSlash(path) -} - -func (*pathDriver) Match(pattern, name string) (bool, error) { - return filepath.Match(pattern, name) -} diff --git a/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go b/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go new file mode 100644 index 0000000000..0bfa6a0409 --- /dev/null +++ b/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go @@ -0,0 +1,26 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package syscallx + +import "syscall" + +// Readlink returns the destination of the named symbolic link. +func Readlink(path string, buf []byte) (n int, err error) { + return syscall.Readlink(path, buf) +} diff --git a/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go b/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go new file mode 100644 index 0000000000..2ba8149905 --- /dev/null +++ b/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go @@ -0,0 +1,112 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package syscallx + +import ( + "syscall" + "unsafe" +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + + // GenericReparseBuffer + reparseBuffer byte +} + +type mountPointReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + PathBuffer [1]uint16 +} + +type symbolicLinkReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + Flags uint32 + PathBuffer [1]uint16 +} + +const ( + _IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + _SYMLINK_FLAG_RELATIVE = 1 +) + +// Readlink returns the destination of the named symbolic link. +func Readlink(path string, buf []byte) (n int, err error) { + fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), syscall.GENERIC_READ, 0, nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return -1, err + } + defer syscall.CloseHandle(fd) + + rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return -1, err + } + + rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) + var s string + switch rdb.ReparseTag { + case syscall.IO_REPARSE_TAG_SYMLINK: + data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2]) + if data.Flags&_SYMLINK_FLAG_RELATIVE == 0 { + if len(s) >= 4 && s[:4] == `\??\` { + s = s[4:] + switch { + case len(s) >= 2 && s[1] == ':': // \??\C:\foo\bar + // do nothing + case len(s) >= 4 && s[:4] == `UNC\`: // \??\UNC\foo\bar + s = `\\` + s[4:] + default: + // unexpected; do nothing + } + } else { + // unexpected; do nothing + } + } + case _IO_REPARSE_TAG_MOUNT_POINT: + data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2]) + if len(s) >= 4 && s[:4] == `\??\` { // \??\C:\foo\bar + if len(s) < 48 || s[:11] != `\??\Volume{` { + s = s[4:] + } + } else { + // unexpected; do nothing + } + default: + // the path is not a symlink or junction but another type of reparse + // point + return -1, syscall.ENOENT + } + n = copy(buf, []byte(s)) + + return n, nil +} diff --git a/vendor/github.com/containerd/continuity/sysx/README.md b/vendor/github.com/containerd/continuity/sysx/README.md new file mode 100644 index 0000000000..ad7aee5331 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/README.md @@ -0,0 +1,3 @@ +This package is for internal use only. It is intended to only have +temporary changes before they are upstreamed to golang.org/x/sys/ +(a.k.a. https://github.com/golang/sys). diff --git a/vendor/github.com/containerd/continuity/sysx/file_posix.go b/vendor/github.com/containerd/continuity/sysx/file_posix.go new file mode 100644 index 0000000000..e28f3a1b57 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/file_posix.go @@ -0,0 +1,128 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package sysx
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/containerd/continuity/syscallx"
+)
+
+// Readlink returns the destination of the named symbolic link.
+// If there is an error, it will be of type *PathError.
+func Readlink(name string) (string, error) {
+	for len := 128; ; len *= 2 {
+		b := make([]byte, len)
+		n, e := fixCount(syscallx.Readlink(fixLongPath(name), b))
+		if e != nil {
+			return "", &os.PathError{Op: "readlink", Path: name, Err: e}
+		}
+		if n < len {
+			return string(b[0:n]), nil
+		}
+	}
+}
+
+// Many functions in package syscall return a count of -1 instead of 0.
+// Using fixCount(call()) instead of call() corrects the count.
+func fixCount(n int, err error) (int, error) {
+	if n < 0 {
+		n = 0
+	}
+	return n, err
+}
+
+// fixLongPath returns the extended-length (\\?\-prefixed) form of
+// path when needed, in order to avoid the default 260 character file
+// path limit imposed by Windows. If path is not easily converted to
+// the extended-length form (for example, if path is a relative path
+// or contains .. elements), or is short enough, fixLongPath returns
+// path unmodified.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+func fixLongPath(path string) string {
+	// Do nothing (and don't allocate) if the path is "short".
+	// Empirically (at least on the Windows Server 2013 builder),
+	// the kernel is arbitrarily okay with < 248 bytes. That
+	// matches what the docs above say:
+	// "When using an API to create a directory, the specified
+	// path cannot be so long that you cannot append an 8.3 file
+	// name (that is, the directory name cannot exceed MAX_PATH
+	// minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
+	//
+	// The MSDN docs appear to say that a normal path that is 248 bytes long
+	// will work; empirically the path must be less than 248 bytes long.
+	if len(path) < 248 {
+		// Don't fix. (This is how Go 1.7 and earlier worked,
+		// not automatically generating the \\?\ form)
+		return path
+	}
+
+	// The extended form begins with \\?\, as in
+	// \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
+	// The extended form disables evaluation of . and .. path
+	// elements and disables the interpretation of / as equivalent
+	// to \. The conversion here rewrites / to \ and elides
+	// . elements as well as trailing or duplicate separators. For
+	// simplicity it avoids the conversion entirely for relative
+	// paths or paths containing .. elements. For now,
+	// \\server\share paths are not converted to
+	// \\?\UNC\server\share paths because the rules for doing so
+	// are less well-specified.
+	if len(path) >= 2 && path[:2] == `\\` {
+		// Don't canonicalize UNC paths.
+ return path + } + if !filepath.IsAbs(path) { + // Relative path + return path + } + + const prefix = `\\?` + + pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) + copy(pathbuf, prefix) + n := len(path) + r, w := 0, len(prefix) + for r < n { + switch { + case os.IsPathSeparator(path[r]): + // empty block + r++ + case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): + // /./ + r++ + case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): + // /../ is currently unhandled + return path + default: + pathbuf[w] = '\\' + w++ + for ; r < n && !os.IsPathSeparator(path[r]); r++ { + pathbuf[w] = path[r] + w++ + } + } + } + // A drive's root directory needs a trailing \ + if w == len(`\\?\c:`) { + pathbuf[w] = '\\' + w++ + } + return string(pathbuf[:w]) +} diff --git a/vendor/github.com/containerd/continuity/sysx/generate.sh b/vendor/github.com/containerd/continuity/sysx/generate.sh new file mode 100644 index 0000000000..87d708d7ae --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/generate.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl" + +fix() { + sed 's,^package syscall$,package sysx,' \ + | sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \ + | gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \ + | gofmt -r='Syscall6 -> syscall.Syscall6' \ + | gofmt -r='Syscall -> syscall.Syscall' \ + | gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \ + | gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \ + | gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \ + | gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \ + | gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \ + | gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \ + | gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \ + | gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR' +} + +if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then + echo "Must specify \$GOARCH and \$GOOS" + exit 1 +fi + +mkargs="" + +if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then + mkargs="-l32" +fi + +for f in "$@"; do + $mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go" +done + diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_linux.go b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go new file mode 100644 index 0000000000..28ce5d8de3 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "syscall" +) + +const ENODATA = syscall.ENODATA diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go new file mode 100644 index 0000000000..e0575f4468 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go @@ -0,0 +1,24 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "syscall" +) + +// This should actually be a set that contains ENOENT and EPERM +const ENODATA = syscall.ENOENT diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_unix.go b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go new file mode 100644 index 0000000000..b26f5b3d03 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go @@ -0,0 +1,25 @@ +// +build darwin freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "syscall" +) + +const ENODATA = syscall.ENOATTR diff --git a/vendor/github.com/containerd/continuity/sysx/xattr.go b/vendor/github.com/containerd/continuity/sysx/xattr.go new file mode 100644 index 0000000000..9e4326dcfe --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr.go @@ -0,0 +1,125 @@ +// +build linux darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package sysx + +import ( + "bytes" + "syscall" + + "golang.org/x/sys/unix" +) + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return listxattrAll(path, unix.Listxattr) +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unix.Removexattr(path, attr) +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unix.Setxattr(path, attr, data, flags) +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, unix.Getxattr) +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return listxattrAll(path, unix.Llistxattr) +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unix.Lremovexattr(path, attr) +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unix.Lsetxattr(path, attr, data, flags) +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, unix.Lgetxattr) +} + +const defaultXattrBufferSize = 5 + +type listxattrFunc func(path string, dest []byte) (int, error) + +func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) { + var p []byte // nil on first execution + + for { + n, err := listFunc(path, p) // first call gets buffer size. + if err != nil { + return nil, err + } + + if n > len(p) { + p = make([]byte, n) + continue + } + + p = p[:n] + + ps := bytes.Split(bytes.TrimSuffix(p, []byte{0}), []byte{0}) + var entries []string + for _, p := range ps { + s := string(p) + if s != "" { + entries = append(entries, s) + } + } + + return entries, nil + } +} + +type getxattrFunc func(string, string, []byte) (int, error) + +func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) { + p := make([]byte, defaultXattrBufferSize) + for { + n, err := getFunc(path, attr, p) + if err != nil { + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERANGE { + p = make([]byte, len(p)*2) // this can't be ideal. + continue // try again! + } + + return nil, err + } + + // realloc to correct size and repeat + if n > len(p) { + p = make([]byte, n) + continue + } + + return p[:n], nil + } +} diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go new file mode 100644 index 0000000000..c9ef3a1d25 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go @@ -0,0 +1,67 @@ +// +build !linux,!darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package sysx + +import ( + "errors" + "runtime" +) + +var unsupported = errors.New("extended attributes unsupported on " + runtime.GOOS) + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return []string{}, nil +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unsupported +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return []byte{}, unsupported +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return []string{}, nil +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unsupported +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return []byte{}, nil +} diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index 1f78150739..66774c2266 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -29,7 +29,7 @@ env: #### # Command to prefix every output line with a timestamp # (can't do inline awk script, Cirrus-CI or YAML mangles quoting) - _TIMESTAMP: 'awk --file ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk' + _TIMESTAMP: 'awk -f ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk' _DFCMD: 'df -lhTx tmpfs' _RAUDITCMD: 'cat /var/log/audit/audit.log' _UAUDITCMD: 'cat /var/log/kern.log' @@ -75,9 +75,37 @@ testing_task: failure_journal_log_script: '${_JOURNALCMD} || true' +# This task runs `make vendor` followed by ./hack/tree_status.sh to check +# whether the git tree is clean. The reasoning for that is to make sure +# that the vendor.conf, the code and the vendored packages in ./vendor are +# in sync at all times. 
+vendor_task: + + only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*' + + env: + CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah" + GOPATH: "/go" + GOSRC: "/go/src/github.com/containers/buildah" + + # Runs within Cirrus's "community cluster" + container: + image: docker.io/library/golang:1.13 + cpu: 1 + memory: 1 + + timeout_in: 30m + + vendor_script: + - 'cd ${CIRRUS_WORKING_DIR} && make vendor' + - 'cd ${CIRRUS_WORKING_DIR} && ./hack/tree_status.sh' + # Update metadata on VM images referenced by this repository state meta_task: + depends_on: + - "vendor" + container: image: "quay.io/libpod/imgts:latest" # see contrib/imgts cpu: 1 diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml index 044bc1f1b5..dde37ad790 100644 --- a/vendor/github.com/containers/buildah/.golangci.yml +++ b/vendor/github.com/containers/buildah/.golangci.yml @@ -2,7 +2,6 @@ run: build-tags: - apparmor - - ostree - seccomp - selinux concurrency: 6 diff --git a/vendor/github.com/containers/buildah/.papr.sh b/vendor/github.com/containers/buildah/.papr.sh index 25ab4c29dc..2795e9ec05 100644 --- a/vendor/github.com/containers/buildah/.papr.sh +++ b/vendor/github.com/containers/buildah/.papr.sh @@ -26,7 +26,6 @@ dnf install -y \ libselinux-utils \ make \ openssl \ - ostree-devel \ skopeo-containers \ which diff --git a/vendor/github.com/containers/buildah/.papr.yml b/vendor/github.com/containers/buildah/.papr.yml index 6eaba332cd..4be12a18e9 100644 --- a/vendor/github.com/containers/buildah/.papr.yml +++ b/vendor/github.com/containers/buildah/.papr.yml @@ -68,7 +68,6 @@ packages: - golang - libassuan-devel - make - - ostree-devel - skopeo-containers required: false diff --git a/vendor/github.com/containers/buildah/.travis.yml b/vendor/github.com/containers/buildah/.travis.yml index 8379c649d3..a741082304 100644 --- a/vendor/github.com/containers/buildah/.travis.yml +++ b/vendor/github.com/containers/buildah/.travis.yml @@ -2,9 +2,10 @@ language: go dist: xenial sudo: required go: - - 1.11.x - 1.12.x + - 1.13.x - tip +go_import_path: github.com/containers/buildah env: global: @@ -39,9 +40,6 @@ matrix: services: - docker before_install: - - make vendor - - ./hack/tree_status.sh - - sudo apt-get update - sudo apt-get -qq install software-properties-common - sudo add-apt-repository -y ppa:duggan/bats - sudo apt-get update @@ -50,15 +48,6 @@ before_install: - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce - mkdir /home/travis/auth - sudo mkdir -p /var/lib/containers/storage/overlay - - > - OSTREE_VERSION=v2019.2; - git clone https://github.com/ostreedev/ostree && - pushd ostree && - git checkout $OSTREE_VERSION && - ./autogen.sh --prefix=/usr && - sudo make -j4 install && - popd && - sudo rm -rf ostree install: # Let's create a self signed certificate and get it in the right places - hostname @@ -97,7 +86,7 @@ script: # Setting up Docker Registry is complete, let's do Buildah testing! 
- make install.tools -j4 - make install.libseccomp.sudo all runc validate lint SECURITYTAGS="apparmor seccomp" - - go test -c -tags "apparmor seccomp `./btrfs_tag.sh` `./libdm_tag.sh` `./ostree_tag.sh` `./selinux_tag.sh`" ./cmd/buildah + - go test -c -tags "apparmor seccomp `./btrfs_tag.sh` `./libdm_tag.sh` `./selinux_tag.sh`" ./cmd/buildah - tmp=`mktemp -d`; mkdir $tmp/root $tmp/runroot; sudo PATH="$PATH" ./buildah.test -test.v --root $tmp/root --runroot $tmp/runroot --storage-driver vfs --signature-policy `pwd`/tests/policy.json --registries-conf `pwd`/tests/registries.conf - cd tests; sudo PATH="$PATH" ./test_runner.sh - cd .. diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 61c29b2008..b41ff83504 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,59 @@ # Changelog +## v1.11.4 (2019-10-28) + buildah: add a "manifest" command + manifests: add the module + pkg/supplemented: add a package for grouping images together + pkg/manifests: add a manifest list build/manipulation API + Update for ErrUnauthorizedForCredentials API change in containers/image + Update for manifest-lists API changes in containers/image + version: also note the version of containers/image + Move to containers/image v5.0.0 + Enable --device directory as src device + Fix git build with branch specified + Bump github.com/openshift/imagebuilder from 1.1.0 to 1.1.1 + Bump github.com/fsouza/go-dockerclient from 1.4.4 to 1.5.0 + Add clarification to the Tutorial for new users + Silence "using cache" to ensure -q is fully quiet + Add OWNERS File to Buildah + Bump github.com/containers/storage from 1.13.4 to 1.13.5 + Move runtime flag to bud from common + Commit: check for storage.ErrImageUnknown using errors.Cause() + Fix crash when invalid COPY --from flag is specified. + Bump back to v1.12.0-dev + +## v1.11.3 (2019-10-04) + Update c/image to v4.0.1 + Bump github.com/spf13/pflag from 1.0.3 to 1.0.5 + Fix --build-args handling + Bump github.com/spf13/cobra from 0.0.3 to 0.0.5 + Bump github.com/cyphar/filepath-securejoin from 0.2.1 to 0.2.2 + Bump github.com/onsi/ginkgo from 1.8.0 to 1.10.1 + Bump github.com/fsouza/go-dockerclient from 1.3.0 to 1.4.4 + Add support for retrieving context from stdin "-" + Ensure bud remote context cleans up on error + info: add cgroups2 + Bump github.com/seccomp/libseccomp-golang from 0.9.0 to 0.9.1 + Bump github.com/mattn/go-shellwords from 1.0.5 to 1.0.6 + Bump github.com/stretchr/testify from 1.3.0 to 1.4.0 + Bump github.com/opencontainers/selinux from 1.2.2 to 1.3.0 + Bump github.com/etcd-io/bbolt from 1.3.2 to 1.3.3 + Bump github.com/onsi/gomega from 1.5.0 to 1.7.0 + update c/storage to v1.13.4 + Print build 'STEP' line to stdout, not stderr + Fix travis-ci on forks + Vendor c/storage v1.13.3 + Use Containerfile by default + Added tutorial on how to include Buildah as library + util/util: Fix "configuraitno" -> "configuration" log typo + Bump back to v1.12.0-dev + +## v1.11.2 (2019-09-13) + Add some cleanup code + Move devices code to unit specific directory. 
+ Bump back to v1.12.0-dev + ## v1.11.1 (2019-09-11) Add --devices flag to bud and from Downgrade .papr to highest atomic verion diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index cb0dfeb51c..9d04177d0f 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -2,7 +2,7 @@ export GOPROXY=https://proxy.golang.org SELINUXTAG := $(shell ./selinux_tag.sh) APPARMORTAG := $(shell hack/apparmor_tag.sh) -STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./ostree_tag.sh) +STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) SECURITYTAGS ?= seccomp $(SELINUXTAG) $(APPARMORTAG) TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) BUILDTAGS += $(TAGS) @@ -24,7 +24,7 @@ endif GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed")) SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) -STATIC_STORAGETAGS = "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" +STATIC_STORAGETAGS = "containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" CNI_COMMIT := $(shell sed -n 's;\tgithub.com/containernetworking/cni \([^ \n]*\).*$\;\1;p' go.mod) #RUNC_COMMIT := $(shell sed -n 's;\tgithub.com/opencontainers/runc \([^ \n]*\).*$\;\1;p' go.mod) @@ -130,6 +130,9 @@ test-unit: tests/testreport/testreport mkdir -p $$tmp/root $$tmp/runroot; \ $(GO) test -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" ./cmd/buildah -args -root $$tmp/root -runroot $$tmp/runroot -storage-driver vfs -signature-policy $(shell pwd)/tests/policy.json -registries-conf $(shell pwd)/tests/registries.conf +vendor-in-container: + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.13 make vendor + .PHONY: vendor vendor: export GO111MODULE=on \ diff --git a/vendor/github.com/containers/buildah/OWNERS b/vendor/github.com/containers/buildah/OWNERS new file mode 100644 index 0000000000..5e6cbfdfaf --- /dev/null +++ b/vendor/github.com/containers/buildah/OWNERS @@ -0,0 +1,22 @@ +approvers: + - TomSweeneyRedHat + - cevich + - giuseppe + - nalind + - rhatdan + - vrothberg +reviewers: + - QiWang19 + - TomSweeneyRedHat + - baude + - cevich + - edsantiago + - giuseppe + - haircommander + - jwhonce + - mheon + - mrunalp + - nalind + - rhatdan + - umohnani8 + - vrothberg diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index b482fe1be8..d57eea818e 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -12,7 +12,7 @@ import ( "github.com/containers/buildah/docker" "github.com/containers/buildah/util" - "github.com/containers/image/types" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/ioutils" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -27,7 +27,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.11.2" + Version = "1.12.0-dev" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. 
// This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 2ae070e8cc..6e98e54050 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,51 @@ +- Changelog for v1.11.4 (2019-10-28) + * buildah: add a "manifest" command + * manifests: add the module + * pkg/supplemented: add a package for grouping images together + * pkg/manifests: add a manifest list build/manipulation API + * Update for ErrUnauthorizedForCredentials API change in containers/image + * Update for manifest-lists API changes in containers/image + * version: also note the version of containers/image + * Move to containers/image v5.0.0 + * Enable --device directory as src device + * Fix git build with branch specified + * Bump github.com/openshift/imagebuilder from 1.1.0 to 1.1.1 + * Bump github.com/fsouza/go-dockerclient from 1.4.4 to 1.5.0 + * Add clarification to the Tutorial for new users + * Silence "using cache" to ensure -q is fully quiet + * Add OWNERS File to Buildah + * Bump github.com/containers/storage from 1.13.4 to 1.13.5 + * Move runtime flag to bud from common + * Commit: check for storage.ErrImageUnknown using errors.Cause() + * Fix crash when invalid COPY --from flag is specified. + * Bump back to v1.12.0-dev + +- Changelog for v1.11.3 (2019-10-04) + * Update c/image to v4.0.1 + * Bump github.com/spf13/pflag from 1.0.3 to 1.0.5 + * Fix --build-args handling + * Bump github.com/spf13/cobra from 0.0.3 to 0.0.5 + * Bump github.com/cyphar/filepath-securejoin from 0.2.1 to 0.2.2 + * Bump github.com/onsi/ginkgo from 1.8.0 to 1.10.1 + * Bump github.com/fsouza/go-dockerclient from 1.3.0 to 1.4.4 + * Add support for retrieving context from stdin "-" + * Ensure bud remote context cleans up on error + * info: add cgroups2 + * Bump github.com/seccomp/libseccomp-golang from 0.9.0 to 0.9.1 + * Bump github.com/mattn/go-shellwords from 1.0.5 to 1.0.6 + * Bump github.com/stretchr/testify from 1.3.0 to 1.4.0 + * Bump github.com/opencontainers/selinux from 1.2.2 to 1.3.0 + * Bump github.com/etcd-io/bbolt from 1.3.2 to 1.3.3 + * Bump github.com/onsi/gomega from 1.5.0 to 1.7.0 + * update c/storage to v1.13.4 + * Print build 'STEP' line to stdout, not stderr + * Fix travis-ci on forks + * Vendor c/storage v1.13.3 + * Use Containerfile by default + * Added tutorial on how to include Buildah as library + * util/util: Fix "configuraitno" -> "configuration" log typo + * Bump back to v1.12.0-dev + - Changelog for v1.11.2 (2019-09-13) * Add some cleanup code * Move devices code to unit specific directory. 
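The commit.go hunk that follows changes a sentinel comparison from `err != storage.ErrImageUnknown` to `errors.Cause(err) != storage.ErrImageUnknown` (matching the changelog entry "Commit: check for storage.ErrImageUnknown using errors.Cause()"). The point is that once a sentinel error has been wrapped with github.com/pkg/errors, it no longer compares equal to the sentinel itself; errors.Cause unwraps back to the root error. A minimal sketch — errImageUnknown below is a stand-in for storage.ErrImageUnknown, not the real variable:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// errImageUnknown stands in for storage.ErrImageUnknown.
var errImageUnknown = errors.New("image not known")

func main() {
	// containers/storage can return the sentinel wrapped with context.
	wrapped := errors.Wrapf(errImageUnknown, "error locating image %q", "example")

	fmt.Println(wrapped == errImageUnknown)               // false: wrapping hides the sentinel
	fmt.Println(errors.Cause(wrapped) == errImageUnknown) // true: Cause unwraps to the root error
}
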
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index b3b56f39ae..4df3b99088 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -12,14 +12,14 @@ import ( "github.com/containers/buildah/pkg/blobcache" "github.com/containers/buildah/util" - cp "github.com/containers/image/copy" - "github.com/containers/image/docker" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/stringid" @@ -96,7 +96,7 @@ type PushOptions struct { // github.com/containers/image/types SystemContext to hold credentials // and other authentication/authorization information. SystemContext *types.SystemContext - // ManifestType is the format to use when saving the imge using the 'dir' transport + // ManifestType is the format to use when saving the image using the 'dir' transport // possible options are oci, v2s1, and v2s2 ManifestType string // BlobDirectory is the name of a directory in which we'll look for @@ -309,7 +309,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options } img, err := is.Transport.GetStoreImage(b.store, dest) - if err != nil && err != storage.ErrImageUnknown { + if err != nil && errors.Cause(err) != storage.ErrImageUnknown { return imgID, nil, "", errors.Wrapf(err, "error locating image %q in local storage", transports.ImageName(dest)) } if err == nil { diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go index c08541ac70..d2e9dc732a 100644 --- a/vendor/github.com/containers/buildah/common.go +++ b/vendor/github.com/containers/buildah/common.go @@ -6,8 +6,8 @@ import ( "path/filepath" "github.com/containers/buildah/pkg/unshare" - cp "github.com/containers/image/copy" - "github.com/containers/image/types" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/types" "github.com/containers/storage" ) diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index 0292ea43c6..617619e45c 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -8,9 +8,9 @@ import ( "time" "github.com/containers/buildah/docker" - "github.com/containers/image/manifest" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/stringid" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go index 2011619f4e..561287ac27 100644 --- a/vendor/github.com/containers/buildah/docker/types.go +++ 
b/vendor/github.com/containers/buildah/docker/types.go @@ -7,8 +7,8 @@ package docker import ( "time" - "github.com/containers/image/pkg/strslice" - "github.com/opencontainers/go-digest" + "github.com/containers/image/v5/pkg/strslice" + digest "github.com/opencontainers/go-digest" ) // github.com/moby/moby/image/rootfs.go diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index bf30e9925d..c8741b7812 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -3,59 +3,49 @@ module github.com/containers/buildah go 1.12 require ( - github.com/VividCortex/ewma v1.1.1 // indirect github.com/blang/semver v3.5.0+incompatible // indirect - github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b // indirect - github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50 // indirect - github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect github.com/containernetworking/cni v0.7.1 - github.com/containers/image v3.0.2+incompatible - github.com/containers/storage v1.13.2 - github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect - github.com/cyphar/filepath-securejoin v0.2.1 - github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 + github.com/containers/image/v5 v5.0.0 + github.com/containers/storage v1.13.5 + github.com/cyphar/filepath-securejoin v0.2.2 + github.com/docker/distribution v2.7.1+incompatible github.com/docker/docker-credential-helpers v0.6.1 // indirect + github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.4.0 github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 - github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect - github.com/etcd-io/bbolt v1.3.2 - github.com/fsouza/go-dockerclient v1.3.0 + github.com/etcd-io/bbolt v1.3.3 + github.com/fsouza/go-dockerclient v1.5.0 github.com/ghodss/yaml v1.0.0 github.com/hashicorp/go-multierror v1.0.0 github.com/imdario/mergo v0.3.6 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 // indirect - github.com/mattn/go-isatty v0.0.4 // indirect - github.com/mattn/go-shellwords v1.0.5 - github.com/moby/moby v0.0.0-20171005181806-f8806b18b4b9 // indirect - github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 // indirect - github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c // indirect - github.com/onsi/ginkgo v1.8.0 - github.com/onsi/gomega v1.5.0 + github.com/mattn/go-shellwords v1.0.6 + github.com/morikuni/aec v1.0.0 // indirect + github.com/onsi/ginkgo v1.10.2 + github.com/onsi/gomega v1.7.0 github.com/opencontainers/go-digest v1.0.0-rc1 - github.com/opencontainers/image-spec v1.0.1 + github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158 github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 github.com/opencontainers/runtime-tools v0.9.0 - github.com/opencontainers/selinux v1.2.2 + github.com/opencontainers/selinux v1.3.0 github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible - github.com/openshift/imagebuilder v1.1.0 + github.com/openshift/imagebuilder v1.1.1 github.com/pkg/errors v0.8.1 github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4 - github.com/seccomp/libseccomp-golang v0.9.0 + github.com/seccomp/libseccomp-golang v0.9.1 github.com/sirupsen/logrus v1.4.2 
- github.com/spf13/cobra v0.0.3 - github.com/spf13/pflag v1.0.3 - github.com/stretchr/testify v1.3.0 + github.com/spf13/cobra v0.0.5 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.4.0 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 - github.com/ulikunitz/xz v0.5.5 // indirect - github.com/urfave/cli v1.21.0 // indirect - github.com/vbauerster/mpb v3.4.0+incompatible // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/vishvananda/netlink v1.0.0 // indirect + github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect github.com/xeipuuv/gojsonschema v1.1.0 // indirect - golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 - golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb + golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad + golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 + golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 // indirect + google.golang.org/grpc v1.24.0 // indirect k8s.io/api v0.0.0-20190813020757-36bff7324fb7 // indirect k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083 // indirect ) diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index 6b69f2f360..4a6673b048 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -1,82 +1,79 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= +github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/BurntSushi/toml v0.2.0 h1:OthAm9ZSUx4uAmn3WbPwc06nowWrByRwBsYRhbmFjBs= -github.com/BurntSushi/toml v0.2.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo= github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/hcsshim v0.8.3 h1:KWCdVGOju81E0RL4ndn9/E6I4qMBi6kuPw1W4yBYlCw= -github.com/Microsoft/hcsshim v0.8.3/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= 
-github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/continuity v0.0.0-20180814194400-c7c5070e6f6e/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containernetworking/cni v0.7.0-rc2 h1:2GGDhbwdWPY53iT7LXy+LBP76Ch2D/hnw1U2zVFfGbk= -github.com/containernetworking/cni v0.7.0-rc2/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containers/image v2.0.0+incompatible h1:FTr6Br7jlIKNCKMjSOMbAxKp2keQ0//jzJaYNTVhauk= -github.com/containers/image v2.0.0+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= -github.com/containers/image v2.0.1+incompatible h1:w39mlElA/aSFZ6moFa5N+A4MWu9c8hgdMiMMYnH94Hs= -github.com/containers/image v2.0.1+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= -github.com/containers/image v3.0.0+incompatible h1:pdUHY//H+3jYNnoTt+rqY8NsStX4ZBLKzPTlMC+XvnU= -github.com/containers/image v3.0.0+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= 
-github.com/containers/image v3.0.1+incompatible h1:VlNEQUI1JHa1SJfJ4jz/GBt7gpk+aRYGR6TUKsxXMkU= -github.com/containers/image v3.0.1+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= -github.com/containers/image v3.0.2+incompatible h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE= -github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= -github.com/containers/storage v1.12.10-0.20190725063046-8038df61d6f6 h1:c7Fq9bbRl0Ua6swRHAH8rkrK2fSt6K+ZBrXHD50kDR4= -github.com/containers/storage v1.12.10-0.20190725063046-8038df61d6f6/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= -github.com/containers/storage v1.12.13 h1:GtaLCY8p1Drlk1Oew581jGvB137UaO+kpz0HII67T0A= -github.com/containers/storage v1.12.13/go.mod h1:+RirK6VQAqskQlaTBrOG6ulDvn4si2QjFE1NZCn06MM= -github.com/containers/storage v1.12.14 h1:S1QGlC15gj5JOvB73W5tpVBApS4I7b/6rvxfflBAg+Q= -github.com/containers/storage v1.12.14/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= -github.com/containers/storage v1.12.15 h1:nN/RxtEe4ejasGVJqzy+y5++pIYp54XPXzRO46xXnns= -github.com/containers/storage v1.12.15/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= -github.com/containers/storage v1.12.16 h1:zePYS1GiG8CuRqLCeA0ufx4X27K06HcJLV50DdojL+Y= -github.com/containers/storage v1.12.16/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= -github.com/containers/storage v1.13.1 h1:rjVirLS9fCGkUFlLDZEoGDDUugtIf46DufWvJu08wxQ= -github.com/containers/storage v1.13.1/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= -github.com/containers/storage v1.13.2 h1:UXZ0Ckmk6+6+4vj2M2ywruVtH97pnRoAhTG8ctd+yQI= -github.com/containers/storage v1.13.2/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/cyphar/filepath-securejoin v0.2.1 h1:5DPkzz/0MwUpvR4fxASKzgApeq2OMFY5FfYtrX28Coo= -github.com/cyphar/filepath-securejoin v0.2.1/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/containers/image/v4 v4.0.1 h1:idNGHChj0Pyv3vLrxul2oSVMZLeFqpoq3CjLeVgapSQ= +github.com/containers/image/v4 v4.0.1/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA= +github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4= +github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY= +github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= +github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/storage v1.13.4 h1:j0bBaJDKbUHtAW1MXPFnwXJtqcH+foWeuXK1YaBV5GA= +github.com/containers/storage v1.13.4/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= +github.com/containers/storage v1.13.5 h1:/SUzGeOP2HDijpF7Yur21Ch6WTZC1BNeZF917CWcp5c= +github.com/containers/storage v1.13.5/go.mod h1:HELz8Sn+UVbPaUZMI8RvIG9doD4y4z6Gtg4k7xdd2ZY= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod 
h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 h1:4zlOyrJUbYnrvlzChJ+jP2J3i77Jbhm336NEuCv7kZo= github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23 h1:mJtkfC9RUrUWHMk0cFDNhVoc9U3k2FRAzEZ+5pqSIHo= -github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU= +github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce h1:H3csZuxZESJeeEiOxq4YXPNmLFbjl7u2qVBrAAGX/sA= +github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.1 h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g= github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.0.0-20180212134524-7beb39f0b969/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.8.0-dev.2.0.20180608203834-19279f049241/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehPjPiGUaWdwgOl92xRyFHJyaqXDHcCyW9M6nmCK4= github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= @@ -84,46 +81,69 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNE github.com/docker/spdystream 
v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/etcd-io/bbolt v1.3.2 h1:RLRQ0TKLX7DlBRXAJHvbmXL17Q3KNnTBtZ9B6Qo+/Y0= -github.com/etcd-io/bbolt v1.3.2/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsouza/go-dockerclient v1.3.0 h1:tOXkq/5++XihrAvH5YNwCTdPeQg3XVcC6WI2FVy4ZS0= -github.com/fsouza/go-dockerclient v1.3.0/go.mod h1:IN9UPc4/w7cXiARH2Yg99XxUHbAM+6rAi9hzBVbkWRU= +github.com/fsouza/go-dockerclient v1.4.4 h1:Sd5nD4wdAgiPxvrbYUzT2ZZNmPk3z+GGnZ+frvw8z04= +github.com/fsouza/go-dockerclient v1.4.4/go.mod h1:PrwszSL5fbmsESocROrOGq/NULMXRw+bajY0ltzD6MA= +github.com/fsouza/go-dockerclient v1.5.0 h1:7OtayOe5HnoG+KWMHgyyPymwaodnB2IDYuVfseKyxbA= +github.com/fsouza/go-dockerclient v1.5.0/go.mod h1:AqZZK/zFO3phxYxlTsAaeAMSdQ9mgHuhy+bjN034Qds= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v0.0.0-20170815085658-fcdc5011193f/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= 
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v0.0.0-20170217192616-94e7d24fd285/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM= +github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod 
h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -131,59 +151,80 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 h1:NAAiV9ass6VReWFjuxqrMIq12WKlSULI6Gs3PxQghLA= github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E= -github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y= github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI= +github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-shellwords v1.0.3 h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/moby/moby v0.0.0-20171005181806-f8806b18b4b9/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c h1:xa+eQWKuJ9MbB9FBL/eoNvDFvveAkz2LQoz8PzX7Q/4= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTcW2dxoD/SO3n2enrgWl3y6Dnx4m59GvcA= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= 
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94= +github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8 h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/aNDyhECt0= github.com/opencontainers/runc v1.0.0-rc8/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -195,110 +236,170 @@ github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg= github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/openshift/api v3.9.0+incompatible h1:fJ/KsefYuZAjmrr3+5U9yZIZbTOpVkDDLDLFresAeYs= +github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g= +github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible h1:s55wx8JIG/CKnewev892HifTBrtKzMdvgB3rm4rxC2s= github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= github.com/openshift/imagebuilder v1.1.0 h1:oT704SkwMEzmIMU/+Uv1Wmvt+p10q3v2WuYMeFI18c4= github.com/openshift/imagebuilder v1.1.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= -github.com/ostreedev/ostree-go v0.0.0-20181112201119-9ab99253d365 h1:5DKEDlc/DLftia3h4tk5K0KBiqBXogCc6EarWTlD3fM= -github.com/ostreedev/ostree-go v0.0.0-20181112201119-9ab99253d365/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= 
+github.com/openshift/imagebuilder v1.1.1 h1:KAUR31p8UBJdfVO42azWgb+LeMAed2zaKQ19e0C0X2I= +github.com/openshift/imagebuilder v1.1.1/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13 h1:AUK/hm/tPsiNNASdb3J8fySVRZoI7fnK5mlOvdFD43o= -github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9 h1:kyf9snWXHvQc+yxE9imhdI8YAm4oKeZISlaAR+x73zs= +github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4 h1:rOG9oHVIndNR14f3HRyBy9UPQYmIPniWqTU1TDdHhq4= github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4/go.mod h1:f/98/SnvAzhAEFQJ3u836FePXvcbE8BS0YGMQNn4mhA= -github.com/seccomp/libseccomp-golang v0.9.0 
h1:S1pmhdFh5spQtVojA+4GUdWBqvI8ydYHxrx8iR6xN8o= -github.com/seccomp/libseccomp-golang v0.9.0/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck= -github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/urfave/cli 
v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ= -github.com/vbatts/tar-split v0.10.2 h1:CXd7HEKGkTLjBMinpObcJZU5Hm8EKlor2a1JtX6msXQ= -github.com/vbatts/tar-split v0.10.2/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8= +github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE= github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= -github.com/vbauerster/mpb v3.3.4+incompatible h1:DDIhnwmgTQIDZo+SWlEr5d6mJBxkOLBwCXPzunhEfJ4= -github.com/vbauerster/mpb v3.3.4+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU= github.com/vbauerster/mpb v3.4.0+incompatible h1:mfiiYw87ARaeRW6x5gWwYRUawxaW1tLAD8IceomUCNw= github.com/vbauerster/mpb v3.4.0+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU= +github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM= github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vrothberg/storage v0.0.0-20190724065215-a1e42fd78930 h1:/LeIxi2kj5UYTJR9W35t5Pq2gqz03ZNoTURchTH3vc0= -github.com/vrothberg/storage v0.0.0-20190724065215-a1e42fd78930/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= +github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f h1:nBX3nTcmxEtHSERBJaIo1Qa26VwRaopnZmfDQUXsF4I= +github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20190816131739-be0936907f66/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg= github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc h1:F5tKCVGp+MUAHhKp5MZtGqAlGX3+oCsiL1Q629FL90M= -golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190107210223-45ffb0cd1ba0 h1:1DW40AJQ7AP4nY6ORUGUdkpXyEC9W2GAXcOPaMZK0K8= -golang.org/x/net v0.0.0-20190107210223-45ffb0cd1ba0/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 h1:6ZQFf1D2YYDDI7eSwW8adlkkavTB9sw5I24FVtEvNUQ= +golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzCANp9oXhu6zFBW6ODx4= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0 
h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -307,11 +408,14 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= -gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190813020757-36bff7324fb7 h1:4uJOjRn9kWq4AqJRE8+qzmAy+lJd9rh8TY455dNef4U= k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 h1:pyoq062NftC1y/OcnbSvgolyZDJ8y4fmUPWMkdA6gfU= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= +k8s.io/client-go v0.0.0-20170217214107-bcde30fb7eae/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083 h1:+Qf/nITucAbm09aIdxvoA+7X0BwaXmQGVoR8k7Ynk9o= k8s.io/client-go v0.0.0-20181219152756-3dd551c0f083/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -320,4 +424,5 @@ k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index cca7dd836b..79c75ce0bd 100644 --- 
a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -13,11 +13,11 @@ import ( "time" "github.com/containers/buildah/docker" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/manifest" - is "github.com/containers/image/storage" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" @@ -596,7 +596,7 @@ func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest * return i.manifest, i.manifestType, nil } -func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { +func (i *containerImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { return nil, nil } diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index f53018cd4d..6b2c9c84c2 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -13,12 +13,12 @@ import ( "strings" "github.com/containers/buildah" - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runtime-spec/specs-go" + specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/openshift/imagebuilder" "github.com/pkg/errors" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index c65c3bab40..27ec1bb23b 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -12,11 +12,11 @@ import ( "github.com/containers/buildah" "github.com/containers/buildah/util" - "github.com/containers/image/docker/reference" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -164,7 +164,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod stepCounter++ prefix := fmt.Sprintf("STEP %d: ", stepCounter) suffix := "\n" - fmt.Fprintf(exec.err, prefix+format+suffix, args...) + fmt.Fprintf(exec.out, prefix+format+suffix, args...) 
} } for arg := range options.Args { diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index e942b3b340..fad2bfe955 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -13,12 +13,12 @@ import ( "github.com/containers/buildah" buildahdocker "github.com/containers/buildah/docker" "github.com/containers/buildah/util" - cp "github.com/containers/image/copy" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" securejoin "github.com/cyphar/filepath-securejoin" @@ -253,7 +253,7 @@ func (s *StageExecutor) volumeCacheRestore() error { // don't care about the details of where in the filesystem the content actually // goes, because we're not actually going to add it here, so this is less // involved than Copy(). -func (s *StageExecutor) digestSpecifiedContent(node *parser.Node) (string, error) { +func (s *StageExecutor) digestSpecifiedContent(node *parser.Node, argValues []string) (string, error) { // No instruction: done. if node == nil { return "", nil @@ -297,7 +297,15 @@ func (s *StageExecutor) digestSpecifiedContent(node *parser.Node) (string, error } } } + for _, src := range srcs { + // If src has an argument within it, resolve it to its + // value. Otherwise just return the value found. + name, err := imagebuilder.ProcessWord(src, argValues) + if err != nil { + return "", errors.Wrapf(err, "unable to resolve source %q", src) + } + src = name if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { // Source is a URL. TODO: cache this content // somewhere, so that we can avoid pulling it down @@ -334,7 +342,14 @@ func (s *StageExecutor) digestSpecifiedContent(node *parser.Node) (string, error } s.builder.ContentDigester.Restart() download := strings.ToUpper(node.Value) == "ADD" - err := s.builder.Add(destination.Value, download, options, sources...) + + // If destination.Value has an argument within it, resolve it to its + // value. Otherwise just return the value found. + destValue, destErr := imagebuilder.ProcessWord(destination.Value, argValues) + if destErr != nil { + return "", errors.Wrapf(destErr, "unable to resolve destination %q", destination.Value) + } + err := s.builder.Add(destValue, download, options, sources...) 
if err != nil { return "", nil, errors.Wrapf(err, "error dry-running %q", node.Original) } @@ -744,6 +759,12 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b s.executor.log(commitMessage) } } + logCacheHit := func(cacheID string) { + if !s.executor.quiet { + cacheHitMessage := "--> Using cache" + fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, cacheID) + } + } logImageID := func(imgID string) { if s.executor.iidfile == "" { fmt.Fprintf(s.executor.out, "%s\n", imgID) @@ -801,6 +822,9 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b if strings.Contains(n, "--from") && (command == "COPY" || command == "ADD") { var mountPoint string arr := strings.Split(n, "=") + if len(arr) != 2 { + return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command) + } otherStage, ok := s.executor.stages[arr[1]] if !ok { if mountPoint, err = s.getImageRootfs(ctx, stage, arr[1]); err != nil { @@ -832,7 +856,7 @@ return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) } // In case we added content, retrieve its digest. - addedContentDigest, err := s.digestSpecifiedContent(node) + addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments()) if err != nil { return "", nil, err } @@ -881,7 +905,7 @@ // cached images so far, look for one that matches what we // expect to produce for this instruction. if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) { - addedContentDigest, err := s.digestSpecifiedContent(node) + addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments()) if err != nil { return "", nil, err } @@ -891,7 +915,7 @@ } if cacheID != "" { // Note the cache hit. - fmt.Fprintf(s.executor.out, "--> Using cache %s\n", cacheID) + logCacheHit(cacheID) } else { // We're not going to find any more cache hits. checkForLayers = false @@ -939,7 +963,7 @@ return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) } // In case we added content, retrieve its digest. 
- addedContentDigest, err := s.digestSpecifiedContent(node) + addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments()) if err != nil { return "", nil, err } diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go index 4b020bf419..520b92e3f1 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/util.go +++ b/vendor/github.com/containers/buildah/imagebuildah/util.go @@ -1,6 +1,8 @@ package imagebuildah import ( + "bufio" + "bytes" "fmt" "io/ioutil" "net/http" @@ -21,8 +23,15 @@ func cloneToDirectory(url, dir string) error { if !strings.HasPrefix(url, "git://") && !strings.HasSuffix(url, ".git") { url = "git://" + url } - logrus.Debugf("cloning %q to %q", url, dir) - cmd := exec.Command("git", "clone", url, dir) + gitBranch := strings.Split(url, "#") + var cmd *exec.Cmd + if len(gitBranch) < 2 { + logrus.Debugf("cloning %q to %q", url, dir) + cmd = exec.Command("git", "clone", url, dir) + } else { + logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir) + cmd = exec.Command("git", "clone", "-b", gitBranch[1], gitBranch[0], dir) + } return cmd.Run() } @@ -55,7 +64,25 @@ func downloadToDirectory(url, dir string) error { return nil } -// TempDirForURL checks if the passed-in string looks like a URL. If it is, +func stdinToDirectory(dir string) error { + logrus.Debugf("extracting stdin to %q", dir) + r := bufio.NewReader(os.Stdin) + b, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "Failed to read from stdin") + } + reader := bytes.NewReader(b) + if err := chrootarchive.Untar(reader, dir, nil); err != nil { + dockerfile := filepath.Join(dir, "Dockerfile") + // Assume this is a Dockerfile + if err := ioutil.WriteFile(dockerfile, b, 0600); err != nil { + return errors.Wrapf(err, "Failed to write bytes to %q", dockerfile) + } + } + return nil +} + +// TempDirForURL checks if the passed-in string looks like a URL or -. 
If it is, // TempDirForURL creates a temporary directory, arranges for its contents to be // the contents of that URL, and returns the temporary directory's path, along // with the name of a subdirectory which should be used as the build context @@ -66,7 +93,8 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") && !strings.HasPrefix(url, "git://") && - !strings.HasPrefix(url, "github.com/") { + !strings.HasPrefix(url, "github.com/") && + url != "-" { return "", "", nil } name, err = ioutil.TempDir(dir, prefix) @@ -76,7 +104,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err if strings.HasPrefix(url, "git://") || strings.HasSuffix(url, ".git") { err = cloneToDirectory(url, name) if err != nil { - if err2 := os.Remove(name); err2 != nil { + if err2 := os.RemoveAll(name); err2 != nil { logrus.Debugf("error removing temporary directory %q: %v", name, err2) } return "", "", err @@ -92,11 +120,22 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") { err = downloadToDirectory(url, name) if err != nil { - if err2 := os.Remove(name); err2 != nil { + if err2 := os.RemoveAll(name); err2 != nil { + logrus.Debugf("error removing temporary directory %q: %v", name, err2) + } + return "", subdir, err + } + return name, subdir, nil + } + if url == "-" { + err = stdinToDirectory(name) + if err != nil { + if err2 := os.RemoveAll(name); err2 != nil { logrus.Debugf("error removing temporary directory %q: %v", name, err2) } return "", subdir, err } + logrus.Debugf("Build context is at %q", name) return name, subdir, nil } logrus.Debugf("don't know how to retrieve %q", url) diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index b01d4d07b3..751ce6ae1c 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -5,11 +5,11 @@ import ( "github.com/containers/buildah/docker" "github.com/containers/buildah/util" - "github.com/containers/image/manifest" - is "github.com/containers/image/storage" - "github.com/containers/image/types" + "github.com/containers/image/v5/manifest" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/types" "github.com/containers/storage" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go index ed21ac1e99..c741bb449e 100644 --- a/vendor/github.com/containers/buildah/info.go +++ b/vendor/github.com/containers/buildah/info.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/containers/buildah/pkg/cgroups" "github.com/containers/buildah/pkg/unshare" "github.com/containers/storage" "github.com/containers/storage/pkg/system" @@ -45,6 +46,17 @@ func hostInfo() map[string]interface{} { info["arch"] = runtime.GOARCH info["cpus"] = runtime.NumCPU() info["rootless"] = unshare.IsRootless() + + unified, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + logrus.Error(err, "err reading cgroups mode") + } + cgroupVersion := "v1" + if unified { + cgroupVersion = "v2" + } + info["CgroupVersion"] = cgroupVersion + mi, err := system.ReadMemInfo() if err != nil { logrus.Error(err, "err reading memory info") diff --git 
a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md index 858364b452..f533b0fb2b 100644 --- a/vendor/github.com/containers/buildah/install.md +++ b/vendor/github.com/containers/buildah/install.md @@ -139,7 +139,6 @@ Prior to installing Buildah, install the following packages on your Linux distro * glib2-devel * libassuan-devel * libseccomp-devel -* ostree-devel * runc (Requires version 1.0 RC4 or higher.) * containers-common @@ -158,7 +157,6 @@ In Fedora, you can use this command: gpgme-devel \ libassuan-devel \ libseccomp-devel \ - ostree-devel \ git \ bzip2 \ go-md2man \ @@ -196,7 +194,6 @@ run this command: gpgme-devel \ libassuan-devel \ libseccomp-devel \ - ostree-devel \ git \ bzip2 \ go-md2man \ @@ -241,7 +238,7 @@ In Ubuntu zesty and xenial, you can use these commands: add-apt-repository -y ppa:gophers/archive apt-add-repository -y ppa:projectatomic/ppa apt-get -y -qq update - apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libostree-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man + apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man apt-get -y install golang-1.10 ``` Then to install Buildah on Ubuntu follow the steps in this example: @@ -266,7 +263,7 @@ gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyrings/projectatomic-ppa.gpg echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list apt update -apt -y install -t stretch-backports libostree-dev golang +apt -y install -t stretch-backports golang apt -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man ``` @@ -274,7 +271,7 @@ The build steps on Debian are otherwise the same as Ubuntu, above. ## Vendoring - Dependency Management -This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor` to synchronize the code with the go module and repopulate the `./vendor` directory. +This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor-in-container` to synchronize the code with the go module and repopulate the `./vendor` directory. ## Configuration files @@ -381,7 +378,7 @@ Buildah uses Go Modules for vendoring purposes. If you need to update or add a * Enter into your sandbox `src/github.com/containers/buildah` and ensure that the GOPATH variable is set to the directory prior as noted above. * `export GO111MODULE=on` * Assuming you want to 'bump' the `github.com/containers/storage` package to version 1.12.13, use this command: `go get github.com/containers/storage@v1.12.13` - * `make vendor` + * `make vendor-in-container` * `make` * `make install` * Then add any updated or added files with `git add` then do a `git commit` and create a PR. 
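As a concrete illustration (an editor's sketch, not part of this diff) of what the `go get` bump above and the fork-override workflow in the next hunk leave behind in `go.mod`: the fork path and branch below are placeholders, and Go rewrites the branch name to a pseudo-version on the next build.

```
module github.com/containers/buildah

go 1.12

// pinned by `go get github.com/containers/storage@v1.12.13`
require github.com/containers/storage v1.12.13

// temporary override for testing a personal fork, as written by
// `go mod edit -replace`; remove it again with
// `go mod edit -dropreplace github.com/containers/storage`
replace github.com/containers/storage => github.com/mygithub_username/storage YOUR_BRANCH
```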
@@ -391,10 +388,10 @@ Buildah uses Go Modules for vendoring purposes. If you need to update or add a If you wish to vendor in your personal fork to try changes out (assuming containers/storage in the below example): * `go mod edit -replace github.com/containers/storage=github.com/{mygithub_username}/storage@YOUR_BRANCH` - * `make vendor` + * `make vendor-in-container` To revert * `go mod edit -dropreplace github.com/containers/storage` - * `make vendor` + * `make vendor-in-container` To speed up fetching dependencies, you can use a [Go Module Proxy](https://proxy.golang.org) by setting `GOPROXY=https://proxy.golang.org`. diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index 31ae01be7d..87cfd5d0da 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -7,12 +7,12 @@ import ( "strings" "github.com/containers/buildah/util" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/sysregistriesv2" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/sysregistriesv2" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/openshift/imagebuilder" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/buildah/ostree_tag.sh b/vendor/github.com/containers/buildah/ostree_tag.sh deleted file mode 100644 index 537c17ff4f..0000000000 --- a/vendor/github.com/containers/buildah/ostree_tag.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash -if pkg-config ostree-1 2> /dev/null ; then - echo containers_image_ostree -else - echo containers_image_ostree_stub -fi diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go index 53e6ec44b7..b7f7046153 100644 --- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go +++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go @@ -10,11 +10,11 @@ import ( "sync" "github.com/containers/buildah/docker" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/manifest" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" @@ -263,14 +263,14 @@ func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *dig return s.source.GetSignatures(ctx, instanceDigest) } -func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - signatures, err := s.source.GetSignatures(ctx, nil) +func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + signatures, err := s.source.GetSignatures(ctx, instanceDigest) if err != nil { return nil, errors.Wrapf(err, "error checking if image %q has signatures", 
transports.ImageName(s.reference)) } canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) - infos, err := s.source.LayerInfosForCopy(ctx) + infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) if err != nil { return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) } @@ -515,7 +515,7 @@ func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.Bl return false, types.BlobInfo{}, nil } -func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte) error { +func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error { manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) @@ -525,13 +525,13 @@ func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes [] logrus.Warnf("error saving manifest as %q: %v", filename, err) } } - return d.destination.PutManifest(ctx, manifestBytes) + return d.destination.PutManifest(ctx, manifestBytes, instanceDigest) } -func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - return d.destination.PutSignatures(ctx, signatures) +func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + return d.destination.PutSignatures(ctx, signatures, instanceDigest) } -func (d *blobCacheDestination) Commit(ctx context.Context) error { - return d.destination.Commit(ctx) +func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.destination.Commit(ctx, unparsedToplevel) } diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go index 06aec96eaa..e8979cd7f8 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/common.go +++ b/vendor/github.com/containers/buildah/pkg/cli/common.go @@ -7,7 +7,7 @@ package cli import ( "fmt" "os" - "path/filepath" + "runtime" "strings" "github.com/containers/buildah" @@ -95,6 +95,8 @@ type FromAndBudResults struct { Isolation string Memory string MemorySwap string + OverrideArch string + OverrideOS string SecurityOpt []string ShmSize string Ulimit []string @@ -161,7 +163,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet { fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image, even if a version is present") fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress") fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build") - fs.StringVar(&flags.Runtime, "runtime", util.Runtime(), "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.") + // "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/bud.go. fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime") fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)") fs.BoolVar(&flags.Squash, "squash", false, "Squash newly built layers into a single new layer.") @@ -194,6 +196,14 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. 
Use BUILDAH_ISOLATION environment variable to override.") fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)") fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap") + fs.StringVar(&flags.OverrideOS, "override-os", runtime.GOOS, "prefer `OS` instead of the running OS when pulling images") + if err := fs.MarkHidden("override-os"); err != nil { + panic(fmt.Sprintf("error marking override-os as hidden: %v", err)) + } + fs.StringVar(&flags.OverrideArch, "override-arch", runtime.GOARCH, "prefer `ARCH` instead of the architecture of the machine when pulling images") + if err := fs.MarkHidden("override-arch"); err != nil { + panic(fmt.Sprintf("error marking override-arch as hidden: %v", err)) + } fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])") fs.StringVar(&flags.ShmSize, "shm-size", "65536k", "size of '/dev/shm'. The format is `<number><unit>`.") fs.StringSliceVar(&flags.Ulimit, "ulimit", []string{}, "ulimit options (default [])") @@ -259,9 +269,5 @@ func GetDefaultAuthFile() string { if authfile != "" { return authfile } - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - if runtimeDir != "" { - return filepath.Join(runtimeDir, "containers/auth.json") - } return "" } diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 36ae071906..9194ddf58e 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -14,10 +14,10 @@ import ( "unicode" "github.com/containers/buildah" - "github.com/containers/image/types" + "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/idtools" - "github.com/docker/go-units" - "github.com/opencontainers/runtime-spec/specs-go" + units "github.com/docker/go-units" + specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -583,6 +583,12 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { ctx.RegistriesDirPath = regConfDir } ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", buildah.Version) + if os, err := c.Flags().GetString("override-os"); err == nil { + ctx.OSChoice = os + } + if arch, err := c.Flags().GetString("override-arch"); err == nil { + ctx.ArchitectureChoice = arch + } return ctx, nil } diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go index 238293894f..1aaeca2786 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go @@ -4,6 +4,8 @@ package parse import ( "fmt" + "os" + "path/filepath" "github.com/containers/buildah/pkg/unshare" "github.com/opencontainers/runc/libcontainer/configs" @@ -24,18 +26,40 @@ func getDefaultProcessLimits() []string { return defaultLimits } -func DeviceFromPath(device string) (configs.Device, error) { +func DeviceFromPath(device string) ([]configs.Device, error) { + var devs []configs.Device src, dst, permissions, err := Device(device) if err != nil { - return configs.Device{}, err + return nil, err } if unshare.IsRootless() { - return configs.Device{}, errors.Errorf("Renaming device %s to %s is not a supported in rootless containers", src, dst) + return nil, errors.Errorf("Renaming device %s to %s is not a supported in rootless 
containers", src, dst) } - dev, err := devices.DeviceFromPath(src, permissions) + srcInfo, err := os.Stat(src) if err != nil { - return configs.Device{}, errors.Wrapf(err, "%s is not a valid device", src) + return nil, errors.Wrapf(err, "error getting info of source device %s", src) } - dev.Path = dst - return *dev, nil + + if !srcInfo.IsDir() { + + dev, err := devices.DeviceFromPath(src, permissions) + if err != nil { + return nil, errors.Wrapf(err, "%s is not a valid device", src) + } + dev.Path = dst + devs = append(devs, *dev) + return devs, nil + } + + // If source device is a directory + srcDevices, err := devices.GetDevices(src) + if err != nil { + return nil, errors.Wrapf(err, "error getting source devices from directory %s", src) + } + for _, d := range srcDevices { + d.Path = filepath.Join(dst, filepath.Base(d.Path)) + d.Permissions = permissions + devs = append(devs, *d) + } + return devs, nil } diff --git a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go index 9be9bb3b6a..80ca050165 100644 --- a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go +++ b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go @@ -148,12 +148,12 @@ func getMountsMap(path string) (string, string, error) { } // SecretMounts copies, adds, and mounts the secrets to the container root filesystem -func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless bool) []rspec.Mount { - return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless) +func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless, disableFips bool) []rspec.Mount { + return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless, disableFips) } // SecretMountsWithUIDGID specifies the uid/gid of the owner -func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless bool) []rspec.Mount { +func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless, disableFips bool) []rspec.Mount { var ( secretMounts []rspec.Mount mountFiles []string @@ -180,6 +180,10 @@ func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPre } } + // Only add FIPS secret mount if disableFips=false + if disableFips { + return secretMounts + } // Add FIPS mode secret if /etc/system-fips exists on the host _, err := os.Stat("/etc/system-fips") if err == nil { diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go index f05d2bf508..300f3b3969 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -8,18 +8,18 @@ import ( "github.com/containers/buildah/pkg/blobcache" "github.com/containers/buildah/util" - cp "github.com/containers/image/copy" - "github.com/containers/image/directory" - "github.com/containers/image/docker" - dockerarchive "github.com/containers/image/docker/archive" - "github.com/containers/image/docker/reference" - tarfile "github.com/containers/image/docker/tarfile" - ociarchive "github.com/containers/image/oci/archive" - oci "github.com/containers/image/oci/layout" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" + cp "github.com/containers/image/v5/copy" + 
"github.com/containers/image/v5/directory" + "github.com/containers/image/v5/docker" + dockerarchive "github.com/containers/image/v5/docker/archive" + "github.com/containers/image/v5/docker/reference" + tarfile "github.com/containers/image/v5/docker/tarfile" + ociarchive "github.com/containers/image/v5/oci/archive" + oci "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/signature" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage" multierror "github.com/hashicorp/go-multierror" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 624da9dae1..4f507d1bc4 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -460,7 +460,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st } // Get the list of secrets mounts. - secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless()) + secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless(), false) // Add temporary copies of the contents of volume locations at the // volume locations, unless we already have something there. diff --git a/vendor/github.com/containers/buildah/troubleshooting.md b/vendor/github.com/containers/buildah/troubleshooting.md index 96fa403f0e..4ff2f06c45 100644 --- a/vendor/github.com/containers/buildah/troubleshooting.md +++ b/vendor/github.com/containers/buildah/troubleshooting.md @@ -82,9 +82,9 @@ the command with single quotes and use `bash -c`. 
The previous examples would b changed to: ```console -# buildah run bash -c '$whalecontainer /usr/games/fortune -a | cowsay' -# buildah run bash -c '$newcontainer echo "daemon off;" >> /etc/nginx/nginx.conf' -# buildah run bash -c '$newcontainer echo "nginx on Fedora" > /usr/share/nginx/html/index.html' +# buildah run $whalecontainer bash -c '/usr/games/fortune -a | cowsay' +# buildah run $newcontainer bash -c 'echo "daemon off;" >> /etc/nginx/nginx.conf' +# buildah run $newcontainer bash -c 'echo "nginx on Fedora" > /usr/share/nginx/html/index.html' ``` --- diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 9fbeb14d47..b4670e41ce 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -8,9 +8,9 @@ import ( "path/filepath" "github.com/containers/buildah/util" - "github.com/containers/image/docker/reference" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" @@ -20,7 +20,7 @@ import ( "github.com/containers/storage/pkg/system" v1 "github.com/opencontainers/image-spec/specs-go/v1" rspec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/selinux/go-selinux" + selinux "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -248,28 +248,36 @@ func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts * // location into our working container, mapping permissions using the // container's ID maps, possibly overridden using the passed-in chownOpts func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error { - if hasher != nil && b.ContentDigester.Hash() != nil { - hasher = io.MultiWriter(hasher, b.ContentDigester.Hash()) - } - if hasher == nil { - hasher = b.ContentDigester.Hash() - } convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) if dryRun { return func(src, dest string) error { - if hasher == nil { - hasher = ioutil.Discard + thisHasher := hasher + if thisHasher != nil && b.ContentDigester.Hash() != nil { + thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash()) + } + if thisHasher == nil { + thisHasher = b.ContentDigester.Hash() } f, err := os.Open(src) if err != nil { return errors.Wrapf(err, "error opening %q", src) } defer f.Close() - _, err = io.Copy(hasher, f) + _, err = io.Copy(thisHasher, f) return err } } - return chrootarchive.UntarPathAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap) + return func(src, dest string) error { + thisHasher := hasher + if thisHasher != nil && b.ContentDigester.Hash() != nil { + thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash()) + } + if thisHasher == nil { + thisHasher = b.ContentDigester.Hash() + } + untarPathAndChown := chrootarchive.UntarPathAndChown(chownOpts, thisHasher, convertedUIDMap, convertedGIDMap) + return untarPathAndChown(src, dest) + } } // tarPath returns a function which creates an archive of a specified location, diff --git a/vendor/github.com/containers/buildah/util/util.go 
b/vendor/github.com/containers/buildah/util/util.go index 8ec7676019..d5e842315f 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -10,12 +10,12 @@ import ( "syscall" "github.com/containers/buildah/pkg/cgroups" - "github.com/containers/image/docker/reference" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/signature" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/docker/distribution/registry/api/errcode" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -115,7 +115,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto for _, registry := range searchRegistries { reg, err := sysregistriesv2.FindRegistry(sc, registry) if err != nil { - logrus.Debugf("unable to read registry configuraitno for %#v: %v", registry, err) + logrus.Debugf("unable to read registry configuration for %#v: %v", registry, err) continue } if reg == nil || !reg.Blocked { diff --git a/vendor/github.com/containers/conmon/runner/config/config.go b/vendor/github.com/containers/conmon/runner/config/config.go index 867ecd9bcf..0dd324c285 100644 --- a/vendor/github.com/containers/conmon/runner/config/config.go +++ b/vendor/github.com/containers/conmon/runner/config/config.go @@ -3,4 +3,14 @@ package config const ( // BufSize is the size of buffers passed in to sockets BufSize = 8192 + // ConnSockBufSize is the size of the socket used for + // to attach to the container + ConnSockBufSize = 32768 + // WinResizeEvent is the event code the caller program will + // send along the ctrl fd to signal conmon to resize + // the pty window + WinResizeEvent = 1 + // ReopenLogsEvent is the event code the caller program will + // send along the ctrl fd to signal conmon to reopen the log files + ReopenLogsEvent = 2 ) diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go deleted file mode 100644 index 1f0faa1ad8..0000000000 --- a/vendor/github.com/containers/image/image/docker_list.go +++ /dev/null @@ -1,94 +0,0 @@ -package image - -import ( - "context" - "encoding/json" - "fmt" - "runtime" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type platformSpec struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` // removed in OCI -} - -// A manifestDescriptor references a platform-specific manifest. 
-type manifestDescriptor struct { - manifest.Schema2Descriptor - Platform platformSpec `json:"platform"` -} - -type manifestList struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - Manifests []manifestDescriptor `json:"manifests"` -} - -// chooseDigestFromManifestList parses blob as a schema2 manifest list, -// and returns the digest of the image appropriate for the current environment. -func chooseDigestFromManifestList(sys *types.SystemContext, blob []byte) (digest.Digest, error) { - wantedArch := runtime.GOARCH - if sys != nil && sys.ArchitectureChoice != "" { - wantedArch = sys.ArchitectureChoice - } - wantedOS := runtime.GOOS - if sys != nil && sys.OSChoice != "" { - wantedOS = sys.OSChoice - } - - list := manifestList{} - if err := json.Unmarshal(blob, &list); err != nil { - return "", err - } - for _, d := range list.Manifests { - if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { - return d.Digest, nil - } - } - return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) -} - -func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { - targetManifestDigest, err := chooseDigestFromManifestList(sys, manblob) - if err != nil { - return nil, err - } - manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) - if err != nil { - return nil, err - } - - matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) - if err != nil { - return nil, errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) - } - - return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) -} - -// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate -// for the current system from the manifest available from src. -func ChooseManifestInstanceFromManifestList(ctx context.Context, sys *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) { - // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later, - // probably along with manifest list editing. - blob, mt, err := src.Manifest(ctx) - if err != nil { - return "", err - } - if mt != manifest.DockerV2ListMediaType { - return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt) - } - return chooseDigestFromManifestList(sys, blob) -} diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go deleted file mode 100644 index aad2bfcf26..0000000000 --- a/vendor/github.com/containers/image/pkg/compression/compression.go +++ /dev/null @@ -1,94 +0,0 @@ -package compression - -import ( - "bytes" - "compress/bzip2" - "io" - "io/ioutil" - - "github.com/klauspost/pgzip" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/ulikunitz/xz" -) - -// DecompressorFunc returns the decompressed stream, given a compressed stream. -// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). -type DecompressorFunc func(io.Reader) (io.ReadCloser, error) - -// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. 
-func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { - return pgzip.NewReader(r) -} - -// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. -func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { - return ioutil.NopCloser(bzip2.NewReader(r)), nil -} - -// XzDecompressor is a DecompressorFunc for the xz compression algorithm. -func XzDecompressor(r io.Reader) (io.ReadCloser, error) { - r, err := xz.NewReader(r) - if err != nil { - return nil, err - } - return ioutil.NopCloser(r), nil -} - -// compressionAlgos is an internal implementation detail of DetectCompression -var compressionAlgos = map[string]struct { - prefix []byte - decompressor DecompressorFunc -}{ - "gzip": {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor}, // gzip (RFC 1952) - "bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor}, // bzip2 (decompress.c:BZ2_decompress) - "xz": {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt) -} - -// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. -// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. -func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { - buffer := [8]byte{} - - n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. - // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later. - return nil, nil, err - } - - var decompressor DecompressorFunc - for name, algo := range compressionAlgos { - if bytes.HasPrefix(buffer[:n], algo.prefix) { - logrus.Debugf("Detected compression format %s", name) - decompressor = algo.decompressor - break - } - } - if decompressor == nil { - logrus.Debugf("No compression detected") - } - - return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil -} - -// AutoDecompress takes a stream and returns an uncompressed version of the -// same stream. -// The caller must call Close() on the returned stream (even if the input does not need, -// or does not even support, closing!). -func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { - decompressor, stream, err := DetectCompression(stream) - if err != nil { - return nil, false, errors.Wrapf(err, "Error detecting compression") - } - var res io.ReadCloser - if decompressor != nil { - res, err = decompressor(stream) - if err != nil { - return nil, false, errors.Wrapf(err, "Error initializing decompression") - } - } else { - res = ioutil.NopCloser(stream) - } - return res, decompressor != nil, nil -} diff --git a/vendor/github.com/containers/image/v5/LICENSE b/vendor/github.com/containers/image/v5/LICENSE new file mode 100644 index 0000000000..9535635306 --- /dev/null +++ b/vendor/github.com/containers/image/v5/LICENSE @@ -0,0 +1,189 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
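Stepping back past the license text: the pkg/compression file deleted earlier is not gone, it reappears under the v5 import path (copy.go below calls compression.DetectCompressionFormat and compression.CompressStream from it). Its core trick is worth noting: detection has to consume the first few bytes of the stream to test the magic prefixes, so it hands back an io.MultiReader that replays them. A self-contained sketch of that pattern, with sniffPrefix as a made-up helper name:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// sniffPrefix reads just enough of r to test for a magic prefix, then returns
// a reader that replays the consumed bytes, mirroring the io.MultiReader
// trick in the deleted DetectCompression.
func sniffPrefix(r io.Reader, magic []byte) (bool, io.Reader, error) {
	buf := make([]byte, len(magic))
	n, err := io.ReadAtLeast(r, buf, len(buf))
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return false, nil, err
	}
	// Re-prepend the consumed bytes so the caller sees the full stream.
	return bytes.HasPrefix(buf[:n], magic), io.MultiReader(bytes.NewReader(buf[:n]), r), nil
}

func main() {
	gzipMagic := []byte{0x1F, 0x8B, 0x08} // same RFC 1952 prefix as the table above
	isGzip, full, _ := sniffPrefix(strings.NewReader("plain text"), gzipMagic)
	rest, _ := ioutil.ReadAll(full)
	fmt.Println(isGzip, string(rest)) // false plain text
}
```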
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go similarity index 65% rename from vendor/github.com/containers/image/copy/copy.go rename to vendor/github.com/containers/image/v5/copy/copy.go index f1b029f974..090d862d51 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -13,16 +13,16 @@ import ( "sync" "time" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache" - "github.com/containers/image/pkg/compression" - "github.com/containers/image/signature" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/klauspost/pgzip" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vbauerster/mpb" @@ -43,6 +43,9 @@ type digestingReader struct { // downloads. Let's follow Firefox by limiting it to 6. var maxParallelDownloads = 6 +// compressionBufferSize is the buffer size used to compress a blob +var compressionBufferSize = 1048576 + // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error // or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. // (neither is set if EOF is never reached). @@ -86,14 +89,16 @@ func (d *digestingReader) Read(p []byte) (int, error) { // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. type copier struct { - dest types.ImageDestination - rawSource types.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties - blobInfoCache types.BlobInfoCache - copyInParallel bool + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressOutput io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties + blobInfoCache types.BlobInfoCache + copyInParallel bool + compressionFormat compression.Algorithm + compressionLevel *int } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -106,6 +111,37 @@ type imageCopier struct { canSubstituteBlobs bool } +const ( + // CopySystemImage is the default value which, when set in + // Options.ImageListSelection, indicates that the caller expects only one + // image to be copied, so if the source reference refers to a list of + // images, one that matches the current system will be selected. + CopySystemImage ImageListSelection = iota + // CopyAllImages is a value which, when set in Options.ImageListSelection, + // indicates that the caller expects to copy multiple images, and if + // the source reference refers to a list, that the list and every image + // to which it refers will be copied. 
If the source reference refers + // to a list and the target reference can not accept lists, an error + // should be returned. + CopyAllImages + // CopySpecificImages is a value which, when set in + // Options.ImageListSelection, indicates that the caller expects the + // source reference to be either a single image or a list of images, + // and if the source reference is a list, wants only specific instances + // from it copied (or none of them, if the list of instances to copy is + // empty), along with the list itself. If the target reference can + // only accept one image (i.e., it cannot accept lists), an error + // should be returned. + CopySpecificImages +) + +// ImageListSelection is one of CopySystemImage, CopyAllImages, or +// CopySpecificImages, to control whether, when the source reference is a list, +// copy.Image() copies only an image which matches the current runtime +// environment, or all images which match the supplied reference, or only +// specific images from the source reference. +type ImageListSelection int + // Options allows supplying non-default configuration modifying the behavior of CopyImage. type Options struct { RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. @@ -117,12 +153,24 @@ type Options struct { Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. // manifest MIME type of image set by user. "" is default and means use autodetection to determine the manifest MIME type ForceManifestMIMEType string + ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list + Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself +} + +// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value +func validateImageListSelection(selection ImageListSelection) error { + switch selection { + case CopySystemImage, CopyAllImages, CopySpecificImages: + return nil + default: + return errors.Errorf("Invalid value for options.ImageListSelection: %d", selection) + } } // Image copies image from srcRef to destRef, using policyContext to validate // source image admissibility. It returns the manifest which was written to // the new copy of the image. -func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) { +func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) { // NOTE this function uses an output parameter for the error return value. // Setting this and returning is the ideal way to return an error.
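For callers, the practical upshot of the new knob is that they can now ask for specific list instances. A sketch using exactly the Options fields added in this hunk (the digest value is a placeholder; a real call would also supply references, a policy context, and the rest of the copy.Image arguments):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/copy"
	"github.com/opencontainers/go-digest"
)

func main() {
	// Request that only one named instance of a manifest list be copied,
	// along with the list itself.
	opts := &copy.Options{
		ImageListSelection: copy.CopySpecificImages,
		Instances: []digest.Digest{
			digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
		},
	}
	fmt.Println(opts.ImageListSelection == copy.CopySpecificImages) // true
}
```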
// @@ -132,6 +180,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, options = &Options{} } + if err := validateImageListSelection(options.ImageListSelection); err != nil { + return nil, err + } + reportWriter := ioutil.Discard if options.ReportWriter != nil { @@ -166,6 +218,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, progressOutput = ioutil.Discard } copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() + c := &copier{ dest: dest, rawSource: rawSource, @@ -179,6 +232,20 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // we might want to add a separate CommonCtx — or would that be too confusing? blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), } + // Default to using gzip compression unless specified otherwise. + if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { + algo, err := compression.AlgorithmByName("gzip") + if err != nil { + return nil, err + } + c.compressionFormat = algo + } else { + c.compressionFormat = *options.DestinationCtx.CompressionFormat + } + if options.DestinationCtx != nil { + // Note that the compressionLevel can be nil. + c.compressionLevel = options.DestinationCtx.CompressionLevel + } unparsedToplevel := image.UnparsedInstance(rawSource, nil) multiImage, err := isMultiImage(ctx, unparsedToplevel) @@ -187,79 +254,278 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, } if !multiImage { - // The simple case: Just copy a single image. - if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil { + // The simple case: just copy a single image. + if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil { return nil, err } - } else { - // This is a manifest list. Choose a single image and copy it. - // FIXME: Copy to destinations which support manifest lists, one image at a time. - instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel) + } else if options.ImageListSelection == CopySystemImage { + // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that + // matches the current system to copy, and copy it. 
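The copier now defaults to gzip via compression.AlgorithmByName("gzip") and threads an optional caller-chosen level through DestinationCtx, where a nil level means "library default". The same "default unless the caller overrides" shape, reduced to the standard library (compress/gzip stands in here for the v5 compression package):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// compressWith treats a nil level as gzip.DefaultCompression, the same
// pattern the copier uses for DestinationCtx.CompressionLevel.
func compressWith(dst io.Writer, src io.Reader, level *int) error {
	l := gzip.DefaultCompression
	if level != nil {
		l = *level
	}
	zw, err := gzip.NewWriterLevel(dst, l)
	if err != nil {
		return err
	}
	if _, err := io.Copy(zw, src); err != nil {
		zw.Close()
		return err
	}
	return zw.Close()
}

func main() {
	var out bytes.Buffer
	best := gzip.BestCompression
	_ = compressWith(&out, bytes.NewReader([]byte("hello")), &best)
	fmt.Println(out.Len())
}
```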
+ mfest, manifestType, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, errors.Wrapf(err, "Error reading manifest for %s", transports.ImageName(srcRef)) + } + manifestList, err := manifest.ListFromBlob(mfest, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "Error parsing primary manifest as list for %s", transports.ImageName(srcRef)) + } + instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx if err != nil { return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef)) } - logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest) + logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest) unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) - if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil { + if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil { + return nil, err + } + } else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */ + // If we were asked to copy multiple images and can't, that's an error. + if !supportsMultipleImages(c.dest) { + return nil, errors.Errorf("Error copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name()) + } + // Copy some or all of the images. + switch options.ImageListSelection { + case CopyAllImages: + logrus.Debugf("Source is a manifest list; copying all instances") + case CopySpecificImages: + logrus.Debugf("Source is a manifest list; copying some instances") + } + if copiedManifest, _, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil { return nil, err } } - if err := c.dest.Commit(ctx); err != nil { + if err := c.dest.Commit(ctx, unparsedToplevel); err != nil { return nil, errors.Wrap(err, "Error committing the finished image") } - return manifest, nil + return copiedManifest, nil +} + +// Checks if the destination supports accepting multiple images by checking if it can support +// manifest types that are lists of other manifests. +func supportsMultipleImages(dest types.ImageDestination) bool { + mtypes := dest.SupportedManifestMIMETypes() + if len(mtypes) == 0 { + // Anything goes! + return true + } + for _, mtype := range mtypes { + if manifest.MIMETypeIsMultiImage(mtype) { + return true + } + } + return false } -// Image copies a single (on-manifest-list) image unparsedImage, using policyContext to validate +// copyMultipleImages copies some or all of an image list's instances, using +// policyContext to validate source image admissibility. +func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, copiedManifestType string, retErr error) { + // Parse the list and get a copy of the original value after it's re-encoded. 
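supportsMultipleImages encodes a convention worth calling out: an empty SupportedManifestMIMETypes() means "no restrictions", so a destination that declares nothing is assumed to accept lists. A minimal sketch of that check, with isList standing in for manifest.MIMETypeIsMultiImage:

```go
package main

import "fmt"

// supportsLists mirrors supportsMultipleImages above: an empty list of
// supported MIME types means the destination accepts anything.
func supportsLists(supported []string, isList func(string) bool) bool {
	if len(supported) == 0 {
		return true // anything goes
	}
	for _, mt := range supported {
		if isList(mt) {
			return true
		}
	}
	return false
}

func main() {
	isList := func(mt string) bool {
		return mt == "application/vnd.docker.distribution.manifest.list.v2+json"
	}
	fmt.Println(supportsLists(nil, isList))                       // true
	fmt.Println(supportsLists([]string{"application/x"}, isList)) // false
}
```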
+ manifestList, manifestType, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, "", errors.Wrapf(err, "Error reading manifest list") + } + list, err := manifest.ListFromBlob(manifestList, manifestType) + if err != nil { + return nil, "", errors.Wrapf(err, "Error parsing manifest list %q", string(manifestList)) + } + originalList := list.Clone() + + // Read and/or clear the set of signatures for this list. + var sigs [][]byte + if options.RemoveSignatures { + sigs = [][]byte{} + } else { + c.Printf("Getting image list signatures\n") + s, err := c.rawSource.GetSignatures(ctx, nil) + if err != nil { + return nil, "", errors.Wrap(err, "Error reading signatures") + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("Checking if image list destination supports signatures\n") + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, "", errors.Wrap(err, "Can not copy signatures") + } + } + + // Determine if we'll need to convert the manifest list to a different format. + forceListMIMEType := options.ForceManifestMIMEType + switch forceListMIMEType { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + forceListMIMEType = manifest.DockerV2ListMediaType + case imgspecv1.MediaTypeImageManifest: + forceListMIMEType = imgspecv1.MediaTypeImageIndex + } + selectedListType, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) + if err != nil { + return nil, "", errors.Wrapf(err, "Error determining manifest list type to write to destination") + } + if selectedListType != list.MIMEType() { + canModifyManifestList := (len(sigs) == 0) + if !canModifyManifestList { + return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType) + } + } + + // Copy each image, or just the ones we want to copy, in turn. + instanceDigests := list.Instances() + imagesToCopy := len(instanceDigests) + if options.ImageListSelection == CopySpecificImages { + imagesToCopy = len(options.Instances) + } + c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests)) + updates := make([]manifest.ListUpdate, len(instanceDigests)) + instancesCopied := 0 + for i, instanceDigest := range instanceDigests { + if options.ImageListSelection == CopySpecificImages { + skip := true + for _, instance := range options.Instances { + if instance == instanceDigest { + skip = false + break + } + } + if skip { + update, err := list.Instance(instanceDigest) + if err != nil { + return nil, "", err + } + logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + // Record the digest/size/type of the manifest that we didn't copy. + updates[i] = update + continue + } + } + logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest) + updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest) + if err != nil { + return nil, "", err + } + instancesCopied++ + // Record the result of a possible conversion here. 
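The copy loop above walks every instance digest but, under CopySpecificImages, only copies those named in options.Instances, recording the original digest/size/type of skipped entries so the rewritten list stays complete. The skip test itself is a plain membership check; a reduced sketch with string digests:

```go
package main

import "fmt"

// contains mirrors the skip check in copyMultipleImages: an instance is
// copied only if it appears in the caller-supplied list.
func contains(wanted []string, d string) bool {
	for _, w := range wanted {
		if w == d {
			return true
		}
	}
	return false
}

func main() {
	all := []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}
	wanted := []string{"sha256:bbb"}
	for i, d := range all {
		if !contains(wanted, d) {
			fmt.Printf("skipping instance %s (%d/%d)\n", d, i+1, len(all))
			continue
		}
		fmt.Printf("copying instance %s (%d/%d)\n", d, i+1, len(all))
	}
}
```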
+ update := manifest.ListUpdate{ + Digest: updatedManifestDigest, + Size: int64(len(updatedManifest)), + MediaType: updatedManifestType, + } + updates[i] = update + } + + // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. + if err = list.UpdateInstances(updates); err != nil { + return nil, "", errors.Wrapf(err, "Error updating manifest list") + } + + // Check if the updates meaningfully changed the list of images. + listIsModified := false + if !reflect.DeepEqual(list.Instances(), originalList.Instances()) { + listIsModified = true + } + + // Perform the list conversion. + if selectedListType != list.MIMEType() { + list, err = list.ConvertToMIMEType(selectedListType) + if err != nil { + return nil, "", errors.Wrapf(err, "Error converting manifest list to list with MIME type %q", selectedListType) + } + } + + // If we can't use the original value, but we have to change it, flag an error. + if listIsModified { + manifestList, err = list.Serialize() + if err != nil { + return nil, "", errors.Wrapf(err, "Error encoding updated manifest list (%q: %#v)", list.MIMEType(), list.Instances()) + } + logrus.Debugf("Manifest list has been updated") + } + + // Save the manifest list. + c.Printf("Writing manifest list to image destination\n") + if err = c.dest.PutManifest(ctx, manifestList, nil); err != nil { + return nil, "", errors.Wrapf(err, "Error writing manifest list %q", string(manifestList)) + } + + // Sign the manifest list. + if options.SignBy != "" { + newSig, err := c.createSignature(manifestList, options.SignBy) + if err != nil { + return nil, "", err + } + sigs = append(sigs, newSig) + } + + c.Printf("Storing list signatures\n") + if err := c.dest.PutSignatures(ctx, sigs, nil); err != nil { + return nil, "", errors.Wrap(err, "Error writing signatures") + } + + return manifestList, selectedListType, nil +} + +// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate // source image admissibility. -func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) { +func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) { // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. // Make sure we fail cleanly in such cases. multiImage, err := isMultiImage(ctx, unparsedImage) if err != nil { // FIXME FIXME: How to name a reference for the sub-image? - return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) + return nil, "", "", errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) } if multiImage { - return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") } // Please keep this policy check BEFORE reading any other information about the image. - // (the multiImage check above only matches the MIME type, which we have received anyway. 
+ // (The multiImage check above only matches the MIME type, which we have received anyway. // Actual parsing of anything should be deferred.) if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. - return nil, errors.Wrap(err, "Source image rejected") + return nil, "", "", errors.Wrap(err, "Source image rejected") } src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage) if err != nil { - return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference())) + return nil, "", "", errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference())) } // If the destination is a digested reference, make a note of that, determine what digest value we're - // expecting, and check that the source manifest matches it. + // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's + // one item from a manifest list that matches it, accept that as a match. destIsDigestedReference := false if named := c.dest.Reference().DockerReference(); named != nil { if digested, ok := named.(reference.Digested); ok { destIsDigestedReference = true sourceManifest, _, err := src.Manifest(ctx) if err != nil { - return nil, errors.Wrapf(err, "Error reading manifest from source image") + return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image") } matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest()) if err != nil { - return nil, errors.Wrapf(err, "Error computing digest of source image's manifest") + return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest") } if !matches { - return nil, errors.New("Digest of source image's manifest would not match destination reference") + manifestList, _, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, "", "", errors.Wrapf(err, "Error reading manifest from source image") + } + matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, "", "", errors.Wrapf(err, "Error computing digest of source image's manifest") + } + if !matches { + return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference") + } } } } if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil { - return nil, err + return nil, "", "", err } var sigs [][]byte @@ -269,14 +535,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli c.Printf("Getting image source signatures\n") s, err := src.Signatures(ctx) if err != nil { - return nil, errors.Wrap(err, "Error reading signatures") + return nil, "", "", errors.Wrap(err, "Error reading signatures") } sigs = s } if len(sigs) != 0 { c.Printf("Checking if image destination supports signatures\n") if err := c.dest.SupportsSignatures(ctx); err != nil { - return nil, errors.Wrap(err, "Can not copy signatures") + return nil, "", "", errors.Wrap(err, "Can not copy signatures") } } @@ -296,28 +562,29 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == "" if err := ic.updateEmbeddedDockerReference(); err != nil { - return nil, err + return nil, "", "", err } // We compute preferredManifestMIMEType only to show it in error messages. 
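The digested-reference check now has a fallback: if the per-instance manifest does not match the destination digest, the top-level list manifest may, which is exactly what happens when copying one instance out of a list pinned by the list's digest. At its core the check recomputes a digest and compares; a simplified stand-in for manifest.MatchesDigest (the real helper also validates the digest before comparing):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// matchesDigest recomputes the manifest blob's canonical digest and compares
// it to the expected one, as the digested-reference check above does.
func matchesDigest(manifestBlob []byte, expected digest.Digest) bool {
	return digest.Canonical.FromBytes(manifestBlob) == expected
}

func main() {
	blob := []byte(`{"schemaVersion":2}`)
	d := digest.Canonical.FromBytes(blob)
	fmt.Println(matchesDigest(blob, d))            // true
	fmt.Println(matchesDigest([]byte("other"), d)) // false
}
```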
// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType) if err != nil { - return nil, err + return nil, "", "", err } // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) if err := ic.copyLayers(ctx); err != nil { - return nil, err + return nil, "", "", err } // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support // without actually trying to upload something and getting a types.ManifestTypeRejectedError. // So, try the preferred manifest MIME type. If the process succeeds, fine… - manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx) + manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + retManifestType = preferredManifestMIMEType if err != nil { logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. @@ -325,14 +592,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // We don’t have other options. // In principle the code below would handle this as well, but the resulting error message is fairly ugly. // Don’t bother the user with MIME types if we have no choice. - return nil, err + return nil, "", "", err } // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. // So if we are here, we will definitely be trying to convert the manifest. // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. if !ic.canModifyManifest { - return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") + return nil, "", "", errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") } // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. @@ -340,7 +607,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli for _, manifestMIMEType := range otherManifestMIMETypeCandidates { logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) ic.manifestUpdates.ManifestMIMEType = manifestMIMEType - attemptedManifest, err := ic.copyUpdatedConfigAndManifest(ctx) + attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) if err != nil { logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) @@ -349,28 +616,30 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // We have successfully uploaded a manifest. manifestBytes = attemptedManifest + retManifestDigest = attemptedManifestDigest + retManifestType = manifestMIMEType errs = nil // Mark this as a success so that we don't abort below. 
break } if errs != nil { - return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) + return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) } } if options.SignBy != "" { newSig, err := c.createSignature(manifestBytes, options.SignBy) if err != nil { - return nil, err + return nil, "", "", err } sigs = append(sigs, newSig) } c.Printf("Storing signatures\n") - if err := c.dest.PutSignatures(ctx, sigs); err != nil { - return nil, errors.Wrap(err, "Error writing signatures") + if err := c.dest.PutSignatures(ctx, sigs, targetInstance); err != nil { + return nil, "", "", errors.Wrap(err, "Error writing signatures") } - return manifestBytes, nil + return manifestBytes, retManifestType, retManifestDigest, nil } // Printf writes a formatted string to c.reportWriter. @@ -535,12 +804,13 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool { } // copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, -// stores the resulting config and manifest to the destination, and returns the stored manifest. -func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte, error) { +// stores the resulting config and manifest to the destination, and returns the stored manifest +// and its digest. +func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { pendingImage := ic.src if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) { if !ic.canModifyManifest { - return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") + return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") } if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. @@ -549,28 +819,35 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. 
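The conversion retry logic is a classic try-each-candidate fallback: attempt the preferred MIME type, and only on a rejection walk the remaining candidates, accumulating "type(error)" strings for the final message. The shape in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

// tryCandidates mirrors the manifest-upload fallback above: attempt each
// MIME type in turn, remember every failure, stop at the first success.
func tryCandidates(candidates []string, upload func(string) error) error {
	var errs []string
	for _, mt := range candidates {
		if err := upload(mt); err != nil {
			errs = append(errs, fmt.Sprintf("%s(%v)", mt, err))
			continue
		}
		return nil // success
	}
	return fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
}

func main() {
	err := tryCandidates([]string{"application/a", "application/b"}, func(mt string) error {
		if mt == "application/b" {
			return nil
		}
		return fmt.Errorf("rejected")
	})
	fmt.Println(err) // <nil>: the second candidate succeeded
}
```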
- return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) + return nil, "", errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) } pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) if err != nil { - return nil, errors.Wrap(err, "Error creating an updated image manifest") + return nil, "", errors.Wrap(err, "Error creating an updated image manifest") } pendingImage = pi } - manifest, _, err := pendingImage.Manifest(ctx) + man, _, err := pendingImage.Manifest(ctx) if err != nil { - return nil, errors.Wrap(err, "Error reading manifest") + return nil, "", errors.Wrap(err, "Error reading manifest") } if err := ic.c.copyConfig(ctx, pendingImage); err != nil { - return nil, err + return nil, "", err } ic.c.Printf("Writing manifest to image destination\n") - if err := ic.c.dest.PutManifest(ctx, manifest); err != nil { - return nil, errors.Wrap(err, "Error writing manifest") + manifestDigest, err := manifest.Digest(man) + if err != nil { + return nil, "", err + } + if instanceDigest != nil { + instanceDigest = &manifestDigest + } + if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { + return nil, "", errors.Wrap(err, "Error writing manifest") } - return manifest, nil + return man, manifestDigest, nil } // newProgressPool creates a *mpb.Progress and a cleanup function. @@ -666,7 +943,7 @@ type diffIDResult struct { err error } -// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, +// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" @@ -695,7 +972,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, po bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done") - blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar) + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, Annotations: srcInfo.Annotations}, diffIDIsNeeded, bar) if err != nil { return types.BlobInfo{}, "", err } @@ -722,7 +999,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, po } // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. -// it copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, +// it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, // perhaps compressing the stream if canCompress, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. 
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, @@ -781,7 +1058,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) return digest.Canonical.FromReader(stream) } -// copyBlobFromStream copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, +// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, // perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied blob. @@ -805,7 +1082,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // === Detect compression of the input stream. // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. - decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform + compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform if err != nil { return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } @@ -819,6 +1096,8 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr originalLayerReader = destStream } + desiredCompressionFormat := c.compressionFormat + // === Deal with layer compression/decompression if necessary var inputInfo types.BlobInfo var compressionOperation types.LayerCompression @@ -831,7 +1110,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, // we don’t care. - go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter + go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter + destStream = pipeReader + inputInfo.Digest = "" + inputInfo.Size = -1 + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() { + // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally + // re-compressed using the desired format. + logrus.Debugf("Blob will be converted") + + compressionOperation = types.PreserveOriginal + s, err := decompressor(destStream) + if err != nil { + return types.BlobInfo{}, err + } + defer s.Close() + + pipeReader, pipeWriter := io.Pipe() + defer pipeReader.Close() + + go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter + destStream = pipeReader inputInfo.Digest = "" inputInfo.Size = -1 @@ -847,6 +1146,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr inputInfo.Digest = "" inputInfo.Size = -1 } else { + // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. 
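The new conversion branch that follows decompresses with the detected format's decompressor and re-compresses with the desired one, streaming through an io.Pipe so the blob is never buffered whole: a goroutine writes the compressed stream into the pipe while PutBlob consumes the other end. The pattern in miniature, with compress/gzip standing in for compression.CompressStream:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// recompress mirrors the pipe-and-goroutine pattern: a goroutine compresses
// into one end of an io.Pipe while the caller reads the other end.
func recompress(src io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		zw := gzip.NewWriter(pw)
		_, err := io.Copy(zw, src)
		if cerr := zw.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
	}()
	return pr
}

func main() {
	compressed := recompress(strings.NewReader("layer data"))
	defer compressed.Close()
	zr, _ := gzip.NewReader(compressed)
	out, _ := ioutil.ReadAll(zr)
	fmt.Println(string(out)) // layer data
}
```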
logrus.Debugf("Using original blob without modification") compressionOperation = types.PreserveOriginal inputInfo = srcInfo @@ -869,6 +1169,14 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") } + uploadedInfo.Annotations = srcInfo.Annotations + + uploadedInfo.CompressionOperation = compressionOperation + // If we can modify the layer's blob, record the desired algorithm so it can be set in the manifest. + if canModifyBlob && !isConfig { + uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat + } + // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. // So, read everything from originalLayerReader, which will cause the rest to be @@ -907,14 +1215,19 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } // compressGoroutine reads all input from src and writes its compressed equivalent to dest. -func compressGoroutine(dest *io.PipeWriter, src io.Reader) { +func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) { err := errors.New("Internal error: unexpected panic in compressGoroutine") defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() }() - zipper := pgzip.NewWriter(dest) - defer zipper.Close() + compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel) + if err != nil { + return + } + defer compressor.Close() + + buf := make([]byte, compressionBufferSize) - _, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close() + _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close() } diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go similarity index 75% rename from vendor/github.com/containers/image/copy/manifest.go rename to vendor/github.com/containers/image/v5/copy/manifest.go index e8cc8a9e7a..f5f6c9c5fd 100644 --- a/vendor/github.com/containers/image/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -4,8 +4,8 @@ import ( "context" "strings" - "github.com/containers/image/manifest" - "github.com/containers/image/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -119,3 +119,36 @@ func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) { } return manifest.MIMETypeIsMultiImage(mt), nil } + +// determineListConversion takes the current MIME type of a list of manifests, +// the list of MIME types supported for a given destination, and a possible +// forced value, and returns the MIME type to which we should convert the list +// of manifests, whether we are converting to it or using it unmodified. +func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, error) { + // If we're forcing it, we prefer the forced value over everything else. + if forcedListMIMEType != "" { + return forcedListMIMEType, nil + } + // If there's no list of supported types, then anything we support is expected to be supported.
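determineListConversion (its selection loop follows in the next hunk) resolves the target list type with two priorities: prefer the current type if the destination lists it, otherwise the first list-capable type the destination supports; the loop deliberately keeps scanning so a later match on the current type wins. In isolation, with isList standing in for manifest.MIMETypeIsMultiImage:

```go
package main

import "fmt"

// pickListType mirrors determineListConversion's two priorities: keep the
// current type if the destination supports it, else take the first
// list-capable supported type.
func pickListType(current string, supported []string, isList func(string) bool) (string, error) {
	var selected string
	for _, mt := range supported {
		if selected == "" && isList(mt) {
			selected = mt // second priority: first list type offered
		}
		if mt == current {
			selected = mt // first priority: avoid an unnecessary conversion
		}
	}
	if selected == "" {
		return "", fmt.Errorf("destination does not support any manifest list types")
	}
	return selected, nil
}

func main() {
	isList := func(mt string) bool { return mt == "list/a" || mt == "list/b" }
	got, _ := pickListType("list/b", []string{"list/a", "list/b"}, isList)
	fmt.Println(got) // list/b: the current type wins over the first list type
}
```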
+ if len(destSupportedMIMETypes) == 0 { + destSupportedMIMETypes = manifest.SupportedListMIMETypes + } + var selectedType string + for i := range destSupportedMIMETypes { + // The second priority is the first member of the list of acceptable types that is a list, + // but keep going in case current type occurs later in the list. + if selectedType == "" && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) { + selectedType = destSupportedMIMETypes[i] + } + // The first priority is the current type, if it's in the list, since that lets us avoid a + // conversion that isn't strictly necessary. + if destSupportedMIMETypes[i] == currentListMIMEType { + selectedType = destSupportedMIMETypes[i] + } + } + if selectedType == "" { + return "", errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes) + } + // Done. + return selectedType, nil +} diff --git a/vendor/github.com/containers/image/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_reader.go similarity index 93% rename from vendor/github.com/containers/image/copy/progress_reader.go rename to vendor/github.com/containers/image/v5/copy/progress_reader.go index b670ee59f1..1d0c41bcea 100644 --- a/vendor/github.com/containers/image/copy/progress_reader.go +++ b/vendor/github.com/containers/image/v5/copy/progress_reader.go @@ -4,7 +4,7 @@ import ( "io" "time" - "github.com/containers/image/types" + "github.com/containers/image/v5/types" ) // progressReader is a reader that reports its progress on an interval. diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go similarity index 91% rename from vendor/github.com/containers/image/copy/sign.go rename to vendor/github.com/containers/image/v5/copy/sign.go index 91394d2b0f..8f46e9de69 100644 --- a/vendor/github.com/containers/image/copy/sign.go +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -1,8 +1,8 @@ package copy import ( - "github.com/containers/image/signature" - "github.com/containers/image/transports" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/transports" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go similarity index 89% rename from vendor/github.com/containers/image/directory/directory_dest.go rename to vendor/github.com/containers/image/v5/directory/directory_dest.go index 4b2ab022e2..2d6650de71 100644 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -7,7 +7,7 @@ import ( "os" "path/filepath" - "github.com/containers/image/types" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -199,16 +199,23 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo } // PutManifest writes manifest to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for (when +// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated +// by `manifest.Digest()`. // FIXME? 
This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte) error { - return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644) +func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error { + return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644) } -func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { +// PutSignatures writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { for i, sig := range signatures { - if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil { + if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil { return err } } @@ -219,7 +226,7 @@ func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][] // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *dirImageDestination) Commit(ctx context.Context) error { +func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error { return nil } diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go similarity index 74% rename from vendor/github.com/containers/image/directory/directory_src.go rename to vendor/github.com/containers/image/v5/directory/directory_src.go index 59b625397b..ad9129d401 100644 --- a/vendor/github.com/containers/image/directory/directory_src.go +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -6,10 +6,9 @@ import ( "io/ioutil" "os" - "github.com/containers/image/manifest" - "github.com/containers/image/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) type dirImageSource struct { @@ -38,10 +37,7 @@ func (s *dirImageSource) Close() error { // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) - } - m, err := ioutil.ReadFile(s.ref.manifestPath()) + m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest)) if err != nil { return nil, "", err } @@ -73,12 +69,9 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, errors.Errorf(`Manifests lists are not supported by "dir:"`) - } signatures := [][]byte{} for i := 0; ; i++ { - signature, err := ioutil.ReadFile(s.ref.signaturePath(i)) + signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest)) if err != nil { if os.IsNotExist(err) { break @@ -90,7 +83,14 @@ func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *dige return signatures, nil } -// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified. -func (s *dirImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. 
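The GetSignatures loop above reads numbered signature files until the first missing index. A self-contained sketch of that probing pattern, with a local directory standing in for the transport and an illustrative helper name:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// readSignatures probes signature-1, signature-2, ... and stops at the first
// index that does not exist, mirroring the loop in directory_src.go above.
func readSignatures(dir string) ([][]byte, error) {
	signatures := [][]byte{}
	for i := 0; ; i++ {
		sig, err := ioutil.ReadFile(filepath.Join(dir, fmt.Sprintf("signature-%d", i+1)))
		if err != nil {
			if os.IsNotExist(err) {
				break
			}
			return nil, err
		}
		signatures = append(signatures, sig)
	}
	return signatures, nil
}

func main() {
	sigs, err := readSignatures(".")
	fmt.Println(len(sigs), err)
}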
+func (s *dirImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { return nil, nil } diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go similarity index 92% rename from vendor/github.com/containers/image/directory/directory_transport.go rename to vendor/github.com/containers/image/v5/directory/directory_transport.go index 66b9e72585..adfec6ef37 100644 --- a/vendor/github.com/containers/image/directory/directory_transport.go +++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go @@ -6,11 +6,11 @@ import ( "path/filepath" "strings" - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -166,18 +166,24 @@ func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContex } // manifestPath returns a path for the manifest within a directory using our conventions. -func (ref dirReference) manifestPath() string { +func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string { + if instanceDigest != nil { + return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json") + } return filepath.Join(ref.path, "manifest.json") } // layerPath returns a path for a layer tarball within a directory using our conventions. func (ref dirReference) layerPath(digest digest.Digest) string { // FIXME: Should we keep the digest identification? - return filepath.Join(ref.path, digest.Hex()) + return filepath.Join(ref.path, digest.Encoded()) } // signaturePath returns a path for a signature within a directory using our conventions. 
-func (ref dirReference) signaturePath(index int) string { +func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string { + if instanceDigest != nil { + return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1)) + } return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)) } diff --git a/vendor/github.com/containers/image/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go similarity index 100% rename from vendor/github.com/containers/image/directory/explicitfilepath/path.go rename to vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go diff --git a/vendor/github.com/containers/image/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go similarity index 92% rename from vendor/github.com/containers/image/docker/archive/dest.go rename to vendor/github.com/containers/image/v5/docker/archive/dest.go index c88aea3b3a..5845f63be1 100644 --- a/vendor/github.com/containers/image/docker/archive/dest.go +++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go @@ -5,8 +5,8 @@ import ( "io" "os" - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/tarfile" + "github.com/containers/image/v5/types" "github.com/pkg/errors" ) @@ -67,6 +67,6 @@ func (d *archiveImageDestination) Close() error { // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *archiveImageDestination) Commit(ctx context.Context) error { +func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { return d.Destination.Commit(ctx) } diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go similarity index 75% rename from vendor/github.com/containers/image/docker/archive/src.go rename to vendor/github.com/containers/image/v5/docker/archive/src.go index e46c9db4a1..a90707437c 100644 --- a/vendor/github.com/containers/image/docker/archive/src.go +++ b/vendor/github.com/containers/image/v5/docker/archive/src.go @@ -2,8 +2,8 @@ package archive import ( "context" - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/tarfile" + "github.com/containers/image/v5/types" "github.com/sirupsen/logrus" ) @@ -33,8 +33,3 @@ func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSourc func (s *archiveImageSource) Reference() types.ImageReference { return s.ref } - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
-func (s *archiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go similarity index 97% rename from vendor/github.com/containers/image/docker/archive/transport.go rename to vendor/github.com/containers/image/v5/docker/archive/transport.go index f345b343c5..44213bb8dc 100644 --- a/vendor/github.com/containers/image/docker/archive/transport.go +++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go @@ -5,10 +5,10 @@ import ( "fmt" "strings" - "github.com/containers/image/docker/reference" - ctrImage "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + ctrImage "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/image/docker/cache.go b/vendor/github.com/containers/image/v5/docker/cache.go similarity index 89% rename from vendor/github.com/containers/image/docker/cache.go rename to vendor/github.com/containers/image/v5/docker/cache.go index 64ad57a7c6..728d32d170 100644 --- a/vendor/github.com/containers/image/docker/cache.go +++ b/vendor/github.com/containers/image/v5/docker/cache.go @@ -1,8 +1,8 @@ package docker import ( - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" ) // bicTransportScope returns a BICTransportScope appropriate for ref. diff --git a/vendor/github.com/containers/image/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go similarity index 98% rename from vendor/github.com/containers/image/docker/daemon/client.go rename to vendor/github.com/containers/image/v5/docker/daemon/client.go index 26f1b03b72..323a02fc09 100644 --- a/vendor/github.com/containers/image/docker/daemon/client.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -4,7 +4,7 @@ import ( "net/http" "path/filepath" - "github.com/containers/image/types" + "github.com/containers/image/v5/types" dockerclient "github.com/docker/docker/client" "github.com/docker/go-connections/tlsconfig" ) diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go similarity index 95% rename from vendor/github.com/containers/image/docker/daemon/daemon_dest.go rename to vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go index 663086ff68..25ce55a17c 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -4,9 +4,9 @@ import ( "context" "io" - "github.com/containers/image/docker/reference" - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/docker/tarfile" + "github.com/containers/image/v5/types" "github.com/docker/docker/client" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -124,7 +124,7 @@ func (d *daemonImageDestination) Reference() types.ImageReference { // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before 
Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *daemonImageDestination) Commit(ctx context.Context) error { +func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { logrus.Debugf("docker-daemon: Closing tar stream") if err := d.Destination.Commit(ctx); err != nil { return err diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go similarity index 85% rename from vendor/github.com/containers/image/docker/daemon/daemon_src.go rename to vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go index 89e66eff88..46fbcc4e0b 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go @@ -3,8 +3,8 @@ package daemon import ( "context" - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/tarfile" + "github.com/containers/image/v5/types" "github.com/pkg/errors" ) @@ -55,8 +55,3 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef func (s *daemonImageSource) Reference() types.ImageReference { return s.ref } - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *daemonImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go similarity index 97% rename from vendor/github.com/containers/image/docker/daemon/daemon_transport.go rename to vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go index 1a265bf76e..4e4ed68814 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go @@ -4,11 +4,11 @@ import ( "context" "fmt" - "github.com/containers/image/docker/policyconfiguration" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/policyconfiguration" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go similarity index 85% rename from vendor/github.com/containers/image/docker/docker_client.go rename to vendor/github.com/containers/image/v5/docker/docker_client.go index 48427f3d37..0b012c703b 100644 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -16,12 +16,12 @@ import ( "sync" "time" - "github.com/containers/image/docker/reference" - "github.com/containers/image/pkg/docker/config" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/pkg/tlsclientconfig" - "github.com/containers/image/types" - "github.com/docker/distribution/registry/client" + 
"github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/docker/config" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/pkg/tlsclientconfig" + "github.com/containers/image/v5/types" + clientLib "github.com/docker/distribution/registry/client" "github.com/docker/go-connections/tlsconfig" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -47,14 +47,7 @@ const ( extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type ) -var ( - // ErrV1NotSupported is returned when we're trying to talk to a - // docker V1 registry. - ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") - // ErrUnauthorizedForCredentials is returned when the status code returned is 401 - ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") - systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} -) +var systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: // signature represents a Docker image signature. @@ -284,14 +277,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password } defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: - return nil - case http.StatusUnauthorized: - return ErrUnauthorizedForCredentials - default: - return errors.Errorf("error occured with status code %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode)) - } + return httpResponseToError(resp) } // SearchResult holds the information of each matching image @@ -365,7 +351,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - logrus.Debugf("error getting search results from v1 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode)) + logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp)) } else { if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { return nil, err @@ -382,7 +368,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - logrus.Errorf("error getting search results from v2 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode)) + logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp)) } else { if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { return nil, err @@ -417,8 +403,78 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea // makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. +// In case of an http 429 status code in the response, it performs an exponential back off starting at 2 seconds for at most 5 iterations. +// If the `Retry-After` header is set in the response, the specified value or date is +// If the stream is non-nil, no back off will be performed. 
// TODO(runcom): too many arguments here, use a struct
 func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+	var (
+		res   *http.Response
+		err   error
+		delay int64
+	)
+	delay = 2
+	const numIterations = 5
+	const maxDelay = 60
+
+	// math.Min() only supports float64, so have an anonymous func to avoid
+	// casting.
+	min := func(a int64, b int64) int64 {
+		if a < b {
+			return a
+		}
+		return b
+	}
+
+	nextDelay := func(r *http.Response, delay int64) int64 {
+		after := r.Header.Get("Retry-After")
+		if after == "" {
+			return min(delay, maxDelay)
+		}
+		logrus.Debugf("detected 'Retry-After' header %q", after)
+		// First check if we have a numerical value.
+		if num, err := strconv.ParseInt(after, 10, 64); err == nil {
+			return min(num, maxDelay)
+		}
+		// Second, check if we have an http date.
+		// If the delta between the date and now is positive, use it.
+		// Otherwise, fall back to using the default exponential back off.
+		if t, err := http.ParseTime(after); err == nil {
+			delta := int64(t.Sub(time.Now()).Seconds())
+			if delta > 0 {
+				return min(delta, maxDelay)
+			}
+			logrus.Debugf("negative date: falling back to using %d seconds", delay)
+			return min(delay, maxDelay)
+		}
+		// If the header contains a bogus value, fall back to using the default
+		// exponential back off.
+		logrus.Debugf("invalid format: falling back to using %d seconds", delay)
+		return min(delay, maxDelay)
+	}
+
+	for i := 0; i < numIterations; i++ {
+		res, err = c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
+		if stream == nil && res != nil && res.StatusCode == http.StatusTooManyRequests {
+			if i < numIterations-1 {
+				logrus.Debugf("rate limited by %s, response headers: %v", url, res.Header)
+				delay = nextDelay(res, delay) // compute next delay - does NOT exceed maxDelay
+				logrus.Debugf("too many requests to %s: sleeping for %d seconds before next attempt", url, delay)
+				time.Sleep(time.Duration(delay) * time.Second)
+				delay = delay * 2 // exponential back off
+			}
+			continue
+		}
+		break
+	}
+	return res, err
+}
+
+// makeRequestToResolvedURLOnce creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// streamLen, if not -1, specifies the length of the data expected on stream.
+// makeRequest should generally be preferred.
+// Note that no exponential back off is performed when receiving an http 429 status code.
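To make the retry policy above concrete, here is a tiny standalone sketch (not the vendored code; the helper names are invented) that prints the delay schedule a client would observe, with and without a Retry-After hint:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

const maxDelay = 60 * time.Second

// delayFor mimics the nextDelay logic above: prefer a parseable Retry-After
// value (seconds or an HTTP date), otherwise keep the exponential fallback,
// always capped at maxDelay.
func delayFor(h http.Header, fallback time.Duration) time.Duration {
	d := fallback
	if after := h.Get("Retry-After"); after != "" {
		if secs, err := strconv.ParseInt(after, 10, 64); err == nil {
			d = time.Duration(secs) * time.Second
		} else if t, err := http.ParseTime(after); err == nil && time.Until(t) > 0 {
			d = time.Until(t)
		}
	}
	if d > maxDelay {
		d = maxDelay
	}
	return d
}

func main() {
	fallback := 2 * time.Second
	for attempt := 1; attempt <= 4; attempt++ {
		fmt.Printf("attempt %d: sleep %v\n", attempt, delayFor(http.Header{}, fallback))
		fallback *= 2 // exponential back off between 429 responses
	}
	fmt.Println("with hint:", delayFor(http.Header{"Retry-After": []string{"7"}}, fallback))
}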
+func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { req, err := http.NewRequest(method, url, stream) if err != nil { return nil, err @@ -533,7 +589,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, defer res.Body.Close() switch res.StatusCode { case http.StatusUnauthorized: - return nil, ErrUnauthorizedForCredentials + err := clientLib.HandleErrorResponse(res) + logrus.Debugf("Server response when trying to obtain an access token: \n%q", err.Error()) + return nil, ErrUnauthorizedForCredentials{Err: err} case http.StatusOK: break default: @@ -569,7 +627,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return errors.Errorf("error pinging registry %s, response code %d (%s)", c.registry, resp.StatusCode, http.StatusText(resp.StatusCode)) + return httpResponseToError(resp) } c.challenges = parseAuthHeader(resp.Header) c.scheme = scheme @@ -581,7 +639,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { err = ping("http") } if err != nil { - err = errors.Wrap(err, "pinging docker registry returned") + err = errors.Wrapf(err, "error pinging docker registry %s", c.registry) if c.sys != nil && c.sys.DockerDisableV1Ping { return err } @@ -627,9 +685,11 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe return nil, err } defer res.Body.Close() + if res.StatusCode != http.StatusOK { - return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) + return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) } + body, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go similarity index 88% rename from vendor/github.com/containers/image/docker/docker_image.go rename to vendor/github.com/containers/image/v5/docker/docker_image.go index 744667f549..dad382cd0d 100644 --- a/vendor/github.com/containers/image/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -4,13 +4,12 @@ import ( "context" "encoding/json" "fmt" - "net/http" "net/url" "strings" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/types" "github.com/pkg/errors" ) @@ -71,9 +70,8 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. 
return nil, err } defer res.Body.Close() - if res.StatusCode != http.StatusOK { - // print url also - return nil, errors.Errorf("Invalid status code returned when fetching tags list %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) + if err := httpResponseToError(res); err != nil { + return nil, err } var tagsHolder struct { diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go similarity index 87% rename from vendor/github.com/containers/image/docker/docker_image_dest.go rename to vendor/github.com/containers/image/v5/docker/docker_image_dest.go index c116cbec32..417d97aec9 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -14,12 +14,12 @@ import ( "path/filepath" "strings" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache/none" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" + v2 "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -61,6 +61,8 @@ func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { return []string{ imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, + imgspecv1.MediaTypeImageIndex, + manifest.DockerV2ListMediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, } @@ -343,20 +345,47 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types. } // PutManifest writes manifest to the destination. +// When the primary manifest is a manifest list, if instanceDigest is nil, we're saving the list +// itself, else instanceDigest contains a digest of the specific manifest instance to overwrite the +// manifest for; when the primary manifest is not a manifest list, instanceDigest should always be nil. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error { - digest, err := manifest.Digest(m) - if err != nil { - return err +func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + refTail := "" + if instanceDigest != nil { + // If the instanceDigest is provided, then use it as the refTail, because the reference, + // whether it includes a tag or a digest, refers to the list as a whole, and not this + // particular instance. + refTail = instanceDigest.String() + // Double-check that the manifest we've been given matches the digest we've been given. 
+		matches, err := manifest.MatchesDigest(m, *instanceDigest)
+		if err != nil {
+			return errors.Wrapf(err, "error digesting manifest in PutManifest")
+		}
+		if !matches {
+			manifestDigest, merr := manifest.Digest(m)
+			if merr != nil {
+				return errors.Wrapf(merr, "Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest", instanceDigest.String())
+			}
+			return errors.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
+		}
+	} else {
+		// Compute the digest of the main manifest, or the list if it's a list, so that we
+		// have a digest value to use if we're asked to save a signature for the manifest.
+		digest, err := manifest.Digest(m)
+		if err != nil {
+			return err
+		}
+		d.manifestDigest = digest
+		// The refTail should be either a digest (which we expect to match the value we just
+		// computed) or a tag name.
+		refTail, err = d.ref.tagOrDigest()
+		if err != nil {
+			return err
+		}
 	}
-	d.manifestDigest = digest
-
-	refTail, err := d.ref.tagOrDigest()
-	if err != nil {
-		return err
-	}
 	path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
 
 	headers := map[string][]string{}
@@ -416,19 +445,30 @@ func isManifestInvalidError(err error) bool {
 	}
 }
 
-func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+// PutSignatures uploads a set of signatures to the relevant lookaside or API extension point.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to upload the signatures for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
 	// Do not fail if we don’t really need to support signatures.
 	if len(signatures) == 0 {
 		return nil
 	}
+	if instanceDigest == nil {
+		if d.manifestDigest.String() == "" {
+			// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+			return errors.Errorf("Unknown manifest digest, can't add signatures")
+		}
+		instanceDigest = &d.manifestDigest
+	}
+
 	if err := d.c.detectProperties(ctx); err != nil {
 		return err
 	}
 	switch {
 	case d.c.signatureBase != nil:
-		return d.putSignaturesToLookaside(signatures)
+		return d.putSignaturesToLookaside(signatures, instanceDigest)
 	case d.c.supportsSignatures:
-		return d.putSignaturesToAPIExtension(ctx, signatures)
+		return d.putSignaturesToAPIExtension(ctx, signatures, instanceDigest)
 	default:
 		return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
 	}
@@ -436,7 +476,7 @@ func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [
 
 // putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
 // which is not nil.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, instanceDigest *digest.Digest) error {
 	// FIXME? This overwrites files one at a time, definitely not atomic.
 	// A failure when updating signatures with a reordered copy could lose some of them.
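The write side pairs with a cleanup pass: after writing signatures 0..N-1, any stale higher-numbered entries from a previous push are deleted until one is found missing, which is all a sequential reader needs. A minimal local-filesystem sketch of that pattern (a hypothetical helper, not the vendored lookaside client):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func sigPath(dir string, i int) string {
	return filepath.Join(dir, fmt.Sprintf("signature-%d", i+1))
}

// putSignatures writes the new set, then truncates leftovers so readers that
// probe signature-1, signature-2, ... stop at the right place.
func putSignatures(dir string, signatures [][]byte) error {
	for i, sig := range signatures {
		if err := ioutil.WriteFile(sigPath(dir, i), sig, 0644); err != nil {
			return err
		}
	}
	for i := len(signatures); ; i++ {
		if err := os.Remove(sigPath(dir, i)); err != nil {
			if os.IsNotExist(err) {
				return nil // first gap reached; nothing stale remains
			}
			return err
		}
	}
}

func main() {
	dir, _ := ioutil.TempDir("", "sigs")
	defer os.RemoveAll(dir)
	fmt.Println(putSignatures(dir, [][]byte{[]byte("sig-a"), []byte("sig-b")}))
}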
@@ -445,14 +485,9 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) e return nil } - if d.manifestDigest.String() == "" { - // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures - return errors.Errorf("Unknown manifest digest, can't add signatures") - } - // NOTE: Keep this in sync with docs/signature-protocols.md! for i, signature := range signatures { - url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) + url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i) if url == nil { return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") } @@ -467,7 +502,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) e // is enough for dockerImageSource to stop looking for other signatures, so that // is sufficient. for i := len(signatures); ; i++ { - url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) + url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i) if url == nil { return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") } @@ -527,22 +562,17 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error } // putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension. -func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error { +func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { // Skip dealing with the manifest digest, or reading the old state, if not necessary. if len(signatures) == 0 { return nil } - if d.manifestDigest.String() == "" { - // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures - return errors.Errorf("Unknown manifest digest, can't add signatures") - } - // Because image signatures are a shared resource in Atomic Registry, the default upload // always adds signatures. Eventually we should also allow removing signatures, // but the X-Registry-Supports-Signatures API extension does not support that yet. - existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest) + existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, *instanceDigest) if err != nil { return err } @@ -567,7 +597,7 @@ sigExists: if err != nil || n != 16 { return errors.Wrapf(err, "Error generating random signature len %d", n) } - signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes) + signatureName = fmt.Sprintf("%s@%032x", instanceDigest.String(), randBytes) if _, ok := existingSigNames[signatureName]; !ok { break } @@ -606,6 +636,6 @@ sigExists: // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) -func (d *dockerImageDestination) Commit(ctx context.Context) error { +func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error { return nil } diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go similarity index 93% rename from vendor/github.com/containers/image/docker/docker_image_src.go rename to vendor/github.com/containers/image/v5/docker/docker_image_src.go index 6951f31e93..35beb30e54 100644 --- a/vendor/github.com/containers/image/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -11,10 +11,10 @@ import ( "os" "strconv" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" "github.com/docker/distribution/registry/client" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -103,8 +103,15 @@ func (s *dockerImageSource) Close() error { return nil } -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *dockerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. 
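For readers new to this interface, a toy illustration of the contract documented above, with stand-in types: a nil return from LayerInfosForCopy means "use the manifest as-is", while a non-nil slice substitutes the BlobInfos fed to GetBlob.

package main

import "fmt"

// BlobInfo is a stand-in for types.BlobInfo, just enough for the sketch.
type BlobInfo struct {
	Digest string
	Size   int64 // may be -1 when unknown
}

// chooseLayerInfos mirrors the contract: a nil result from LayerInfosForCopy
// means the manifest's own values are fine; otherwise the returned slice
// replaces them when reading layers.
func chooseLayerInfos(fromManifest, updated []BlobInfo) []BlobInfo {
	if updated == nil {
		return fromManifest
	}
	return updated
}

func main() {
	manifestInfos := []BlobInfo{{Digest: "sha256:aaa...", Size: 123}}
	fmt.Println(chooseLayerInfos(manifestInfos, nil))
	fmt.Println(chooseLayerInfos(manifestInfos, []BlobInfo{{Digest: "sha256:bbb...", Size: -1}}))
}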
+func (s *dockerImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
 	return nil, nil
 }
 
@@ -232,9 +239,8 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 	if err != nil {
 		return nil, 0, err
 	}
-	if res.StatusCode != http.StatusOK {
-		// print url also
-		return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+	if err := httpResponseToError(res); err != nil {
+		return nil, 0, err
 	}
 	cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
 	return res.Body, getBlobSize(res), nil
diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go
similarity index 97%
rename from vendor/github.com/containers/image/docker/docker_transport.go
rename to vendor/github.com/containers/image/v5/docker/docker_transport.go
index 45da7c96f5..8b8e579683 100644
--- a/vendor/github.com/containers/image/docker/docker_transport.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go
@@ -5,10 +5,10 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/containers/image/docker/policyconfiguration"
-	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/transports"
-	"github.com/containers/image/types"
+	"github.com/containers/image/v5/docker/policyconfiguration"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
 	"github.com/pkg/errors"
 )
diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go
new file mode 100644
index 0000000000..860868f415
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/errors.go
@@ -0,0 +1,43 @@
+package docker
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/docker/distribution/registry/client"
+	perrors "github.com/pkg/errors"
+)
+
+var (
+	// ErrV1NotSupported is returned when we're trying to talk to a
+	// docker V1 registry.
+	ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
+	// ErrTooManyRequests is returned when the status code returned is 429
+	ErrTooManyRequests = errors.New("too many requests to registry")
+)
+
+// ErrUnauthorizedForCredentials is returned when the status code returned is 401
+type ErrUnauthorizedForCredentials struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+	Err error
+}
+
+func (e ErrUnauthorizedForCredentials) Error() string {
+	return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error())
+}
+
+// httpResponseToError translates the http.Response into an error. It returns
+// nil if the response is not considered an error.
+func httpResponseToError(res *http.Response) error { + switch res.StatusCode { + case http.StatusOK: + return nil + case http.StatusTooManyRequests: + return ErrTooManyRequests + case http.StatusUnauthorized: + err := client.HandleErrorResponse(res) + return ErrUnauthorizedForCredentials{Err: err} + default: + return perrors.Errorf("invalid status code from registry %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) + } +} diff --git a/vendor/github.com/containers/image/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go similarity index 98% rename from vendor/github.com/containers/image/docker/lookaside.go rename to vendor/github.com/containers/image/v5/docker/lookaside.go index 860f1ad5e2..918c0f8389 100644 --- a/vendor/github.com/containers/image/docker/lookaside.go +++ b/vendor/github.com/containers/image/v5/docker/lookaside.go @@ -9,8 +9,8 @@ import ( "path/filepath" "strings" - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" "github.com/ghodss/yaml" "github.com/opencontainers/go-digest" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go similarity index 97% rename from vendor/github.com/containers/image/docker/policyconfiguration/naming.go rename to vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go index 31bbb544c6..61d9aab9a9 100644 --- a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go +++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go @@ -3,7 +3,7 @@ package policyconfiguration import ( "strings" - "github.com/containers/image/docker/reference" + "github.com/containers/image/v5/docker/reference" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/image/v5/docker/reference/README.md b/vendor/github.com/containers/image/v5/docker/reference/README.md new file mode 100644 index 0000000000..3c4d74eb4d --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/README.md @@ -0,0 +1,2 @@ +This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, +except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/github.com/containers/image/v5/docker/reference/helpers.go b/vendor/github.com/containers/image/v5/docker/reference/helpers.go new file mode 100644 index 0000000000..978df7eabb --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. 
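The switch from sentinel error values to the ErrUnauthorizedForCredentials struct type changes how callers test for it. A sketch of consumer-side handling, with local stand-ins so the snippet is self-contained (the real types live in docker/errors.go above):

package main

import (
	"errors"
	"fmt"
)

// Local stand-ins for the vendored docker package's errors.
var errTooManyRequests = errors.New("too many requests to registry")

type errUnauthorizedForCredentials struct{ Err error }

func (e errUnauthorizedForCredentials) Error() string {
	return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error())
}

// classify shows the consumer side: the 429 sentinel still compares with ==,
// while the 401 case now needs a type assertion because it carries a cause.
func classify(err error) string {
	if err == errTooManyRequests {
		return "back off and retry later"
	}
	if _, ok := err.(errUnauthorizedForCredentials); ok {
		return "refresh or re-prompt for credentials"
	}
	return "treat as fatal"
}

func main() {
	fmt.Println(classify(errTooManyRequests))
	fmt.Println(classify(errUnauthorizedForCredentials{Err: errors.New("401 Unauthorized")}))
}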
+func FamiliarString(ref Reference) string {
+	if nn, ok := ref.(normalizedNamed); ok {
+		return nn.Familiar().String()
+	}
+	return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+	matched, err := path.Match(pattern, FamiliarString(ref))
+	if namedRef, isNamed := ref.(Named); isNamed && !matched {
+		matched, _ = path.Match(pattern, FamiliarName(namedRef))
+	}
+	return matched, err
+}
diff --git a/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go
new file mode 100644
index 0000000000..6a86ec64fd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/normalize.go
@@ -0,0 +1,181 @@
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	legacyDefaultDomain = "index.docker.io"
+	defaultDomain       = "docker.io"
+	officialRepoName    = "library"
+	defaultTag          = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+	Named
+	Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. If the reference contains both a tag
+// and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested, only
+			// return digested.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
+
+// splitDockerDomain splits a repository name into domain and remote-name strings.
+// If no valid domain is found, the default domain is used. The repository name
+// needs to be validated beforehand.
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
+func familiarizeName(named namedRepository) repository {
+	repo := repository{
+		domain: named.Domain(),
+		path:   named.Path(),
+	}
+
+	if repo.domain == defaultDomain {
+		repo.domain = ""
+		// Handle official repositories which have the pattern "library/"
+		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+			repo.path = split[1]
+		}
+	}
+	return repo
+}
+
+func (r reference) Familiar() Named {
+	return reference{
+		namedRepository: familiarizeName(r.namedRepository),
+		tag:             r.tag,
+		digest:          r.digest,
+	}
+}
+
+func (r repository) Familiar() Named {
+	return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+	return taggedReference{
+		namedRepository: familiarizeName(t.namedRepository),
+		tag:             t.tag,
+	}
+}
+
+func (c canonicalReference) Familiar() Named {
+	return canonicalReference{
+		namedRepository: familiarizeName(c.namedRepository),
+		digest:          c.digest,
+	}
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+	if IsNameOnly(ref) {
+		namedTagged, err := WithTag(ref, defaultTag)
+		if err != nil {
+			// Default tag must be valid, to create a NamedTagged
+			// type with non-validated input the WithTag function
+			// should be used instead
+			panic(err)
+		}
+		return namedTagged
+	}
+	return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
+func ParseAnyReference(ref string) (Reference, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+		return digestReference("sha256:" + ref), nil
+	}
+	if dgst, err := digest.Parse(ref); err == nil {
+		return digestReference(dgst), nil
+	}
+
+	return ParseNormalizedNamed(ref)
+}
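A quick demonstration of the normalization rules implemented above, using the vendored package's public API; the output comments reflect the default-domain, library/-prefix, and latest-tag rules:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.Name())                          // docker.io/library/ubuntu
	fmt.Println(reference.FamiliarName(named))         // ubuntu
	fmt.Println(reference.TagNameOnly(named).String()) // docker.io/library/ubuntu:latest
}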
diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go
new file mode 100644
index 0000000000..8c0c23b2fe
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go
@@ -0,0 +1,433 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+//	reference                       := name [ ":" tag ] [ "@" digest ]
+//	name                            := [domain '/'] path-component ['/' path-component]*
+//	domain                          := domain-component ['.' domain-component]* [':' port-number]
+//	domain-component                := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+//	port-number                     := /[0-9]+/
+//	path-component                  := alpha-numeric [separator alpha-numeric]*
+//	alpha-numeric                   := /[a-z0-9]+/
+//	separator                       := /[_.]|__|[-]*/
+//
+//	tag                             := /[\w][\w.-]{0,127}/
+//
+//	digest                          := digest-algorithm ":" digest-hex
+//	digest-algorithm                := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
+//	digest-algorithm-separator      := /[+.-_]/
+//	digest-algorithm-component      := /[A-Za-z][A-Za-z0-9]*/
+//	digest-hex                      := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+//	identifier                      := /[a-f0-9]{64}/
+//	short-identifier                := /[a-f0-9]{6,64}/
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+)
+
+const (
+	// NameTotalLengthMax is the maximum total number of characters in a repository name.
+	NameTotalLengthMax = 255
+)
+
+var (
+	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+	// ErrNameNotCanonical is returned when a name is not canonical.
+	ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+	// String returns the full reference
+	String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+	reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+	return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+	return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+	return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+	r, err := Parse(string(p))
+	if err != nil {
+		return err
+	}
+
+	f.reference = r
+	return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+	Reference
+	Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+	Reference
+	Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+	Named
+	Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced
+type Digested interface {
+	Reference
+	Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name including a name with domain and digest
+type Canonical interface {
+	Named
+	Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface {
+	Named
+	Domain() string
+	Path() string
+}
+
+// Domain returns the domain part of the Named reference
+func Domain(named Named) string {
+	if r, ok := named.(namedRepository); ok {
+		return r.Domain()
+	}
+	domain, _ := splitDomain(named.Name())
+	return domain
+}
+
+// Path returns the name without the domain part of the Named reference
+func Path(named Named) (name string) {
+	if r, ok := named.(namedRepository); ok {
+		return r.Path()
+	}
+	_, path := splitDomain(named.Name())
+	return path
+}
+
+func splitDomain(name string) (string, string) {
+	match := anchoredNameRegexp.FindStringSubmatch(name)
+	if len(match) != 3 {
+		return "", name
+	}
+	return match[1], match[2]
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name.
+// DEPRECATED: Use Domain or Path
+func SplitHostname(named Named) (string, string) {
+	if r, ok := named.(namedRepository); ok {
+		return r.Domain(), r.Path()
+	}
+	return splitDomain(named.Name())
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
+func Parse(s string) (Reference, error) {
+	matches := ReferenceRegexp.FindStringSubmatch(s)
+	if matches == nil {
+		if s == "" {
+			return nil, ErrNameEmpty
+		}
+		if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+			return nil, ErrNameContainsUppercase
+		}
+		return nil, ErrReferenceInvalidFormat
+	}
+
+	if len(matches[1]) > NameTotalLengthMax {
+		return nil, ErrNameTooLong
+	}
+
+	var repo repository
+
+	nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
+	if len(nameMatch) == 3 {
+		repo.domain = nameMatch[1]
+		repo.path = nameMatch[2]
+	} else {
+		repo.domain = ""
+		repo.path = matches[1]
+	}
+
+	ref := reference{
+		namedRepository: repo,
+		tag:             matches[2],
+	}
+	if matches[3] != "" {
+		var err error
+		ref.digest, err = digest.Parse(matches[3])
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	r := getBestReferenceType(ref)
+	if r == nil {
+		return nil, ErrNameEmpty
+	}
+
+	return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name and be in the canonical
+// form, otherwise an error is returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
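Before ParseNamed itself, a sketch of how the grammar above maps onto the concrete interfaces: a bare name parses to a plain Named, name:tag to NamedTagged, and name@digest to Canonical.

package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/v5/docker/reference"
)

func describe(s string) {
	ref, err := reference.Parse(s)
	if err != nil {
		fmt.Println(s, "->", err)
		return
	}
	_, isTagged := ref.(reference.NamedTagged)
	_, isCanonical := ref.(reference.Canonical)
	fmt.Printf("%-75s tagged=%-5v canonical=%v\n", s, isTagged, isCanonical)
}

func main() {
	describe("quay.io/coreos/etcd")
	describe("quay.io/coreos/etcd:v3.4.3")
	describe("quay.io/coreos/etcd@sha256:" + strings.Repeat("0", 64))
}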
+func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go new file mode 100644 index 0000000000..7860349320 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. 
This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. 
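+//
+// For intuition, these helpers only concatenate and wrap the pattern
+// strings of their inputs; e.g. (an illustrative sketch):
+//
+//	re := anchored(optional(match(`[a-z]+`), literal(`.`)))
+//	// re.String() == `^(?:[a-z]+\.)?$`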
+func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go similarity index 93% rename from vendor/github.com/containers/image/docker/tarfile/dest.go rename to vendor/github.com/containers/image/v5/docker/tarfile/dest.go index 5f30eddbc7..b02c60bb3d 100644 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go @@ -12,10 +12,10 @@ import ( "path/filepath" "time" - "github.com/containers/image/docker/reference" - "github.com/containers/image/internal/tmpdir" - "github.com/containers/image/manifest" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -195,10 +195,15 @@ func (d *Destination) createRepositoriesFile(rootLayerID string) error { } // PutManifest writes manifest to the destination. +// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so +// there can be no secondary manifests. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *Destination) PutManifest(ctx context.Context, m []byte) error { +func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.New(`Manifest lists are not supported for docker tar files`) + } // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, // so the caller trying a different manifest kind would be pointless. var man manifest.Schema2 @@ -390,10 +395,13 @@ func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader return nil } -// PutSignatures adds the given signatures to the docker tarfile (currently not -// supported). MUST be called after PutManifest (signatures reference manifest -// contents) -func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte) error { +// PutSignatures would add the given signatures to the docker tarfile (currently not supported). 
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so +// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents). +func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.Errorf(`Manifest lists are not supported for docker tar files`) + } if len(signatures) != 0 { return errors.Errorf("Storing signatures for docker tar files is not supported") } diff --git a/vendor/github.com/containers/image/docker/tarfile/doc.go b/vendor/github.com/containers/image/v5/docker/tarfile/doc.go similarity index 100% rename from vendor/github.com/containers/image/docker/tarfile/doc.go rename to vendor/github.com/containers/image/v5/docker/tarfile/doc.go diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/tarfile/src.go similarity index 92% rename from vendor/github.com/containers/image/docker/tarfile/src.go rename to vendor/github.com/containers/image/v5/docker/tarfile/src.go index dd5d78fe8e..ad0a3d2cb4 100644 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ b/vendor/github.com/containers/image/v5/docker/tarfile/src.go @@ -11,10 +11,10 @@ import ( "path" "sync" - "github.com/containers/image/internal/tmpdir" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/compression" - "github.com/containers/image/types" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -349,10 +349,12 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary instances. func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { if instanceDigest != nil { // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. - return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) + return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`) } if s.generatedManifest == nil { if err := s.ensureCachedDataIsPresent(); err != nil { @@ -466,9 +468,8 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as there can be no secondary manifests. 
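+//
+// A rough usage sketch (illustrative only; src is an assumed *Source):
+//
+//	sigs, err := src.GetSignatures(ctx, nil) // nil selects the primary manifest
+//	// any non-nil instanceDigest is rejected by this transport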
func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { if instanceDigest != nil { // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. @@ -476,3 +477,14 @@ func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Diges } return [][]byte{}, nil } + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary manifests. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/v5/docker/tarfile/types.go similarity index 94% rename from vendor/github.com/containers/image/docker/tarfile/types.go rename to vendor/github.com/containers/image/v5/docker/tarfile/types.go index e81d939307..ac222528a0 100644 --- a/vendor/github.com/containers/image/docker/tarfile/types.go +++ b/vendor/github.com/containers/image/v5/docker/tarfile/types.go @@ -1,7 +1,7 @@ package tarfile import ( - "github.com/containers/image/manifest" + "github.com/containers/image/v5/manifest" "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/containers/image/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go similarity index 100% rename from vendor/github.com/containers/image/docker/wwwauthenticate.go rename to vendor/github.com/containers/image/v5/docker/wwwauthenticate.go diff --git a/vendor/github.com/containers/image/v5/image/docker_list.go b/vendor/github.com/containers/image/v5/image/docker_list.go new file mode 100644 index 0000000000..651c301aa3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_list.go @@ -0,0 +1,34 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + list, err := manifest.Schema2ListFromManifest(manblob) + if err != nil { + return nil, errors.Wrapf(err, "Error parsing schema2 manifest list") + } + targetManifestDigest, err := list.ChooseInstance(sys) + if err != nil { + return nil, errors.Wrapf(err, "Error choosing image instance") + } + manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) + if err != nil { + return nil, errors.Wrapf(err, "Error loading manifest for target platform") + } + + matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) + if err != nil { + return nil, errors.Wrap(err, "Error computing manifest digest") + } + if !matches { + return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) + } + + return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) +} diff --git a/vendor/github.com/containers/image/image/docker_schema1.go 
b/vendor/github.com/containers/image/v5/image/docker_schema1.go similarity index 98% rename from vendor/github.com/containers/image/image/docker_schema1.go rename to vendor/github.com/containers/image/v5/image/docker_schema1.go index 28cec7ddb1..1a1c39d55b 100644 --- a/vendor/github.com/containers/image/image/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/image/docker_schema1.go @@ -3,9 +3,9 @@ package image import ( "context" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go similarity index 94% rename from vendor/github.com/containers/image/image/docker_schema2.go rename to vendor/github.com/containers/image/v5/image/docker_schema2.go index 351e73ea1d..254c13f789 100644 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -6,13 +6,14 @@ import ( "crypto/sha256" "encoding/hex" "encoding/json" + "fmt" "io/ioutil" "strings" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache/none" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -207,12 +208,17 @@ func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Imag layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) for idx := range layers { layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) - if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { + switch m.m.LayersDescriptors[idx].MediaType { + case manifest.DockerV2Schema2ForeignLayerMediaType: layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable - } else { - // we assume layers are gzip'ed because docker v2s2 only deals with - // gzip'ed layers. However, OCI has non-gzip'ed layers as well. 
+ case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip + case manifest.DockerV2SchemaLayerMediaTypeUncompressed: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema2LayerMediaType: layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip + default: + return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) } } diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/v5/image/manifest.go similarity index 94% rename from vendor/github.com/containers/image/image/manifest.go rename to vendor/github.com/containers/image/v5/image/manifest.go index c5ca5b12e2..fe66da1576 100644 --- a/vendor/github.com/containers/image/image/manifest.go +++ b/vendor/github.com/containers/image/v5/image/manifest.go @@ -4,9 +4,9 @@ import ( "context" "fmt" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -58,6 +58,8 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src return manifestSchema2FromManifest(src, manblob) case manifest.DockerV2ListMediaType: return manifestSchema2FromManifestList(ctx, sys, src, manblob) + case imgspecv1.MediaTypeImageIndex: + return manifestOCI1FromImageIndex(ctx, sys, src, manblob) default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) } diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/v5/image/memory.go similarity index 98% rename from vendor/github.com/containers/image/image/memory.go rename to vendor/github.com/containers/image/v5/image/memory.go index 0be69eca40..4c96b37d88 100644 --- a/vendor/github.com/containers/image/image/memory.go +++ b/vendor/github.com/containers/image/v5/image/memory.go @@ -3,9 +3,8 @@ package image import ( "context" + "github.com/containers/image/v5/types" "github.com/pkg/errors" - - "github.com/containers/image/types" ) // memoryImage is a mostly-implementation of types.Image assembled from data diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go similarity index 85% rename from vendor/github.com/containers/image/image/oci.go rename to vendor/github.com/containers/image/v5/image/oci.go index cdff26e06a..18a38d463e 100644 --- a/vendor/github.com/containers/image/image/oci.go +++ b/vendor/github.com/containers/image/v5/image/oci.go @@ -3,12 +3,13 @@ package image import ( "context" "encoding/json" + "fmt" "io/ioutil" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache/none" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -187,7 +188,22 @@ func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { 
layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) for idx := range layers { layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) - layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType + switch layers[idx].MediaType { + case imgspecv1.MediaTypeImageLayerNonDistributable: + layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType + case imgspecv1.MediaTypeImageLayerNonDistributableGzip: + layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip + case imgspecv1.MediaTypeImageLayerNonDistributableZstd: + return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) + case imgspecv1.MediaTypeImageLayer: + layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed + case imgspecv1.MediaTypeImageLayerGzip: + layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType + case imgspecv1.MediaTypeImageLayerZstd: + return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) + default: + return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) + } } // Rather than copying the ConfigBlob now, we just pass m.src to the diff --git a/vendor/github.com/containers/image/v5/image/oci_index.go b/vendor/github.com/containers/image/v5/image/oci_index.go new file mode 100644 index 0000000000..022e03aca4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/oci_index.go @@ -0,0 +1,34 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + index, err := manifest.OCI1IndexFromManifest(manblob) + if err != nil { + return nil, errors.Wrapf(err, "Error parsing OCI1 index") + } + targetManifestDigest, err := index.ChooseInstance(sys) + if err != nil { + return nil, errors.Wrapf(err, "Error choosing image instance") + } + manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) + if err != nil { + return nil, errors.Wrapf(err, "Error loading manifest for target platform") + } + + matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) + if err != nil { + return nil, errors.Wrap(err, "Error computing manifest digest") + } + if !matches { + return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) + } + + return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) +} diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go similarity index 97% rename from vendor/github.com/containers/image/image/sourced.go rename to vendor/github.com/containers/image/v5/image/sourced.go index 01cc28bbd2..3a016e1d09 100644 --- a/vendor/github.com/containers/image/image/sourced.go +++ b/vendor/github.com/containers/image/v5/image/sourced.go @@ -5,7 +5,8 @@ package image import ( "context" - "github.com/containers/image/types" + + "github.com/containers/image/v5/types" ) // imageCloser implements types.ImageCloser, perhaps allowing simple users @@ -99,5 +100,5 @@ func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { } func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return 
i.UnparsedImage.src.LayerInfosForCopy(ctx) + return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest) } diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go similarity index 96% rename from vendor/github.com/containers/image/image/unparsed.go rename to vendor/github.com/containers/image/v5/image/unparsed.go index 2c9280d35e..4e3028d855 100644 --- a/vendor/github.com/containers/image/image/unparsed.go +++ b/vendor/github.com/containers/image/v5/image/unparsed.go @@ -3,9 +3,9 @@ package image import ( "context" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/image/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go similarity index 83% rename from vendor/github.com/containers/image/pkg/keyctl/key.go rename to vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go index e4396a9df7..88e123cdd1 100644 --- a/vendor/github.com/containers/image/pkg/keyctl/key.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go @@ -62,3 +62,12 @@ func (k *Key) Unlink() error { _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0) return err } + +// Describe returns a string describing the attributes of a specified key +func (k *Key) Describe() (string, error) { + keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id)) + if err != nil { + return "", err + } + return keyAttr, nil +} diff --git a/vendor/github.com/containers/image/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go similarity index 69% rename from vendor/github.com/containers/image/pkg/keyctl/keyring.go rename to vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go index 6e029c9235..4bf1701562 100644 --- a/vendor/github.com/containers/image/pkg/keyctl/keyring.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go @@ -11,6 +11,8 @@ package keyctl import ( + "unsafe" + "golang.org/x/sys/unix" ) @@ -77,3 +79,42 @@ func Link(parent Keyring, child ID) error { _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0) return err } + +// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it +func ReadUserKeyring() ([]*Key, error) { + var ( + b []byte + err error + sizeRead int + ) + krSize := 4 + size := krSize + b = make([]byte, size) + sizeRead = size + 1 + for sizeRead > size { + r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size) + if err != nil { + return nil, err + } + + if sizeRead = int(r1); sizeRead > size { + b = make([]byte, sizeRead) + size = sizeRead + sizeRead = size + 1 + } else { + krSize = sizeRead + } + } + keyIDs := getKeyIDsFromByte(b[:krSize]) + return keyIDs, err +} + +func getKeyIDsFromByte(byteKeyIDs []byte) []*Key { + idSize := 4 + var keys []*Key + for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize { + tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx])) + keys = append(keys, &Key{id: keyID(tempID)}) + } + return keys +} diff --git a/vendor/github.com/containers/image/pkg/keyctl/perm.go 
b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go similarity index 100% rename from vendor/github.com/containers/image/pkg/keyctl/perm.go rename to vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go diff --git a/vendor/github.com/containers/image/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go similarity index 100% rename from vendor/github.com/containers/image/pkg/keyctl/sys_linux.go rename to vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go diff --git a/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go similarity index 100% rename from vendor/github.com/containers/image/internal/tmpdir/tmpdir.go rename to vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go new file mode 100644 index 0000000000..58527d7137 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -0,0 +1,316 @@ +package manifest + +import ( + "encoding/json" + "regexp" + "strings" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + "github.com/docker/docker/api/types/versions" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. +type Schema1FSLayers struct { + BlobSum digest.Digest `json:"blobSum"` +} + +// Schema1History is an entry of the "history" array in docker/distribution schema 1. +type Schema1History struct { + V1Compatibility string `json:"v1Compatibility"` +} + +// Schema1 is a manifest in docker/distribution schema 1. +type Schema1 struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []Schema1FSLayers `json:"fsLayers"` + History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility! + ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image) + SchemaVersion int `json:"schemaVersion"` +} + +type schema1V1CompatibilityContainerConfig struct { + Cmd []string +} + +// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. +type Schema1V1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` +} + +// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. +// (NOTE: The instance is not necessary a literal representation of the original blob, +// layers with duplicate IDs are eliminated.) 
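+//
+// A minimal usage sketch (illustrative; blob is assumed to hold a schema 1
+// manifest as JSON):
+//
+//	s1, err := Schema1FromManifest(blob)
+//	if err != nil {
+//		return err
+//	}
+//	layers := s1.LayerInfos() // root layer first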
+func Schema1FromManifest(manifest []byte) (*Schema1, error) {
+	s1 := Schema1{}
+	if err := json.Unmarshal(manifest, &s1); err != nil {
+		return nil, err
+	}
+	if s1.SchemaVersion != 1 {
+		return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	if err := s1.fixManifestLayers(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
+}
+
+// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
+	var name, tag string
+	if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+		name = reference.Path(ref)
+		if tagged, ok := ref.(reference.NamedTagged); ok {
+			tag = tagged.Tag()
+		}
+	}
+	s1 := Schema1{
+		Name:          name,
+		Tag:           tag,
+		Architecture:  architecture,
+		FSLayers:      fsLayers,
+		History:       history,
+		SchemaVersion: 1,
+	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
+}
+
+// Schema1Clone creates a copy of the supplied Schema1 manifest.
+func Schema1Clone(src *Schema1) *Schema1 {
+	copy := *src
+	return &copy
+}
+
+// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
+func (m *Schema1) initialize() error {
+	if len(m.FSLayers) != len(m.History) {
+		return errors.New("length of history not equal to number of layers")
+	}
+	if len(m.FSLayers) == 0 {
+		return errors.New("no FSLayers in manifest")
+	}
+	m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
+	for i, h := range m.History {
+		if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
+			return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i)
+		}
+	}
+	return nil
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema1) ConfigInfo() types.BlobInfo {
+	return types.BlobInfo{}
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema1) LayerInfos() []LayerInfo {
+	layers := make([]LayerInfo, len(m.FSLayers))
+	for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+		layers[(len(m.FSLayers)-1)-i] = LayerInfo{
+			BlobInfo:   types.BlobInfo{Digest: layer.BlobSum, Size: -1},
+			EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
+		}
+	}
+	return layers
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+	// Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
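+	// (Illustration, not vendored code: with three layers, the root entry
+	// layerInfos[0] is stored into m.FSLayers[2], matching the reversed
+	// order that LayerInfos() reports.)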
+ if len(m.FSLayers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) + } + m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) + for i, info := range layerInfos { + // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, + // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. + // So, we don't bother recomputing the IDs in m.History.V1Compatibility. + m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema1) Serialize() ([]byte, error) { + // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. + unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return AddDummyV2S1Signature(unsigned) +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), +// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, +// both from m.History and m.FSLayers). +// Note that even after this succeeds, m.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem). +func (m *Schema1) fixManifestLayers() error { + // m.initialize() has verified that len(m.FSLayers) == len(m.History) + for _, compat := range m.ExtractedV1Compatibility { + if err := validateV1ID(compat.ID); err != nil { + return err + } + } + if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + var lastID string + for _, img := range m.ExtractedV1Compatibility { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + // backwards loop so that we keep the remaining indexes after removing items + for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { + if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) + } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) + } + } + return nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return errors.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
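+//
+// Illustrative note: schema 1 stores its configuration inline in the
+// history, so the blob-getter argument is ignored here and nil is fine:
+//
+//	info, err := m.Inspect(nil)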
+func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema2V1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: m.Tag, + Created: &s1.Created, + DockerVersion: s1.DockerVersion, + Architecture: s1.Architecture, + Os: s1.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if s1.Config != nil { + i.Labels = s1.Config.Labels + i.Env = s1.Config.Env + } + return i, nil +} + +// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. +func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. + if len(m.History) == 0 { + return nil, errors.New("image has no layers") + } + s1 := Schema2V1Image{} + config := []byte(m.History[0].V1Compatibility) + err := json.Unmarshal(config, &s1) + if err != nil { + return nil, errors.Wrapf(err, "error decoding configuration") + } + // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, + // adding some fields that aren't "omitempty". + if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { + config, err = json.Marshal(&s1) + if err != nil { + return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s1) + } + } + // Build the history. + convertedHistory := []Schema2History{} + for _, compat := range m.ExtractedV1Compatibility { + hitem := Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Author: compat.Author, + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. It's expected (but not enforced by us) + // that the number of diffIDs corresponds to the number of non-EmptyLayer + // entries in the history. + rootFS := &Schema2RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1) + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. + rootfs, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(convertedHistory) + if err != nil { + return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result. + config, err = json.Marshal(raw) + if err != nil { + return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err) + } + return config, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. 
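+//
+// A rough sketch (illustrative; diffIDs are assumed values for the
+// uncompressed layer digests):
+//
+//	id, err := m.ImageID(diffIDs)
+//	// id is the hex form of digest.FromBytes() over the schema 2 config
+//	// generated by ToSchema2Config(diffIDs)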
+func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { + image, err := m.ToSchema2Config(diffIDs) + if err != nil { + return "", err + } + return digest.FromBytes(image).Hex(), nil +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go new file mode 100644 index 0000000000..d768d6e114 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -0,0 +1,349 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/pkg/strslice" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} + +// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. +func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + MediaType: desc.MediaType, + } +} + +// Schema2 is a manifest in docker/distribution schema 2. +type Schema2 struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor Schema2Descriptor `json:"config"` + LayersDescriptors []Schema2Descriptor `json:"layers"` +} + +// Schema2Port is a Port, a string containing port number and protocol in the +// format "80/tcp", from docker/go-connections/nat. +type Schema2Port string + +// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from +// docker/go-connections/nat. +type Schema2PortSet map[Schema2Port]struct{} + +// Schema2HealthConfig is a HealthConfig, which holds configuration settings +// for the HEALTHCHECK feature, from docker/docker/api/types/container. +type Schema2HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Schema2Config is a Config in docker/docker/api/types/container. 
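+//
+// For example, a config carrying the HEALTHCHECK settings described above
+// might be built like this (an illustrative sketch; the image name and
+// command are assumptions):
+//
+//	cfg := Schema2Config{
+//		Image: "example.com/app:latest",
+//		Healthcheck: &Schema2HealthConfig{
+//			Test:    []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
+//			Retries: 3,
+//		},
+//	}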
+type Schema2Config struct {
+	Hostname     string               // Hostname
+	Domainname   string               // Domainname
+	User         string               // User that will run the command(s) inside the container; also supports user:group
+	AttachStdin  bool                 // Attach the standard input, making user interaction possible
+	AttachStdout bool                 // Attach the standard output
+	AttachStderr bool                 // Attach the standard error
+	ExposedPorts Schema2PortSet       `json:",omitempty"` // List of exposed ports
+	Tty          bool                 // Attach standard streams to a tty, including stdin if it is not closed.
+	OpenStdin    bool                 // Open stdin
+	StdinOnce    bool                 // If true, close stdin after the first attached client disconnects.
+	Env          []string             // List of environment variables to set in the container
+	Cmd          strslice.StrSlice    // Command to run when starting the container
+	Healthcheck  *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check that the container is healthy
+	ArgsEscaped  bool                 `json:",omitempty"` // True if command is already escaped (Windows specific)
+	Image        string               // Name of the image as it was passed by the operator (e.g. could be symbolic)
+	Volumes      map[string]struct{}  // List of volumes (mounts) used for the container
+	WorkingDir   string               // Current directory (PWD) in which the command will be launched
+	Entrypoint   strslice.StrSlice    // Entrypoint to run when starting the container
+	NetworkDisabled bool              `json:",omitempty"` // Is network disabled
+	MacAddress   string               `json:",omitempty"` // Mac Address of the container
+	OnBuild      []string             // ONBUILD metadata that was defined in the image Dockerfile
+	Labels       map[string]string    // List of labels set on this container
+	StopSignal   string               `json:",omitempty"` // Signal to stop a container
+	StopTimeout  *int                 `json:",omitempty"` // Timeout (in seconds) to stop a container
+	Shell        strslice.StrSlice    `json:",omitempty"` // Shell for the shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// Schema2V1Image is a V1Image in docker/docker/image.
+type Schema2V1Image struct {
+	// ID is a unique 64 character identifier of the image
+	ID string `json:"id,omitempty"`
+	// Parent is the ID of the parent image
+	Parent string `json:"parent,omitempty"`
+	// Comment is the commit message that was set when committing the image
+	Comment string `json:"comment,omitempty"`
+	// Created is the timestamp at which the image was created
+	Created time.Time `json:"created"`
+	// Container is the ID of the container used to commit
+	Container string `json:"container,omitempty"`
+	// ContainerConfig is the configuration of the container that is committed into the image
+	ContainerConfig Schema2Config `json:"container_config,omitempty"`
+	// DockerVersion specifies the version of Docker that was used to build the image
+	DockerVersion string `json:"docker_version,omitempty"`
+	// Author is the name of the author that was specified when committing the image
+	Author string `json:"author,omitempty"`
+	// Config is the configuration of the container received from the client
+	Config *Schema2Config `json:"config,omitempty"`
+	// Architecture is the hardware that the image is built on and runs on
+	Architecture string `json:"architecture,omitempty"`
+	// OS is the operating system used to build and run the image
+	OS string `json:"os,omitempty"`
+	// Size is the total size of the image including all layers it is composed of
+	Size int64 `json:",omitempty"`
+}
+
+// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
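+//
+// Its serialized form looks like (illustrative):
+//
+//	{"type": "layers", "diff_ids": ["sha256:…", "sha256:…"]}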
+type Schema2RootFS struct {
+	Type    string          `json:"type"`
+	DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// Schema2History stores build commands that were used to create an image, from docker/docker/image.
+type Schema2History struct {
+	// Created is the timestamp at which the image was created
+	Created time.Time `json:"created"`
+	// Author is the name of the author that was specified when committing the image
+	Author string `json:"author,omitempty"`
+	// CreatedBy keeps the Dockerfile command used while building the image
+	CreatedBy string `json:"created_by,omitempty"`
+	// Comment is the commit message that was set when committing the image
+	Comment string `json:"comment,omitempty"`
+	// EmptyLayer is set to true if this history item did not generate a
+	// layer. Otherwise, the history item is associated with the next
+	// layer in the RootFS section.
+	EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Schema2Image is an Image in docker/docker/image.
+type Schema2Image struct {
+	Schema2V1Image
+	Parent     digest.Digest    `json:"parent,omitempty"`
+	RootFS     *Schema2RootFS   `json:"rootfs,omitempty"`
+	History    []Schema2History `json:"history,omitempty"`
+	OSVersion  string           `json:"os.version,omitempty"`
+	OSFeatures []string         `json:"os.features,omitempty"`
+}
+
+// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
+func Schema2FromManifest(manifest []byte) (*Schema2, error) {
+	s2 := Schema2{}
+	if err := json.Unmarshal(manifest, &s2); err != nil {
+		return nil, err
+	}
+	// Check manifest's and layers' media types.
+	if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
+		return nil, err
+	}
+	for _, layer := range s2.LayersDescriptors {
+		if err := SupportedSchema2MediaType(layer.MediaType); err != nil {
+			return nil, err
+		}
+	}
+	return &s2, nil
+}
+
+// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
+	return &Schema2{
+		SchemaVersion:     2,
+		MediaType:         DockerV2Schema2MediaType,
+		ConfigDescriptor:  config,
+		LayersDescriptors: layers,
+	}
+}
+
+// Schema2Clone creates a copy of the supplied Schema2 manifest.
+func Schema2Clone(src *Schema2) *Schema2 {
+	copy := *src
+	return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema2) ConfigInfo() types.BlobInfo {
+	return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema2) LayerInfos() []LayerInfo {
+	blobs := []LayerInfo{}
+	for _, layer := range m.LayersDescriptors {
+		blobs = append(blobs, LayerInfo{
+			BlobInfo:   BlobInfoFromSchema2Descriptor(layer),
+			EmptyLayer: false,
+		})
+	}
+	return blobs
+}
+
+// isSchema2ForeignLayer is a convenience wrapper to check if a given mime type
+// is a compressed or decompressed schema 2 foreign layer.
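+//
+// E.g. (illustrative):
+//
+//	isSchema2ForeignLayer(DockerV2Schema2ForeignLayerMediaTypeGzip) // true
+//	isSchema2ForeignLayer(DockerV2Schema2LayerMediaType)            // false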
+func isSchema2ForeignLayer(mimeType string) bool { + switch mimeType { + case DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip: + return true + default: + return false + } +} + +// isSchema2Layer is a convenience wrapper to check if a given mime type is a +// compressed or decompressed schema 2 layer. +func isSchema2Layer(mimeType string) bool { + switch mimeType { + case DockerV2SchemaLayerMediaTypeUncompressed, DockerV2Schema2LayerMediaType: + return true + default: + return false + } +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.LayersDescriptors) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) + } + original := m.LayersDescriptors + m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) + for i, info := range layerInfos { + // First make sure we support the media type of the original layer. + if err := SupportedSchema2MediaType(original[i].MediaType); err != nil { + return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType) + } + + // Set the correct media types based on the specified compression + // operation, the desired compression algorithm AND the original media + // type. + // + // Note that manifests in containers-storage might be reporting the + // wrong media type since the original manifests are stored while layers + // are decompressed in storage. Hence, we need to consider the case + // that an already {de}compressed layer should be {de}compressed, which + // is being addressed in `isSchema2{Foreign}Layer`. + switch info.CompressionOperation { + case types.PreserveOriginal: + // Keep the original media type. + m.LayersDescriptors[i].MediaType = original[i].MediaType + + case types.Decompress: + // Decompress the original media type and check if it was + // non-distributable one or not. + mimeType := original[i].MediaType + switch { + case isSchema2ForeignLayer(mimeType): + m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaType + case isSchema2Layer(mimeType): + m.LayersDescriptors[i].MediaType = DockerV2SchemaLayerMediaTypeUncompressed + default: + return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType) + } + + case types.Compress: + if info.CompressionAlgorithm == nil { + logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest) + m.LayersDescriptors[i].MediaType = original[i].MediaType + break + } + // Compress the original media type and set the new one based on + // that type (distributable or not) and the specified compression + // algorithm. Throw an error if the algorithm is not supported. 
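+			// (Illustration, not vendored code: under gzip, a plain layer maps to
+			// DockerV2Schema2LayerMediaType and a foreign layer to
+			// DockerV2Schema2ForeignLayerMediaTypeGzip; zstd is rejected for
+			// docker schema 2 images.)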
+ switch info.CompressionAlgorithm.Name() { + case compression.Gzip.Name(): + mimeType := original[i].MediaType + switch { + case isSchema2ForeignLayer(mimeType): + m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaTypeGzip + case isSchema2Layer(mimeType): + m.LayersDescriptors[i].MediaType = DockerV2Schema2LayerMediaType + default: + return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType) + } + case compression.Zstd.Name(): + return fmt.Errorf("Error preparing updated manifest: zstd compression is not supported for docker images") + default: + return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest) + } + + default: + return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest) + } + m.LayersDescriptors[i].Digest = info.Digest + m.LayersDescriptors[i].Size = info.Size + m.LayersDescriptors[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema2) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + s2 := &Schema2Image{} + if err := json.Unmarshal(config, s2); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: &s2.Created, + DockerVersion: s2.DockerVersion, + Architecture: s2.Architecture, + Os: s2.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if s2.Config != nil { + i.Labels = s2.Config.Labels + i.Env = s2.Config.Env + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema2) ImageID([]digest.Digest) (string, error) { + if err := m.ConfigDescriptor.Digest.Validate(); err != nil { + return "", err + } + return m.ConfigDescriptor.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go new file mode 100644 index 0000000000..453976c487 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go @@ -0,0 +1,216 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "runtime" + + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Schema2PlatformSpec describes the platform which a particular manifest is +// specialized for. +type Schema2PlatformSpec struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` // removed in OCI +} + +// Schema2ManifestDescriptor references a platform-specific manifest. 
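+//
+// A typical serialized entry looks like (illustrative values):
+//
+//	{
+//	    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+//	    "size": 7143,
+//	    "digest": "sha256:…",
+//	    "platform": {"architecture": "amd64", "os": "linux"}
+//	}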
+type Schema2ManifestDescriptor struct { + Schema2Descriptor + Platform Schema2PlatformSpec `json:"platform"` +} + +// Schema2List is a list of platform-specific manifests. +type Schema2List struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []Schema2ManifestDescriptor `json:"manifests"` +} + +// MIMEType returns the MIME type of this particular manifest list. +func (list *Schema2List) MIMEType() string { + return list.MediaType +} + +// Instances returns a slice of digests of the manifests that this list knows of. +func (list *Schema2List) Instances() []digest.Digest { + results := make([]digest.Digest, len(list.Manifests)) + for i, m := range list.Manifests { + results[i] = m.Digest + } + return results +} + +// Instance returns the ListUpdate of a particular instance in the list. +func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) { + for _, manifest := range list.Manifests { + if manifest.Digest == instanceDigest { + return ListUpdate{ + Digest: manifest.Digest, + Size: manifest.Size, + MediaType: manifest.MediaType, + }, nil + } + } + return ListUpdate{}, errors.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) +} + +// UpdateInstances updates the sizes, digests, and media types of the manifests +// which the list catalogs. +func (list *Schema2List) UpdateInstances(updates []ListUpdate) error { + if len(updates) != len(list.Manifests) { + return errors.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates)) + } + for i := range updates { + if err := updates[i].Digest.Validate(); err != nil { + return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest", i+1, len(updates)) + } + list.Manifests[i].Digest = updates[i].Digest + if updates[i].Size < 0 { + return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + } + list.Manifests[i].Size = updates[i].Size + if updates[i].MediaType == "" { + return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) + } + if err := SupportedSchema2MediaType(updates[i].MediaType); err != nil && SupportedOCI1MediaType(updates[i].MediaType) != nil { + return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances had an unsupported media type (was %q): %q", i+1, len(updates), list.Manifests[i].MediaType, updates[i].MediaType) + } + list.Manifests[i].MediaType = updates[i].MediaType + } + return nil +} + +// ChooseInstance parses blob as a schema2 manifest list, and returns the digest +// of the image which is appropriate for the current environment. +func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { + wantedArch := runtime.GOARCH + if ctx != nil && ctx.ArchitectureChoice != "" { + wantedArch = ctx.ArchitectureChoice + } + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } + + for _, d := range list.Manifests { + if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { + return d.Digest, nil + } + } + return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) +} + +// Serialize returns the list in a blob format. 
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (list *Schema2List) Serialize() ([]byte, error) { + buf, err := json.Marshal(list) + if err != nil { + return nil, errors.Wrapf(err, "error marshaling Schema2List %#v", list) + } + return buf, nil +} + +// Schema2ListFromComponents creates a Schema2 manifest list instance from the +// supplied data. +func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List { + list := Schema2List{ + SchemaVersion: 2, + MediaType: DockerV2ListMediaType, + Manifests: make([]Schema2ManifestDescriptor, len(components)), + } + for i, component := range components { + m := Schema2ManifestDescriptor{ + Schema2Descriptor{ + MediaType: component.MediaType, + Size: component.Size, + Digest: component.Digest, + URLs: dupStringSlice(component.URLs), + }, + Schema2PlatformSpec{ + Architecture: component.Platform.Architecture, + OS: component.Platform.OS, + OSVersion: component.Platform.OSVersion, + OSFeatures: dupStringSlice(component.Platform.OSFeatures), + Variant: component.Platform.Variant, + Features: dupStringSlice(component.Platform.Features), + }, + } + list.Manifests[i] = m + } + return &list +} + +// Schema2ListClone creates a deep copy of the passed-in list. +func Schema2ListClone(list *Schema2List) *Schema2List { + return Schema2ListFromComponents(list.Manifests) +} + +// ToOCI1Index returns the list encoded as an OCI1 index. +func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) { + components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) + for _, manifest := range list.Manifests { + converted := imgspecv1.Descriptor{ + MediaType: manifest.MediaType, + Size: manifest.Size, + Digest: manifest.Digest, + URLs: dupStringSlice(manifest.URLs), + Platform: &imgspecv1.Platform{ + OS: manifest.Platform.OS, + Architecture: manifest.Platform.Architecture, + OSFeatures: dupStringSlice(manifest.Platform.OSFeatures), + OSVersion: manifest.Platform.OSVersion, + Variant: manifest.Platform.Variant, + }, + } + components = append(components, converted) + } + oci := OCI1IndexFromComponents(components, nil) + return oci, nil +} + +// ToSchema2List returns the list encoded as a Schema2 list. +func (list *Schema2List) ToSchema2List() (*Schema2List, error) { + return Schema2ListClone(list), nil +} + +// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled +// JSON, presumably generated by encoding a Schema2 manifest list. +func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { + list := Schema2List{ + Manifests: []Schema2ManifestDescriptor{}, + } + if err := json.Unmarshal(manifest, &list); err != nil { + return nil, errors.Wrapf(err, "error unmarshaling Schema2List %q", string(manifest)) + } + return &list, nil +} + +// Clone returns a deep copy of this list and its contents. +func (list *Schema2List) Clone() List { + return Schema2ListClone(list) +} + +// ConvertToMIMEType converts the passed-in manifest list to a manifest +// list of the specified type. 
+func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return list.Clone(), nil + case imgspecv1.MediaTypeImageIndex: + return list.ToOCI1Index() + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType) + } +} diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go new file mode 100644 index 0000000000..6d10430fd7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/list.go @@ -0,0 +1,106 @@ +package manifest + +import ( + "fmt" + + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // SupportedListMIMETypes is a list of the manifest list types that we know how to + // read/manipulate/write. + SupportedListMIMETypes = []string{ + DockerV2ListMediaType, + imgspecv1.MediaTypeImageIndex, + } +) + +// List is an interface for parsing, modifying lists of image manifests. +// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members +// directly. +type List interface { + // MIMEType returns the MIME type of this particular manifest list. + MIMEType() string + + // Instances returns a list of the manifests that this list knows of, other than its own. + Instances() []digest.Digest + + // Update information about the list's instances. The length of the passed-in slice must + // match the length of the list of instances which the list already contains, and every field + // must be specified. + UpdateInstances([]ListUpdate) error + + // Instance returns the size and MIME type of a particular instance in the list. + Instance(digest.Digest) (ListUpdate, error) + + // ChooseInstance selects which manifest is most appropriate for the platform described by the + // SystemContext, or for the current platform if the SystemContext doesn't specify any details. + ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) + + // Serialize returns the list in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded + // from one, even if no modifications were made! + Serialize() ([]byte, error) + + // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error. + ConvertToMIMEType(mimeType string) (List, error) + + // Clone returns a deep copy of this list and its contents. + Clone() List +} + +// ListUpdate includes the fields which a List's UpdateInstances() method will modify. +type ListUpdate struct { + Digest digest.Digest + Size int64 + MediaType string +}
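The List interface above is format-agnostic, so a caller can resolve a platform-specific digest without knowing whether the blob is a Docker manifest list or an OCI index. A short illustrative sketch (the package and function names are hypothetical), using ListFromBlob, which is defined further down in this file:

```go
package manifestutil // hypothetical package, for illustration only

import (
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// chooseForPlatform resolves the per-platform manifest digest out of a raw
// manifest list or OCI index blob, overriding the runtime platform defaults.
func chooseForPlatform(blob []byte, arch, os string) (digest.Digest, error) {
	list, err := manifest.ListFromBlob(blob, manifest.GuessMIMEType(blob))
	if err != nil {
		return "", err
	}
	sys := &types.SystemContext{
		ArchitectureChoice: arch, // falls back to runtime.GOARCH when empty
		OSChoice:           os,   // falls back to runtime.GOOS when empty
	}
	return list.ChooseInstance(sys)
}
```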
+ +// dupStringSlice returns a deep copy of a slice of strings, or nil if the +// source slice is empty. +func dupStringSlice(list []string) []string { + if len(list) == 0 { + return nil + } + dup := make([]string, len(list)) + for i := range list { + dup[i] = list[i] + } + return dup +} + +// dupStringStringMap returns a deep copy of a map[string]string, or nil if the +// passed-in map is nil or has no keys. +func dupStringStringMap(m map[string]string) map[string]string { + if len(m) == 0 { + return nil + } + result := make(map[string]string) + for k, v := range m { + result[k] = v + } + return result +} + +// ListFromBlob parses a list of manifests. +func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) { + normalized := NormalizedMIMEType(manifestMIMEType) + switch normalized { + case DockerV2ListMediaType: + return Schema2ListFromManifest(manifest) + case imgspecv1.MediaTypeImageIndex: + return OCI1IndexFromManifest(manifest) + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Treating single images as manifest lists is not implemented") + } + return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized) +} + +// ConvertListToMIMEType converts the passed-in manifest list to a manifest +// list of the specified type. +func ConvertListToMIMEType(list List, manifestMIMEType string) (List, error) { + return list.ConvertToMIMEType(manifestMIMEType) +} diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go new file mode 100644 index 0000000000..5b4d341d84 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -0,0 +1,263 @@ +package manifest + +import ( + "encoding/json" + "fmt" + + "github.com/containers/image/v5/types" + "github.com/containers/libtrust" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// FIXME: Should we just use docker/distribution and docker/docker implementations directly? + +// FIXME(runcom, mitr): should we have a mediatype pkg?? +const ( + // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 + DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" + // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature + DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 + DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" + // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. + DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" + // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. + DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers. + DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar" + // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list + DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json" + // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar" + // DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers. + DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" +) + +// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type. +func SupportedSchema2MediaType(m string) error { + switch m { + case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed: + return nil + default: + return fmt.Errorf("unsupported docker v2s2 media type: %q", m) + } +} + +// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource +// should request from the backend unless directed otherwise. +var DefaultRequestedManifestMIMETypes = []string{ + imgspecv1.MediaTypeImageManifest, + DockerV2Schema2MediaType, + DockerV2Schema1SignedMediaType, + DockerV2Schema1MediaType, + DockerV2ListMediaType, + imgspecv1.MediaTypeImageIndex, +} + +// Manifest is an interface for parsing, modifying image manifests in isolation. +// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members +// directly. +// +// See types.Image for functionality not limited to manifests, including format conversions and config parsing. +// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. +type Manifest interface { + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + ConfigInfo() types.BlobInfo + // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []LayerInfo + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) + UpdateLayerInfos(layerInfos []types.BlobInfo) error + + // ImageID computes an ID which can uniquely identify this image by its contents, irrespective + // of which (of possibly more than one simultaneously valid) reference was used to locate the + // image, and unchanged by whether or how the layers are compressed. The result takes the form + // of the hexadecimal portion of a digest.Digest. + ImageID(diffIDs []digest.Digest) (string, error) + + // Inspect returns various information for (skopeo inspect) parsed from the manifest, + // incorporating information from a configuration blob returned by configGetter, if + // the underlying image format is expected to include a configuration blob. + Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) + + // Serialize returns the manifest in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+ Serialize() ([]byte, error) +} + +// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos. +type LayerInfo struct { + types.BlobInfo + EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. +} + +// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. +// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, +// but we may not have such metadata available (e.g. when the manifest is a local file). +func GuessMIMEType(manifest []byte) string { + // A subset of manifest fields; the rest is silently ignored by json.Unmarshal. + // Also docker/distribution/manifest.Versioned. + meta := struct { + MediaType string `json:"mediaType"` + SchemaVersion int `json:"schemaVersion"` + Signatures interface{} `json:"signatures"` + }{} + if err := json.Unmarshal(manifest, &meta); err != nil { + return "" + } + + switch meta.MediaType { + case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type. + return meta.MediaType + } + // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. + switch meta.SchemaVersion { + case 1: + if meta.Signatures != nil { + return DockerV2Schema1SignedMediaType + } + return DockerV2Schema1MediaType + case 2: + // best effort to understand if this is an OCI image since mediaType + // isn't in the manifest for OCI anymore + // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. + ociMan := struct { + Config struct { + MediaType string `json:"mediaType"` + } `json:"config"` + }{} + if err := json.Unmarshal(manifest, &ociMan); err != nil { + return "" + } + if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig { + return imgspecv1.MediaTypeImageManifest + } + ociIndex := struct { + Manifests []imgspecv1.Descriptor `json:"manifests"` + }{} + if err := json.Unmarshal(manifest, &ociIndex); err != nil { + return "" + } + if len(ociIndex.Manifests) != 0 { + if ociMan.Config.MediaType == "" { + return imgspecv1.MediaTypeImageIndex + } + return ociMan.Config.MediaType + } + return DockerV2Schema2MediaType + } + return "" +} + +// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures. +func Digest(manifest []byte) (digest.Digest, error) { + if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { + sig, err := libtrust.ParsePrettySignature(manifest, "signatures") + if err != nil { + return "", err + } + manifest, err = sig.Payload() + if err != nil { + // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string + // that libtrust itself has josebase64UrlEncode()d + return "", err + } + } + + return digest.FromBytes(manifest), nil +}
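GuessMIMEType and Digest combine naturally when handling a blob of unknown provenance: detect the format first, then compute the canonical digest, which for signed v2s1 manifests is taken over the payload with the JWS signatures stripped. A small illustrative sketch (not part of the vendored code; names are hypothetical):

```go
package manifestutil // hypothetical package, for illustration only

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

// describeManifest prints the detected MIME type and the canonical digest of
// a raw manifest blob; for v2s1 signed manifests, Digest strips the JWS
// signatures first, so the result matches what registries report.
func describeManifest(blob []byte) error {
	mt := manifest.GuessMIMEType(blob)
	if mt == "" {
		return fmt.Errorf("unrecognized manifest format")
	}
	dgst, err := manifest.Digest(blob)
	if err != nil {
		return err
	}
	fmt.Printf("%s %s\n", mt, dgst)
	return nil
}
```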
+ +// MatchesDigest returns true iff the manifest matches expectedDigest. +// Error may be set if this returns false. +// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, +// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. +func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { + // This should eventually support various digest types. + actualDigest, err := Digest(manifest) + if err != nil { + return false, err + } + return expectedDigest == actualDigest, nil +} + +// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest. +// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature). +func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, err // Coverage: This can fail only if rand.Reader fails. + } + + js, err := libtrust.NewJSONSignature(manifest) + if err != nil { + return nil, err + } + if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails. + return nil, err + } + return js.PrettySignature("signatures") +} + +// MIMETypeIsMultiImage returns true if mimeType is a list of images +func MIMETypeIsMultiImage(mimeType string) bool { + return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex +} + +// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, +// centralizing various workarounds. +func NormalizedMIMEType(input string) string { + switch input { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} + +// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type +func FromBlob(manblob []byte, mt string) (Manifest, error) { + nmt := NormalizedMIMEType(mt) + switch nmt { + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: + return Schema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return OCI1FromManifest(manblob) + case DockerV2Schema2MediaType: + return Schema2FromManifest(manblob) + case DockerV2ListMediaType, imgspecv1.MediaTypeImageIndex: + return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") + } + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt) +} + +// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() +// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. +func layerInfosToStrings(infos []LayerInfo) []string { + layers := make([]string, len(infos)) + for i, info := range infos { + layers[i] = info.Digest.String() + } + return layers +} diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go new file mode 100644 index 0000000000..46c551b188 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -0,0 +1,243 @@ +package manifest + +import ( + "encoding/json" + "fmt" + + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. +func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + Annotations: desc.Annotations, + MediaType: desc.MediaType, + } +} + +// OCI1 is a manifest.Manifest implementation for OCI images. +// The underlying data from imgspecv1.Manifest is also available. +type OCI1 struct { + imgspecv1.Manifest +} + +// SupportedOCI1MediaType checks if the specified string is a supported OCI1 media type. +func SupportedOCI1MediaType(m string) error { + switch m { + case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader: + return nil + default: + return fmt.Errorf("unsupported OCIv1 media type: %q", m) + } +} + +// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. +func OCI1FromManifest(manifest []byte) (*OCI1, error) { + oci1 := OCI1{} + if err := json.Unmarshal(manifest, &oci1); err != nil { + return nil, err + } + // Check manifest's and layers' media types. + if err := SupportedOCI1MediaType(oci1.Config.MediaType); err != nil { + return nil, err + } + for _, layer := range oci1.Layers { + if err := SupportedOCI1MediaType(layer.MediaType); err != nil { + return nil, err + } + } + return &oci1, nil +} + +// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. +func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { + return &OCI1{ + imgspecv1.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + Config: config, + Layers: layers, + }, + } +} + +// OCI1Clone creates a copy of the supplied OCI1 manifest. +func OCI1Clone(src *OCI1) *OCI1 { + return &OCI1{ + Manifest: src.Manifest, + } +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. 
+func (m *OCI1) ConfigInfo() types.BlobInfo { + return BlobInfoFromOCI1Descriptor(m.Config) +} + +// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *OCI1) LayerInfos() []LayerInfo { + blobs := []LayerInfo{} + for _, layer := range m.Layers { + blobs = append(blobs, LayerInfo{ + BlobInfo: BlobInfoFromOCI1Descriptor(layer), + EmptyLayer: false, + }) + } + return blobs +} + +// isOCI1NonDistributableLayer is a convenience wrapper to check if a given mime +// type is a compressed or decompressed OCI v1 non-distributable layer. +func isOCI1NonDistributableLayer(mimeType string) bool { + switch mimeType { + case imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd: + return true + default: + return false + } +} + +// isOCI1Layer is a convenience wrapper to check if a given mime type is a +// compressed or decompressed OCI v1 layer. +func isOCI1Layer(mimeType string) bool { + switch mimeType { + case imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd: + return true + default: + return false + } +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.Layers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) + } + original := m.Layers + m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) + for i, info := range layerInfos { + // First make sure we support the media type of the original layer. + if err := SupportedOCI1MediaType(original[i].MediaType); err != nil { + return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType) + } + + // Set the correct media types based on the specified compression + // operation, the desired compression algorithm AND the original media + // type. + // + // Note that manifests in containers-storage might be reporting the + // wrong media type since the original manifests are stored while layers + // are decompressed in storage. Hence, we need to consider the case + // that an already {de}compressed layer should be {de}compressed, which + // is being addressed in `isOCI1{NonDistributable}Layer`. + switch info.CompressionOperation { + case types.PreserveOriginal: + // Keep the original media type. + m.Layers[i].MediaType = original[i].MediaType + + case types.Decompress: + // Decompress the original media type and check whether it was + // a non-distributable one or not.
+ mimeType := original[i].MediaType + switch { + case isOCI1NonDistributableLayer(mimeType): + m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable + case isOCI1Layer(mimeType): + m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer + default: + return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType) + } + + case types.Compress: + if info.CompressionAlgorithm == nil { + logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest) + m.Layers[i].MediaType = original[i].MediaType + break + } + // Compress the original media type and set the new one based on + // that type (distributable or not) and the specified compression + // algorithm. Throw an error if the algorithm is not supported. + mimeType := original[i].MediaType + switch info.CompressionAlgorithm.Name() { + case compression.Gzip.Name(): + switch { + case isOCI1NonDistributableLayer(mimeType): + m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip + case isOCI1Layer(mimeType): + m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerGzip + default: + return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType) + } + + case compression.Zstd.Name(): + switch { + case isOCI1NonDistributableLayer(mimeType): + m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableZstd + case isOCI1Layer(mimeType): + m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerZstd + default: + return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType) + } + + default: + return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest) + } + + default: + return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest) + } + m.Layers[i].Digest = info.Digest + m.Layers[i].Size = info.Size + m.Layers[i].Annotations = info.Annotations + m.Layers[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *OCI1) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + v1 := &imgspecv1.Image{} + if err := json.Unmarshal(config, v1); err != nil { + return nil, err + } + d1 := &Schema2V1Image{} + if err := json.Unmarshal(config, d1); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: v1.Created, + DockerVersion: d1.DockerVersion, + Labels: v1.Config.Labels, + Architecture: v1.Architecture, + Os: v1.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if d1.Config != nil { + i.Env = d1.Config.Env + } + return i, nil +}
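Note the asymmetry with the schema 2 implementation earlier in this diff: OCI1.UpdateLayerInfos accepts zstd, where the Docker schema 2 variant rejects it outright. An illustrative sketch (hypothetical package and helper names; a real caller would also supply the digests and sizes of the recompressed blobs):

```go
package manifestutil // hypothetical package, for illustration only

import (
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
)

// markLayersZstd records every layer of an OCI manifest as zstd-compressed;
// the same CompressionAlgorithm on a *manifest.Schema2 would return an error.
// Digest and Size are left untouched here and must be updated by real callers.
func markLayersZstd(m *manifest.OCI1) error {
	infos := m.LayerInfos()
	updates := make([]types.BlobInfo, len(infos))
	for i, l := range infos {
		updates[i] = l.BlobInfo
		updates[i].CompressionOperation = types.Compress
		updates[i].CompressionAlgorithm = &compression.Zstd
	}
	return m.UpdateLayerInfos(updates)
}
```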
+ +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *OCI1) ImageID([]digest.Digest) (string, error) { + if err := m.Config.Digest.Validate(); err != nil { + return "", err + } + return m.Config.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go new file mode 100644 index 0000000000..816503ce5e --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go @@ -0,0 +1,221 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "runtime" + + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// OCI1Index is just a wrapper around the OCI index type, but one which we can +// provide methods for. +type OCI1Index struct { + imgspecv1.Index +} + +// MIMEType returns the MIME type of this particular manifest index. +func (index *OCI1Index) MIMEType() string { + return imgspecv1.MediaTypeImageIndex +} + +// Instances returns a slice of digests of the manifests that this index knows of. +func (index *OCI1Index) Instances() []digest.Digest { + results := make([]digest.Digest, len(index.Manifests)) + for i, m := range index.Manifests { + results[i] = m.Digest + } + return results +} + +// Instance returns the ListUpdate of a particular instance in the index. +func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) { + for _, manifest := range index.Manifests { + if manifest.Digest == instanceDigest { + return ListUpdate{ + Digest: manifest.Digest, + Size: manifest.Size, + MediaType: manifest.MediaType, + }, nil + } + } + return ListUpdate{}, errors.Errorf("unable to find instance %s in OCI1Index", instanceDigest) +} + +// UpdateInstances updates the sizes, digests, and media types of the manifests +// which the list catalogs. +func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error { + if len(updates) != len(index.Manifests) { + return errors.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates)) + } + for i := range updates { + if err := updates[i].Digest.Validate(); err != nil { + return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest", i+1, len(updates)) + } + index.Manifests[i].Digest = updates[i].Digest + if updates[i].Size < 0 { + return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + } + index.Manifests[i].Size = updates[i].Size + if updates[i].MediaType == "" { + return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) + } + if err := SupportedOCI1MediaType(updates[i].MediaType); err != nil && SupportedSchema2MediaType(updates[i].MediaType) != nil && updates[i].MediaType != imgspecv1.MediaTypeImageIndex { + return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances had an unsupported media type (was %q): %q", i+1, len(updates), index.Manifests[i].MediaType, updates[i].MediaType) + } + index.Manifests[i].MediaType = updates[i].MediaType + } + return nil +} + +// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest +// of the image which is appropriate for the current environment.
+func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { + wantedArch := runtime.GOARCH + if ctx != nil && ctx.ArchitectureChoice != "" { + wantedArch = ctx.ArchitectureChoice + } + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } + + for _, d := range index.Manifests { + if d.Platform != nil && d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { + return d.Digest, nil + } + } + for _, d := range index.Manifests { + if d.Platform == nil { + return d.Digest, nil + } + } + return "", fmt.Errorf("no image found in image index for architecture %s, OS %s", wantedArch, wantedOS) +} + +// Serialize returns the index in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (index *OCI1Index) Serialize() ([]byte, error) { + buf, err := json.Marshal(index) + if err != nil { + return nil, errors.Wrapf(err, "error marshaling OCI1Index %#v", index) + } + return buf, nil +} + +// OCI1IndexFromComponents creates an OCI1 image index instance from the +// supplied data. +func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index { + index := OCI1Index{ + imgspecv1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + Manifests: make([]imgspecv1.Descriptor, len(components)), + Annotations: dupStringStringMap(annotations), + }, + } + for i, component := range components { + var platform *imgspecv1.Platform + if component.Platform != nil { + platform = &imgspecv1.Platform{ + Architecture: component.Platform.Architecture, + OS: component.Platform.OS, + OSVersion: component.Platform.OSVersion, + OSFeatures: dupStringSlice(component.Platform.OSFeatures), + Variant: component.Platform.Variant, + } + } + m := imgspecv1.Descriptor{ + MediaType: component.MediaType, + Size: component.Size, + Digest: component.Digest, + URLs: dupStringSlice(component.URLs), + Annotations: dupStringStringMap(component.Annotations), + Platform: platform, + } + index.Manifests[i] = m + } + return &index +} + +// OCI1IndexClone creates a deep copy of the passed-in index. +func OCI1IndexClone(index *OCI1Index) *OCI1Index { + return OCI1IndexFromComponents(index.Manifests, index.Annotations) +} + +// ToOCI1Index returns the index encoded as an OCI1 index. +func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) { + return OCI1IndexClone(index), nil +} + +// ToSchema2List returns the index encoded as a Schema2 list. 
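Before the ToSchema2List implementation that follows, a short sketch of how these conversions compose for a raw index blob (illustrative only; the package and function names are hypothetical):

```go
package manifestutil // hypothetical package, for illustration only

import (
	"github.com/containers/image/v5/manifest"
)

// indexToDockerList converts a raw OCI image index into a Docker schema 2
// manifest list blob. Entries without platform information are filled in
// with the current runtime's OS/architecture by ToSchema2List.
func indexToDockerList(blob []byte) ([]byte, error) {
	idx, err := manifest.OCI1IndexFromManifest(blob)
	if err != nil {
		return nil, err
	}
	list, err := idx.ConvertToMIMEType(manifest.DockerV2ListMediaType)
	if err != nil {
		return nil, err
	}
	return list.Serialize()
}
```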
+func (index *OCI1Index) ToSchema2List() (*Schema2List, error) { + components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests)) + for _, manifest := range index.Manifests { + platform := manifest.Platform + if platform == nil { + platform = &imgspecv1.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + } + } + converted := Schema2ManifestDescriptor{ + Schema2Descriptor{ + MediaType: manifest.MediaType, + Size: manifest.Size, + Digest: manifest.Digest, + URLs: dupStringSlice(manifest.URLs), + }, + Schema2PlatformSpec{ + OS: platform.OS, + Architecture: platform.Architecture, + OSFeatures: dupStringSlice(platform.OSFeatures), + OSVersion: platform.OSVersion, + Variant: platform.Variant, + }, + } + components = append(components, converted) + } + s2 := Schema2ListFromComponents(components) + return s2, nil +} + +// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled +// JSON, presumably generated by encoding an OCI1 manifest index. +func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { + index := OCI1Index{ + Index: imgspecv1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + Manifests: []imgspecv1.Descriptor{}, + Annotations: make(map[string]string), + }, + } + if err := json.Unmarshal(manifest, &index); err != nil { + return nil, errors.Wrapf(err, "error unmarshaling OCI1Index %q", string(manifest)) + } + return &index, nil +} + +// Clone returns a deep copy of this list and its contents. +func (index *OCI1Index) Clone() List { + return OCI1IndexClone(index) +} + +// ConvertToMIMEType converts the passed-in image index to a manifest list of +// the specified type. +func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return index.ToSchema2List() + case imgspecv1.MediaTypeImageIndex: + return index.Clone(), nil + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) + } +} diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go similarity index 83% rename from vendor/github.com/containers/image/oci/archive/oci_dest.go rename to vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index 9571c37e2b..164d5522d9 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -5,8 +5,9 @@ import ( "io" "os" - "github.com/containers/image/types" + "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -105,19 +106,26 @@ func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info ty return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute) } -// PutManifest writes manifest to the destination -func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte) error { - return d.unpackedDest.PutManifest(ctx, m) +// PutManifest writes the manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when +// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated +// by `manifest.Digest()`. +func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + return d.unpackedDest.PutManifest(ctx, m, instanceDigest) } -func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - return d.unpackedDest.PutSignatures(ctx, signatures) +// PutSignatures writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + return d.unpackedDest.PutSignatures(ctx, signatures, instanceDigest) } // Commit marks the process of storing the image as successful and asks for the image to be persisted // after the directory is made, it is tarred up into a file and the directory is deleted -func (d *ociArchiveImageDestination) Commit(ctx context.Context) error { - if err := d.unpackedDest.Commit(ctx); err != nil { +func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil { return errors.Wrapf(err, "error storing image %q", d.ref.image) } diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go similarity index 81% rename from vendor/github.com/containers/image/oci/archive/oci_src.go rename to vendor/github.com/containers/image/v5/oci/archive/oci_src.go index ca74f950bf..33a41d44b0 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go @@ -4,8 +4,8 @@ import ( "context" "io" - ocilayout "github.com/containers/image/oci/layout" - "github.com/containers/image/types" + ocilayout "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -96,7 +96,14 @@ func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDiges return s.unpackedSrc.GetSignatures(ctx, instanceDigest) } -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. 
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return s.unpackedSrc.LayerInfosForCopy(ctx, instanceDigest) } diff --git a/vendor/github.com/containers/image/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go similarity index 94% rename from vendor/github.com/containers/image/oci/archive/oci_transport.go rename to vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index 7c1d26ba82..2d72a6fee8 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -7,14 +7,14 @@ import ( "os" "strings" - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/internal/tmpdir" - "github.com/containers/image/oci/internal" - ocilayout "github.com/containers/image/oci/layout" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/oci/internal" + ocilayout "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/image/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go similarity index 100% rename from vendor/github.com/containers/image/oci/internal/oci_util.go rename to vendor/github.com/containers/image/v5/oci/internal/oci_util.go diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go similarity index 78% rename from vendor/github.com/containers/image/oci/layout/oci_dest.go rename to vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index db102184db..370e8d2cd2 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -9,8 +9,8 @@ import ( "path/filepath" "runtime" - "github.com/containers/image/manifest" - "github.com/containers/image/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -38,6 +38,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (types.Imag Versioned: imgspec.Versioned{ SchemaVersion: 2, }, + Annotations: make(map[string]string), } } @@ -73,6 +74,7 @@ func (d *ociImageDestination) Close() error { func (d *ociImageDestination) SupportedManifestMIMETypes() []string { return []string{ imgspecv1.MediaTypeImageManifest, + 
imgspecv1.MediaTypeImageIndex, } } @@ -205,20 +207,27 @@ func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.Blo return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil } -// PutManifest writes manifest to the destination. +// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types, +// this should be either an OCI manifest (possibly converted to this format by the caller) or index, +// neither of which we'll need to modify further. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when +// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated +// by `manifest.Digest()`. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte) error { - digest, err := manifest.Digest(m) - if err != nil { - return err +func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + var digest digest.Digest + var err error + if instanceDigest != nil { + digest = *instanceDigest + } else { + digest, err = manifest.Digest(m) + if err != nil { + return err + } } - desc := imgspecv1.Descriptor{} - desc.Digest = digest - // TODO(runcom): beaware and add support for OCI manifest list - desc.MediaType = imgspecv1.MediaTypeImageManifest - desc.Size = int64(len(m)) blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) if err != nil { @@ -231,32 +240,59 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte) error { return err } - if d.ref.image != "" { - annotations := make(map[string]string) - annotations["org.opencontainers.image.ref.name"] = d.ref.image - desc.Annotations = annotations + if instanceDigest != nil { + return nil } - desc.Platform = &imgspecv1.Platform{ - Architecture: runtime.GOARCH, - OS: runtime.GOOS, + + // If we had platform information, we'd build an imgspecv1.Platform structure here. + + // Start filling out the descriptor for this entry + desc := imgspecv1.Descriptor{} + desc.Digest = digest + desc.Size = int64(len(m)) + if d.ref.image != "" { + desc.Annotations = make(map[string]string) + desc.Annotations[imgspecv1.AnnotationRefName] = d.ref.image } + + // If we knew the MIME type, we wouldn't have to guess here. + desc.MediaType = manifest.GuessMIMEType(m) + d.addManifest(&desc) return nil } func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { + // If the new entry has a name, remove any conflicting names which we already have. + if desc.Annotations != nil && desc.Annotations[imgspecv1.AnnotationRefName] != "" { + // The name is being set on a new entry, so remove any older ones that had the same name. + // We might be storing an index and all of its component images, and we'll want to attach + // the name to the last one, which is the index. 
+ for i, manifest := range d.index.Manifests { + if manifest.Annotations[imgspecv1.AnnotationRefName] == desc.Annotations[imgspecv1.AnnotationRefName] { + delete(d.index.Manifests[i].Annotations, imgspecv1.AnnotationRefName) + break + } + } + } + // If it has the same digest as another entry in the index, we already overwrote the file, + // so just pick up the other information. for i, manifest := range d.index.Manifests { - if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] { - // TODO Should there first be a cleanup based on the descriptor we are going to replace? + if manifest.Digest == desc.Digest { + // Replace it completely. d.index.Manifests[i] = *desc return } } + // It's a new entry to be added to the index. d.index.Manifests = append(d.index.Manifests, *desc) } -func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { +// PutSignatures would add the given signatures to the oci layout (currently not supported). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { if len(signatures) != 0 { return errors.Errorf("Pushing signatures for OCI images is not supported") } @@ -267,7 +303,7 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][] // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) -func (d *ociImageDestination) Commit(ctx context.Context) error { +func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error { if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { return err } diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go similarity index 78% rename from vendor/github.com/containers/image/oci/layout/oci_src.go rename to vendor/github.com/containers/image/v5/oci/layout/oci_src.go index cc536f69ef..f515203df7 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -8,8 +8,9 @@ import ( "os" "strconv" - "github.com/containers/image/pkg/tlsclientconfig" - "github.com/containers/image/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/tlsclientconfig" + "github.com/containers/image/v5/types" "github.com/docker/go-connections/tlsconfig" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -18,6 +19,7 @@ import ( type ociImageSource struct { ref ociReference + index *imgspecv1.Index descriptor imgspecv1.Descriptor client *http.Client sharedBlobDir string @@ -41,7 +43,11 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSour if err != nil { return nil, err } - d := &ociImageSource{ref: ref, descriptor: descriptor, client: client} + index, err := ref.getIndex() + if err != nil { + return nil, err + } + d := &ociImageSource{ref: ref, index: index, descriptor: descriptor, client: client} if sys != nil { // TODO(jonboulle): check dir existence? d.sharedBlobDir = sys.OCISharedBlobDirPath @@ -66,28 +72,33 @@ func (s *ociImageSource) Close() error { func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { var dig digest.Digest var mimeType string + var err error + if instanceDigest == nil { dig = digest.Digest(s.descriptor.Digest) mimeType = s.descriptor.MediaType } else { dig = *instanceDigest - // XXX: instanceDigest means that we don't immediately have the context of what - // mediaType the manifest has. In OCI this means that we don't know - // what reference it came from, so we just *assume* that its - // MediaTypeImageManifest. - // FIXME: We should actually be able to look up the manifest in the index, - // and see the MIME type there. - mimeType = imgspecv1.MediaTypeImageManifest + for _, md := range s.index.Manifests { + if md.Digest == dig { + mimeType = md.MediaType + break + } + } } manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) if err != nil { return nil, "", err } + m, err := ioutil.ReadFile(manifestPath) if err != nil { return nil, "", err } + if mimeType == "" { + mimeType = manifest.GuessMIMEType(m) + } return m, mimeType, nil } @@ -157,8 +168,15 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io return nil, 0, errWrap } -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *ociImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. 
If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *ociImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { return nil, nil } diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go similarity index 95% rename from vendor/github.com/containers/image/oci/layout/oci_transport.go rename to vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index 4e5cecff2e..c662c9a7a0 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -8,12 +8,12 @@ import ( "path/filepath" "strings" - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/oci/internal" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/oci/internal" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -195,10 +195,10 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { } else { // if image specified, look through all manifests for a match for _, md := range index.Manifests { - if md.MediaType != imgspecv1.MediaTypeImageManifest { + if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex { continue } - refName, ok := md.Annotations["org.opencontainers.image.ref.name"] + refName, ok := md.Annotations[imgspecv1.AnnotationRefName] if !ok { continue } diff --git a/vendor/github.com/containers/image/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go similarity index 98% rename from vendor/github.com/containers/image/openshift/openshift-copies.go rename to vendor/github.com/containers/image/v5/openshift/openshift-copies.go index 01fe71a243..f45dc24c4e 100644 --- a/vendor/github.com/containers/image/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -205,22 +205,18 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) { // only try to read the auth information if we are secure if isConfigTransportTLS(*clientConfig) { var err error - - // mergo is a first write wins for map value and a last writing wins for interface values - // NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a. - // Our mergo.Merge version is older than this change. // REMOVED: Support for interactive fallback. 
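+			// With MergeWithOverwrite, non-empty fields of the partial configs merged
+			// below replace values already present in clientConfig; plain mergo.Merge
+			// would only fill in fields that are still unset.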
userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo) if err != nil { return nil, err } - mergo.Merge(clientConfig, userAuthPartialConfig) + mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig) serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo) if err != nil { return nil, err } - mergo.Merge(clientConfig, serverAuthPartialConfig) + mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig) } return clientConfig, nil @@ -241,7 +237,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, conf configClientConfig.CAFile = configClusterInfo.CertificateAuthority configClientConfig.CAData = configClusterInfo.CertificateAuthorityData configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify - mergo.Merge(mergedConfig, configClientConfig) + mergo.MergeWithOverwrite(mergedConfig, configClientConfig) return mergedConfig, nil } @@ -324,7 +320,7 @@ func (config *directClientConfig) getContext() clientcmdContext { var mergedContext clientcmdContext if configContext, exists := contexts[contextName]; exists { - mergo.Merge(&mergedContext, configContext) + mergo.MergeWithOverwrite(&mergedContext, configContext) } // REMOVED: overrides support @@ -427,7 +423,7 @@ func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo { var mergedAuthInfo clientcmdAuthInfo if configAuthInfo, exists := authInfos[authInfoName]; exists { - mergo.Merge(&mergedAuthInfo, configAuthInfo) + mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo) } // REMOVED: overrides support @@ -440,10 +436,10 @@ func (config *directClientConfig) getCluster() clientcmdCluster { clusterInfoName := config.getClusterName() var mergedClusterInfo clientcmdCluster - mergo.Merge(&mergedClusterInfo, defaultCluster) - mergo.Merge(&mergedClusterInfo, envVarCluster) + mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster) + mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster) if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { - mergo.Merge(&mergedClusterInfo, configClusterInfo) + mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo) } // REMOVED: overrides support @@ -577,7 +573,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) { // first merge all of our maps mapConfig := clientcmdNewConfig() for _, kubeconfig := range kubeconfigs { - mergo.Merge(mapConfig, kubeconfig) + mergo.MergeWithOverwrite(mapConfig, kubeconfig) } // merge all of the struct values in the reverse order so that priority is given correctly @@ -585,14 +581,14 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) { nonMapConfig := clientcmdNewConfig() for i := len(kubeconfigs) - 1; i >= 0; i-- { kubeconfig := kubeconfigs[i] - mergo.Merge(nonMapConfig, kubeconfig) + mergo.MergeWithOverwrite(nonMapConfig, kubeconfig) } // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and // get the values we expect. config := clientcmdNewConfig() - mergo.Merge(config, mapConfig) - mergo.Merge(config, nonMapConfig) + mergo.MergeWithOverwrite(config, mapConfig) + mergo.MergeWithOverwrite(config, nonMapConfig) // REMOVED: Possibility to skip this. 
if err := resolveLocalPaths(config); err != nil { diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go similarity index 90% rename from vendor/github.com/containers/image/openshift/openshift.go rename to vendor/github.com/containers/image/v5/openshift/openshift.go index 814c3eea1d..016de48034 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -12,11 +12,11 @@ import ( "net/url" "strings" - "github.com/containers/image/docker" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/containers/image/version" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/image/v5/version" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -231,16 +231,16 @@ func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - var imageName string + var imageStreamImageName string if instanceDigest == nil { if err := s.ensureImageIsResolved(ctx); err != nil { return nil, err } - imageName = s.imageStreamImageName + imageStreamImageName = s.imageStreamImageName } else { - imageName = instanceDigest.String() + imageStreamImageName = instanceDigest.String() } - image, err := s.client.getImage(ctx, imageName) + image, err := s.client.getImage(ctx, imageStreamImageName) if err != nil { return nil, err } @@ -253,8 +253,15 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest return sigs, nil } -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { return nil, nil } @@ -414,20 +421,28 @@ func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info typ // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. 
it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte) error { - manifestDigest, err := manifest.Digest(m) - if err != nil { - return err +func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + if instanceDigest == nil { + manifestDigest, err := manifest.Digest(m) + if err != nil { + return err + } + d.imageStreamImageName = manifestDigest.String() } - d.imageStreamImageName = manifestDigest.String() - - return d.docker.PutManifest(ctx, m) + return d.docker.PutManifest(ctx, m, instanceDigest) } -func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { - if d.imageStreamImageName == "" { - return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures") +func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + var imageStreamName string + if instanceDigest == nil { + if d.imageStreamImageName == "" { + return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures") + } + imageStreamName = d.imageStreamImageName + } else { + imageStreamName = instanceDigest.String() } + // Because image signatures are a shared resource in Atomic Registry, the default upload // always adds signatures. Eventually we should also allow removing signatures. @@ -435,7 +450,7 @@ func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signature return nil // No need to even read the old state. } - image, err := d.client.getImage(ctx, d.imageStreamImageName) + image, err := d.client.getImage(ctx, imageStreamName) if err != nil { return err } @@ -460,7 +475,7 @@ sigExists: if err != nil || n != 16 { return errors.Wrapf(err, "Error generating random signature len %d", n) } - signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes) + signatureName = fmt.Sprintf("%s@%032x", imageStreamName, randBytes) if _, ok := existingSigNames[signatureName]; !ok { break } @@ -489,8 +504,8 @@ sigExists: // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *openshiftImageDestination) Commit(ctx context.Context) error { - return d.docker.Commit(ctx) +func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.docker.Commit(ctx, unparsedToplevel) } // These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. 
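The changes above all follow the same containers/image v5 convention: per-instance operations gain a trailing *digest.Digest parameter, where nil addresses the primary manifest and a non-nil digest addresses one instance of a manifest list, and Commit() now receives the top-level unparsed image. A minimal caller sketch of that convention follows; every identifier in it (pushOneInstance, dest, and so on) is invented for illustration and is not part of this diff:

	import (
		"context"

		"github.com/containers/image/v5/types"
		digest "github.com/opencontainers/go-digest"
	)

	// pushOneInstance writes one manifest-list instance, then the list itself,
	// and commits. A nil instanceDigest always means "the primary manifest".
	func pushOneInstance(ctx context.Context, dest types.ImageDestination,
		instanceManifest, listManifest []byte, instanceDigest digest.Digest,
		unparsedToplevel types.UnparsedImage) error {
		// Secondary instances are written under their own digest...
		if err := dest.PutManifest(ctx, instanceManifest, &instanceDigest); err != nil {
			return err
		}
		// ...and the top-level manifest list with instanceDigest == nil.
		if err := dest.PutManifest(ctx, listManifest, nil); err != nil {
			return err
		}
		// Signatures follow the same convention; here none are attached.
		if err := dest.PutSignatures(ctx, nil, nil); err != nil {
			return err
		}
		// Commit now receives the top-level unparsed image so transports can
		// inspect the whole list if they need to.
		return dest.Commit(ctx, unparsedToplevel)
	}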
diff --git a/vendor/github.com/containers/image/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go similarity index 96% rename from vendor/github.com/containers/image/openshift/openshift_transport.go rename to vendor/github.com/containers/image/v5/openshift/openshift_transport.go index b27867a0d6..6bbb43be28 100644 --- a/vendor/github.com/containers/image/openshift/openshift_transport.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go @@ -6,11 +6,11 @@ import ( "regexp" "strings" - "github.com/containers/image/docker/policyconfiguration" - "github.com/containers/image/docker/reference" - genericImage "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/policyconfiguration" + "github.com/containers/image/v5/docker/reference" + genericImage "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go similarity index 94% rename from vendor/github.com/containers/image/ostree/ostree_dest.go rename to vendor/github.com/containers/image/v5/ostree/ostree_dest.go index 06a905aed1..c442b4d2ee 100644 --- a/vendor/github.com/containers/image/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -20,8 +20,8 @@ import ( "time" "unsafe" - "github.com/containers/image/manifest" - "github.com/containers/image/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" "github.com/klauspost/pgzip" "github.com/opencontainers/go-digest" @@ -376,10 +376,16 @@ func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types. } // PutManifest writes manifest to the destination. +// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so +// there can be no secondary manifests. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error { +func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.New(`Manifest lists are not supported by "ostree:"`) + } + d.manifest = string(manifestBlob) if err := json.Unmarshal(manifestBlob, &d.schema); err != nil { @@ -400,7 +406,14 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [ return ioutil.WriteFile(manifestPath, manifestBlob, 0644) } -func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { +// PutSignatures writes signatures to the destination. +// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so +// there can be no secondary manifests. 
+func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	if instanceDigest != nil {
+		return errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+
 	path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
 	if err := ensureParentDirectoryExists(path); err != nil {
 		return err
@@ -416,7 +429,7 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
 	return nil
 }
 
-func (d *ostreeImageDestination) Commit(ctx context.Context) error {
+func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error {
 	runtime.LockOSThread()
 	defer runtime.UnlockOSThread()
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
similarity index 88%
rename from vendor/github.com/containers/image/ostree/ostree_src.go
rename to vendor/github.com/containers/image/v5/ostree/ostree_src.go
index 43d8f6837a..4948ec6641 100644
--- a/vendor/github.com/containers/image/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -13,8 +13,8 @@ import (
 	"strings"
 	"unsafe"
 
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/types"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
@@ -98,9 +98,11 @@ func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) {
 
 // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
 // It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no non-default instances.
 func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
 	if instanceDigest != nil {
-		return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`)
+		return nil, "", errors.New(`Manifest lists are not supported by "ostree:"`)
 	}
 	if s.repo == nil {
 		repo, err := openRepo(s.ref.repo)
@@ -275,7 +277,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 	// Ensure s.compressed is initialized. It is build by LayerInfosForCopy.
 	if s.compressed == nil {
-		_, err := s.LayerInfosForCopy(ctx)
+		_, err := s.LayerInfosForCopy(ctx, nil)
 		if err != nil {
 			return nil, -1, err
 		}
@@ -337,9 +339,12 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 	return rc, layerSize, nil
 }
 
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as there can be no secondary manifests.
 func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
 	if instanceDigest != nil {
-		return nil, errors.New("manifest lists are not supported by this transport")
+		return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
 	}
 	lenSignatures, err := s.getLenSignatures()
 	if err != nil {
@@ -372,9 +377,18 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
 	return signatures, nil
 }
 
-// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
-// the image, after they've been decompressed.
-func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary manifests.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+	if instanceDigest != nil {
+		return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+
 	updatedBlobInfos := []types.BlobInfo{}
 	manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
 	if err != nil {
diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
similarity index 97%
rename from vendor/github.com/containers/image/ostree/ostree_transport.go
rename to vendor/github.com/containers/image/v5/ostree/ostree_transport.go
index 2e86623ac0..a55147b85e 100644
--- a/vendor/github.com/containers/image/ostree/ostree_transport.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
@@ -11,11 +11,11 @@ import (
 	"regexp"
 	"strings"
 
-	"github.com/containers/image/directory/explicitfilepath"
-	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/image"
-	"github.com/containers/image/transports"
-	"github.com/containers/image/types"
+	"github.com/containers/image/v5/directory/explicitfilepath"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
 	"github.com/pkg/errors"
 )
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
similarity index 99%
rename from vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
rename to vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
index 19d0a6c80e..9c9a17a584 100644
--- a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go
@@ -7,8 +7,8 @@ import (
 	"sync"
 	"time"
 
-	"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
-	"github.com/containers/image/types"
+	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/v5/types"
 	bolt "github.com/etcd-io/bbolt"
"github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go similarity index 94% rename from vendor/github.com/containers/image/pkg/blobinfocache/default.go rename to vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go index 357333215d..952bcf5a12 100644 --- a/vendor/github.com/containers/image/pkg/blobinfocache/default.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go @@ -6,9 +6,9 @@ import ( "path/filepath" "strconv" - "github.com/containers/image/pkg/blobinfocache/boltdb" - "github.com/containers/image/pkg/blobinfocache/memory" - "github.com/containers/image/types" + "github.com/containers/image/v5/pkg/blobinfocache/boltdb" + "github.com/containers/image/v5/pkg/blobinfocache/memory" + "github.com/containers/image/v5/types" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go similarity index 99% rename from vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go rename to vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 5479319de1..5deca4a82d 100644 --- a/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -6,7 +6,7 @@ import ( "sort" "time" - "github.com/containers/image/types" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go similarity index 98% rename from vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go rename to vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index dfb3386348..8f28c66238 100644 --- a/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -5,8 +5,8 @@ import ( "sync" "time" - "github.com/containers/image/pkg/blobinfocache/internal/prioritize" - "github.com/containers/image/types" + "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" + "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go similarity index 98% rename from vendor/github.com/containers/image/pkg/blobinfocache/none/none.go rename to vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go index e5dca25ceb..fa1879afdb 100644 --- a/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go @@ -2,7 +2,7 @@ package none import ( - "github.com/containers/image/types" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go new file mode 100644 index 0000000000..04d231c6d6 --- /dev/null +++ 
b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -0,0 +1,149 @@ +package compression + +import ( + "bytes" + "compress/bzip2" + "fmt" + "io" + "io/ioutil" + + "github.com/containers/image/v5/pkg/compression/internal" + "github.com/containers/image/v5/pkg/compression/types" + "github.com/klauspost/pgzip" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" +) + +// Algorithm is a compression algorithm that can be used for CompressStream. +type Algorithm = types.Algorithm + +var ( + // Gzip compression. + Gzip = internal.NewAlgorithm("gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor) + // Bzip2 compression. + Bzip2 = internal.NewAlgorithm("bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor) + // Xz compression. + Xz = internal.NewAlgorithm("Xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) + // Zstd compression. + Zstd = internal.NewAlgorithm("zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) + + compressionAlgorithms = map[string]Algorithm{ + Gzip.Name(): Gzip, + Bzip2.Name(): Bzip2, + Xz.Name(): Xz, + Zstd.Name(): Zstd, + } +) + +// AlgorithmByName returns the compressor by its name +func AlgorithmByName(name string) (Algorithm, error) { + algorithm, ok := compressionAlgorithms[name] + if ok { + return algorithm, nil + } + return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name) +} + +// DecompressorFunc returns the decompressed stream, given a compressed stream. +// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). +type DecompressorFunc = internal.DecompressorFunc + +// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. +func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { + return pgzip.NewReader(r) +} + +// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. +func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { + return ioutil.NopCloser(bzip2.NewReader(r)), nil +} + +// XzDecompressor is a DecompressorFunc for the xz compression algorithm. +func XzDecompressor(r io.Reader) (io.ReadCloser, error) { + r, err := xz.NewReader(r) + if err != nil { + return nil, err + } + return ioutil.NopCloser(r), nil +} + +// gzipCompressor is a CompressorFunc for the gzip compression algorithm. +func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) { + if level != nil { + return pgzip.NewWriterLevel(r, *level) + } + return pgzip.NewWriter(r), nil +} + +// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm. +func bzip2Compressor(r io.Writer, level *int) (io.WriteCloser, error) { + return nil, fmt.Errorf("bzip2 compression not supported") +} + +// xzCompressor is a CompressorFunc for the xz compression algorithm. +func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) { + return xz.NewWriter(r) +} + +// CompressStream returns the compressor by its name +func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) { + return internal.AlgorithmCompressor(algo)(dest, level) +} + +// DetectCompressionFormat returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. +// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. 
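+//
+// A minimal usage sketch (hypothetical caller; names invented):
+//
+//	algo, decompressor, rest, err := DetectCompressionFormat(input)
+//	if err != nil {
+//		return err // a genuine read error, not "format unknown"
+//	}
+//	if decompressor != nil {
+//		rc, err := decompressor(rest) // read from rest, not input: the sniffed prefix lives there
+//		// ... use rc, then rc.Close()
+//	}
+//	// algo is the zero Algorithm{} when nothing was recognized.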
+func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) { + buffer := [8]byte{} + + n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. + // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later. + return Algorithm{}, nil, nil, err + } + + var retAlgo Algorithm + var decompressor DecompressorFunc + for _, algo := range compressionAlgorithms { + if bytes.HasPrefix(buffer[:n], internal.AlgorithmPrefix(algo)) { + logrus.Debugf("Detected compression format %s", algo.Name()) + retAlgo = algo + decompressor = internal.AlgorithmDecompressor(algo) + break + } + } + if decompressor == nil { + logrus.Debugf("No compression detected") + } + + return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil +} + +// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. +// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. +func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { + _, d, r, e := DetectCompressionFormat(input) + return d, r, e +} + +// AutoDecompress takes a stream and returns an uncompressed version of the +// same stream. +// The caller must call Close() on the returned stream (even if the input does not need, +// or does not even support, closing!). +func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { + decompressor, stream, err := DetectCompression(stream) + if err != nil { + return nil, false, errors.Wrapf(err, "Error detecting compression") + } + var res io.ReadCloser + if decompressor != nil { + res, err = decompressor(stream) + if err != nil { + return nil, false, errors.Wrapf(err, "Error initializing decompression") + } + } else { + res = ioutil.NopCloser(stream) + } + return res, decompressor != nil, nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go new file mode 100644 index 0000000000..6092a9517b --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go @@ -0,0 +1,57 @@ +package internal + +import "io" + +// CompressorFunc writes the compressed stream to the given writer using the specified compression level. +// The caller must call Close() on the stream (even if the input stream does not need closing!). +type CompressorFunc func(io.Writer, *int) (io.WriteCloser, error) + +// DecompressorFunc returns the decompressed stream, given a compressed stream. +// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). +type DecompressorFunc func(io.Reader) (io.ReadCloser, error) + +// Algorithm is a compression algorithm that can be used for CompressStream. +type Algorithm struct { + name string + prefix []byte + decompressor DecompressorFunc + compressor CompressorFunc +} + +// NewAlgorithm creates an Algorithm instance. +// This function exists so that Algorithm instances can only be created by code that +// is allowed to import this internal subpackage. 
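+//
+// For instance, pkg/compression builds its exported algorithms this way:
+//
+//	Gzip = internal.NewAlgorithm("gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)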
+func NewAlgorithm(name string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { + return Algorithm{ + name: name, + prefix: prefix, + decompressor: decompressor, + compressor: compressor, + } +} + +// Name returns the name for the compression algorithm. +func (c Algorithm) Name() string { + return c.name +} + +// AlgorithmCompressor returns the compressor field of algo. +// This is a function instead of a public method so that it is only callable from by code +// that is allowed to import this internal subpackage. +func AlgorithmCompressor(algo Algorithm) CompressorFunc { + return algo.compressor +} + +// AlgorithmDecompressor returns the decompressor field of algo. +// This is a function instead of a public method so that it is only callable from by code +// that is allowed to import this internal subpackage. +func AlgorithmDecompressor(algo Algorithm) DecompressorFunc { + return algo.decompressor +} + +// AlgorithmPrefix returns the prefix field of algo. +// This is a function instead of a public method so that it is only callable from by code +// that is allowed to import this internal subpackage. +func AlgorithmPrefix(algo Algorithm) []byte { + return algo.prefix +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go new file mode 100644 index 0000000000..f96eff2e3c --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go @@ -0,0 +1,13 @@ +package types + +import ( + "github.com/containers/image/v5/pkg/compression/internal" +) + +// DecompressorFunc returns the decompressed stream, given a compressed stream. +// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). +type DecompressorFunc = internal.DecompressorFunc + +// Algorithm is a compression algorithm provided and supported by pkg/compression. +// It can’t be supplied from the outside. +type Algorithm = internal.Algorithm diff --git a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go new file mode 100644 index 0000000000..962fe96764 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go @@ -0,0 +1,59 @@ +package compression + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} + +func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// zstdCompressor is a CompressorFunc for the zstd compression algorithm. 
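+//
+// External callers would normally reach it through CompressStream, e.g.
+// (a hypothetical sketch):
+//
+//	level := 3 // mapped onto the encoder levels via zstd.EncoderLevelFromZstd
+//	w, err := compression.CompressStream(dest, compression.Zstd, &level)
+//	// ... write the payload to w, then w.Close()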
+func zstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) { + if level == nil { + return zstdWriter(r) + } + return zstdWriterWithLevel(r, *level) +} + +// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm. +func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) { + return zstdReader(r) +} diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go similarity index 87% rename from vendor/github.com/containers/image/pkg/docker/config/config.go rename to vendor/github.com/containers/image/v5/pkg/docker/config/config.go index eef629d5c6..b7dddd0d69 100644 --- a/vendor/github.com/containers/image/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -9,7 +9,7 @@ import ( "path/filepath" "strings" - "github.com/containers/image/types" + "github.com/containers/image/v5/types" helperclient "github.com/docker/docker-credential-helpers/client" "github.com/docker/docker-credential-helpers/credentials" "github.com/docker/docker/pkg/homedir" @@ -26,6 +26,11 @@ type dockerConfigFile struct { CredHelpers map[string]string `json:"credHelpers,omitempty"` } +type authPath struct { + path string + legacyFormat bool +} + var ( defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json") xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json") @@ -84,28 +89,28 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin } } - dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath) - var paths []string - pathToAuth, err := getPathToAuth(sys) + paths := []authPath{} + pathToAuth, lf, err := getPathToAuth(sys) if err == nil { - paths = append(paths, pathToAuth) + paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf}) } else { // Error means that the path set for XDG_RUNTIME_DIR does not exist // but we don't want to completely fail in the case that the user is pulling a public image // Logging the error as a warning instead and moving on to pulling the image logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err) } - paths = append(paths, filepath.Join(homedir.Get(), dockerHomePath), dockerLegacyPath) + paths = append(paths, + authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false}, + authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true}) for _, path := range paths { - legacyFormat := path == dockerLegacyPath - username, password, err := findAuthentication(registry, path, legacyFormat) + username, password, err := findAuthentication(registry, path.path, path.legacyFormat) if err != nil { logrus.Debugf("Credentials not found") return "", "", err } if username != "" && password != "" { - logrus.Debugf("Returning credentials from %s", path) + logrus.Debugf("Returning credentials from %s", path.path) return username, password, nil } } @@ -142,9 +147,17 @@ func RemoveAuthentication(sys *types.SystemContext, registry string) error { }) } -// RemoveAllAuthentication deletes all the credentials stored in auth.json +// RemoveAllAuthentication deletes all the credentials stored in auth.json and kernel keyring func RemoveAllAuthentication(sys *types.SystemContext) error { return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + if enableKeyring { + err := removeAllAuthFromKernelKeyring() + if err == nil { + logrus.Debugf("removing all credentials from kernel keyring") + return 
false, nil + } + logrus.Debugf("error removing credentials from kernel keyring") + } auths.CredHelpers = make(map[string]string) auths.AuthConfigs = make(map[string]dockerAuthConfig) return true, nil @@ -155,13 +168,16 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { // The path can be overriden by the user if the overwrite-path flag is set // If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers // Otherwise, the auth.json file is stored in /run/containers/UID -func getPathToAuth(sys *types.SystemContext) (string, error) { +func getPathToAuth(sys *types.SystemContext) (string, bool, error) { if sys != nil { if sys.AuthFilePath != "" { - return sys.AuthFilePath, nil + return sys.AuthFilePath, false, nil + } + if sys.LegacyFormatAuthFilePath != "" { + return sys.LegacyFormatAuthFilePath, true, nil } if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), nil + return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil } } @@ -174,11 +190,11 @@ func getPathToAuth(sys *types.SystemContext) (string, error) { // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory // or made a typo while setting the environment variable, // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. - return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir) + return "", false, errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir) } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. 
-		return filepath.Join(runtimeDir, xdgRuntimeDirPath), nil
+		return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil
 	}
-	return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), nil
+	return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), false, nil
 }
 
 // readJSONFile unmarshals the authentications stored in the auth.json file and returns it
@@ -212,7 +228,7 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
 
 // modifyJSON writes to auth.json if the dockerConfigFile has been updated
 func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error {
-	path, err := getPathToAuth(sys)
+	path, legacyFormat, err := getPathToAuth(sys)
 	if err != nil {
 		return err
 	}
@@ -224,6 +240,9 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
 		}
 	}
 
+	if legacyFormat {
+		return fmt.Errorf("writes to %s using legacy format are not supported", path)
+	}
 	auths, err := readJSONFile(path, false)
 	if err != nil {
 		return errors.Wrapf(err, "error reading JSON file %q", path)
diff --git a/vendor/github.com/containers/image/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
similarity index 65%
rename from vendor/github.com/containers/image/pkg/docker/config/config_linux.go
rename to vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
index 4d66a50df5..43f2d5a85f 100644
--- a/vendor/github.com/containers/image/pkg/docker/config/config_linux.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
@@ -4,10 +4,13 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/containers/image/pkg/keyctl"
+	"github.com/containers/image/v5/internal/pkg/keyctl"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
+const keyDescribePrefix = "container-registry-login:"
+
 func getAuthFromKernelKeyring(registry string) (string, string, error) {
 	userkeyring, err := keyctl.UserKeyring()
 	if err != nil {
@@ -41,6 +44,39 @@ func deleteAuthFromKernelKeyring(registry string) error {
 	return key.Unlink()
 }
 
+func removeAllAuthFromKernelKeyring() error {
+	keys, err := keyctl.ReadUserKeyring()
+	if err != nil {
+		return err
+	}
+
+	userkeyring, err := keyctl.UserKeyring()
+	if err != nil {
+		return err
+	}
+
+	for _, k := range keys {
+		keyAttr, err := k.Describe()
+		if err != nil {
+			return err
+		}
+		// split string "type;uid;gid;perm;description"
+		keyAttrs := strings.SplitN(keyAttr, ";", 5)
+		if len(keyAttrs) < 5 {
+			return errors.Errorf("Key attributes of %d are not available", k.ID())
+		}
+		keyDescribe := keyAttrs[4]
+		if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
+			err := keyctl.Unlink(userkeyring, k)
+			if err != nil {
+				return errors.Wrapf(err, "error unlinking key %d", k.ID())
+			}
+			logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr)
+		}
+	}
+	return nil
+}
+
 func setAuthToKernelKeyring(registry, username, password string) error {
 	keyring, err := keyctl.SessionKeyring()
 	if err != nil {
@@ -75,5 +111,5 @@ func setAuthToKernelKeyring(registry, username, password string) error {
 }
 
 func genDescription(registry string) string {
-	return fmt.Sprintf("container-registry-login:%s", registry)
+	return fmt.Sprintf("%s%s", keyDescribePrefix, registry)
 }
diff --git a/vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
similarity index 82%
rename from vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go
rename to
vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go index 1c1a02511c..9b0e8bee25 100644 --- a/vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go @@ -14,3 +14,7 @@ func deleteAuthFromKernelKeyring(registry string) error { func setAuthToKernelKeyring(registry, username, password string) error { return ErrNotSupported } + +func removeAllAuthFromKernelKeyring() error { + return ErrNotSupported +} diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/README.md b/vendor/github.com/containers/image/v5/pkg/strslice/README.md new file mode 100644 index 0000000000..ae6097e82e --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/strslice/README.md @@ -0,0 +1 @@ +This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice). diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go new file mode 100644 index 0000000000..bad493fb89 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go @@ -0,0 +1,30 @@ +package strslice + +import "encoding/json" + +// StrSlice represents a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go new file mode 100644 index 0000000000..ff802cefd9 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -0,0 +1,482 @@ +package sysregistriesv2 + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/BurntSushi/toml" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// systemRegistriesConfPath is the path to the system-wide registry +// configuration file and is used to add/subtract potential registries for +// obtaining images. You can override this at build time with +// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path' +var systemRegistriesConfPath = builtinRegistriesConfPath + +// builtinRegistriesConfPath is the path to the registry configuration file. +// DO NOT change this, instead see systemRegistriesConfPath above. +const builtinRegistriesConfPath = "/etc/containers/registries.conf" + +// Endpoint describes a remote location of a registry. +type Endpoint struct { + // The endpoint's remote location. + Location string `toml:"location,omitempty"` + // If true, certs verification will be skipped and HTTP (non-TLS) + // connections will be allowed. 
+ Insecure bool `toml:"insecure,omitempty"` +} + +// rewriteReference will substitute the provided reference `prefix` to the +// endpoints `location` from the `ref` and creates a new named reference from it. +// The function errors if the newly created reference is not parsable. +func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) { + refString := ref.String() + if !refMatchesPrefix(refString, prefix) { + return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString) + } + + newNamedRef := strings.Replace(refString, prefix, e.Location, 1) + newParsedRef, err := reference.ParseNamed(newNamedRef) + if err != nil { + return nil, errors.Wrapf(err, "error rewriting reference") + } + logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String()) + return newParsedRef, nil +} + +// Registry represents a registry. +type Registry struct { + // Prefix is used for matching images, and to translate one namespace to + // another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"` + // and we pull from "example.com/bar/myimage:latest", the image will + // effectively be pulled from "example.com/foo/bar/myimage:latest". + // If no Prefix is specified, it defaults to the specified location. + Prefix string `toml:"prefix"` + // A registry is an Endpoint too + Endpoint + // The registry's mirrors. + Mirrors []Endpoint `toml:"mirror,omitempty"` + // If true, pulling from the registry will be blocked. + Blocked bool `toml:"blocked,omitempty"` + // If true, mirrors will only be used for digest pulls. Pulling images by + // tag can potentially yield different images, depending on which endpoint + // we pull from. Forcing digest-pulls for mirrors avoids that issue. + MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"` +} + +// PullSource consists of an Endpoint and a Reference. Note that the reference is +// rewritten according to the registries prefix and the Endpoint's location. +type PullSource struct { + Endpoint Endpoint + Reference reference.Named +} + +// PullSourcesFromReference returns a slice of PullSource's based on the passed +// reference. +func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) { + var endpoints []Endpoint + + if r.MirrorByDigestOnly { + // Only use mirrors when the reference is a digest one. + if _, isDigested := ref.(reference.Canonical); isDigested { + endpoints = append(r.Mirrors, r.Endpoint) + } else { + endpoints = []Endpoint{r.Endpoint} + } + } else { + endpoints = append(r.Mirrors, r.Endpoint) + } + + sources := []PullSource{} + for _, ep := range endpoints { + rewritten, err := ep.rewriteReference(ref, r.Prefix) + if err != nil { + return nil, err + } + sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten}) + } + + return sources, nil +} + +// V1TOMLregistries is for backwards compatibility to sysregistries v1 +type V1TOMLregistries struct { + Registries []string `toml:"registries"` +} + +// V1TOMLConfig is for backwards compatibility to sysregistries v1 +type V1TOMLConfig struct { + Search V1TOMLregistries `toml:"search"` + Insecure V1TOMLregistries `toml:"insecure"` + Block V1TOMLregistries `toml:"block"` +} + +// V1RegistriesConf is the sysregistries v1 configuration format. +type V1RegistriesConf struct { + V1TOMLConfig `toml:"registries"` +} + +// Nonempty returns true if config contains at least one configuration entry. 
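+//
+// For example, a v1 file containing
+//
+//	[registries.search]
+//	registries = ["docker.io"]
+//
+// is nonempty.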
+func (config *V1RegistriesConf) Nonempty() bool { + return (len(config.V1TOMLConfig.Search.Registries) != 0 || + len(config.V1TOMLConfig.Insecure.Registries) != 0 || + len(config.V1TOMLConfig.Block.Registries) != 0) +} + +// V2RegistriesConf is the sysregistries v2 configuration format. +type V2RegistriesConf struct { + Registries []Registry `toml:"registry"` + // An array of host[:port] (not prefix!) entries to use for resolving unqualified image references + UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"` +} + +// Nonempty returns true if config contains at least one configuration entry. +func (config *V2RegistriesConf) Nonempty() bool { + return (len(config.Registries) != 0 || + len(config.UnqualifiedSearchRegistries) != 0) +} + +// tomlConfig is the data type used to unmarshal the toml config. +type tomlConfig struct { + V2RegistriesConf + V1RegistriesConf // for backwards compatibility with sysregistries v1 +} + +// InvalidRegistries represents an invalid registry configurations. An example +// is when "registry.com" is defined multiple times in the configuration but +// with conflicting security settings. +type InvalidRegistries struct { + s string +} + +// Error returns the error string. +func (e *InvalidRegistries) Error() string { + return e.s +} + +// parseLocation parses the input string, performs some sanity checks and returns +// the sanitized input string. An error is returned if the input string is +// empty or if contains an "http{s,}://" prefix. +func parseLocation(input string) (string, error) { + trimmed := strings.TrimRight(input, "/") + + if trimmed == "" { + return "", &InvalidRegistries{s: "invalid location: cannot be empty"} + } + + if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { + msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input) + return "", &InvalidRegistries{s: msg} + } + + return trimmed, nil +} + +// ConvertToV2 returns a v2 config corresponding to a v1 one. +func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) { + regMap := make(map[string]*Registry) + // The order of the registries is not really important, but make it deterministic (the same for the same config file) + // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations. 
+ registryOrder := []string{} + + getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object + var err error + location, err = parseLocation(location) + if err != nil { + return nil, err + } + reg, exists := regMap[location] + if !exists { + reg = &Registry{ + Endpoint: Endpoint{Location: location}, + Mirrors: []Endpoint{}, + Prefix: location, + } + regMap[location] = reg + registryOrder = append(registryOrder, location) + } + return reg, nil + } + + for _, blocked := range config.V1TOMLConfig.Block.Registries { + reg, err := getRegistry(blocked) + if err != nil { + return nil, err + } + reg.Blocked = true + } + for _, insecure := range config.V1TOMLConfig.Insecure.Registries { + reg, err := getRegistry(insecure) + if err != nil { + return nil, err + } + reg.Insecure = true + } + + res := &V2RegistriesConf{ + UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries, + } + for _, location := range registryOrder { + reg := regMap[location] + res.Registries = append(res.Registries, *reg) + } + return res, nil +} + +// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. +var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$") + +// postProcess checks the consistency of all the configuration, looks for conflicts, +// and normalizes the configuration (e.g., sets the Prefix to Location if not set). +func (config *V2RegistriesConf) postProcess() error { + regMap := make(map[string][]*Registry) + + for i := range config.Registries { + reg := &config.Registries[i] + // make sure Location and Prefix are valid + var err error + reg.Location, err = parseLocation(reg.Location) + if err != nil { + return err + } + + if reg.Prefix == "" { + reg.Prefix = reg.Location + } else { + reg.Prefix, err = parseLocation(reg.Prefix) + if err != nil { + return err + } + } + + // make sure mirrors are valid + for _, mir := range reg.Mirrors { + mir.Location, err = parseLocation(mir.Location) + if err != nil { + return err + } + } + regMap[reg.Location] = append(regMap[reg.Location], reg) + } + + // Given a registry can be mentioned multiple times (e.g., to have + // multiple prefixes backed by different mirrors), we need to make sure + // there are no conflicts among them. + // + // Note: we need to iterate over the registries array to ensure a + // deterministic behavior which is not guaranteed by maps. + for _, reg := range config.Registries { + others, _ := regMap[reg.Location] + for _, other := range others { + if reg.Insecure != other.Insecure { + msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location) + return &InvalidRegistries{s: msg} + } + if reg.Blocked != other.Blocked { + msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location) + return &InvalidRegistries{s: msg} + } + } + } + + for i := range config.UnqualifiedSearchRegistries { + registry, err := parseLocation(config.UnqualifiedSearchRegistries[i]) + if err != nil { + return err + } + if !anchoredDomainRegexp.MatchString(registry) { + return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)} + } + config.UnqualifiedSearchRegistries[i] = registry + } + + return nil +} + +// ConfigPath returns the path to the system-wide registry configuration file. 
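+//
+// For example (hypothetical values), a context with SystemRegistriesConfPath
+// set to "/tmp/registries.conf" yields exactly that path, while a nil context
+// yields the built-in /etc/containers/registries.conf.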
+func ConfigPath(ctx *types.SystemContext) string { + confPath := systemRegistriesConfPath + if ctx != nil { + if ctx.SystemRegistriesConfPath != "" { + confPath = ctx.SystemRegistriesConfPath + } else if ctx.RootForImplicitAbsolutePaths != "" { + confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath) + } + } + return confPath +} + +// configMutex is used to synchronize concurrent accesses to configCache. +var configMutex = sync.Mutex{} + +// configCache caches already loaded configs with config paths as keys and is +// used to avoid redudantly parsing configs. Concurrent accesses to the cache +// are synchronized via configMutex. +var configCache = make(map[string]*V2RegistriesConf) + +// InvalidateCache invalidates the registry cache. This function is meant to be +// used for long-running processes that need to reload potential changes made to +// the cached registry config files. +func InvalidateCache() { + configMutex.Lock() + defer configMutex.Unlock() + configCache = make(map[string]*V2RegistriesConf) +} + +// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached. +func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) { + configPath := ConfigPath(ctx) + + configMutex.Lock() + // if the config has already been loaded, return the cached registries + if config, inCache := configCache[configPath]; inCache { + configMutex.Unlock() + return config, nil + } + configMutex.Unlock() + + return TryUpdatingCache(ctx) +} + +// TryUpdatingCache loads the configuration from the provided `SystemContext` +// without using the internal cache. On success, the loaded configuration will +// be added into the internal registry cache. +func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) { + configPath := ConfigPath(ctx) + + configMutex.Lock() + defer configMutex.Unlock() + + // load the config + config, err := loadRegistryConf(configPath) + if err != nil { + // Return an empty []Registry if we use the default config, + // which implies that the config path of the SystemContext + // isn't set. Note: if ctx.SystemRegistriesConfPath points to + // the default config, we will still return an error. + if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") { + return &V2RegistriesConf{Registries: []Registry{}}, nil + } + return nil, err + } + + v2Config := &config.V2RegistriesConf + + // backwards compatibility for v1 configs + if config.V1RegistriesConf.Nonempty() { + if config.V2RegistriesConf.Nonempty() { + return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"} + } + v2, err := config.V1RegistriesConf.ConvertToV2() + if err != nil { + return nil, err + } + v2Config = v2 + } + + if err := v2Config.postProcess(); err != nil { + return nil, err + } + + // populate the cache + configCache[configPath] = v2Config + return v2Config, nil +} + +// GetRegistries loads and returns the registries specified in the config. +// Note the parsed content of registry config files is cached. For reloading, +// use `InvalidateCache` and re-call `GetRegistries`. 
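+//
+// A minimal caller sketch (hypothetical; a nil SystemContext selects the
+// system-wide defaults):
+//
+//	regs, err := sysregistriesv2.GetRegistries(nil)
+//	if err != nil {
+//		return err
+//	}
+//	for _, r := range regs {
+//		fmt.Println(r.Prefix, r.Location, r.Insecure, r.Blocked)
+//	}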
+func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return config.Registries, nil
+}
+
+// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
+// for unqualified image search, in the returned order.
+func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return config.UnqualifiedSearchRegistries, nil
+}
+
+// refMatchesPrefix returns true iff ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value.
+// (This is split from the caller primarily to make testing easier.)
+func refMatchesPrefix(ref, prefix string) bool {
+	switch {
+	case len(ref) < len(prefix):
+		return false
+	case len(ref) == len(prefix):
+		return ref == prefix
+	case len(ref) > len(prefix):
+		if !strings.HasPrefix(ref, prefix) {
+			return false
+		}
+		c := ref[len(prefix)]
+		// This allows "example.com:5000" to match "example.com",
+		// which is unintended; that will get fixed eventually, DON'T RELY
+		// ON THE CURRENT BEHAVIOR.
+		return c == ':' || c == '/' || c == '@'
+	default:
+		panic("Internal error: impossible comparison outcome")
+	}
+}
+
+// FindRegistry returns the Registry with the longest prefix for ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!).
+// If no Registry prefixes the image, nil is returned.
+func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	reg := Registry{}
+	prefixLen := 0
+	for _, r := range config.Registries {
+		if refMatchesPrefix(ref, r.Prefix) {
+			length := len(r.Prefix)
+			if length > prefixLen {
+				reg = r
+				prefixLen = length
+			}
+		}
+	}
+	if prefixLen != 0 {
+		return &reg, nil
+	}
+	return nil, nil
+}
+
+// Loads the registry configuration file from the filesystem and then unmarshals
+// it. Returns the unmarshalled object.
+func loadRegistryConf(configPath string) (*tomlConfig, error) {
+	config := &tomlConfig{}
+
+	configBytes, err := ioutil.ReadFile(configPath)
+	if err != nil {
+		return nil, err
+	}
+
+	err = toml.Unmarshal(configBytes, &config)
+	return config, err
+}
diff --git a/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
similarity index 100%
rename from vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go
rename to vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
new file mode 100644
index 0000000000..07fdd42a9a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -0,0 +1,65 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
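+//
+// A typical signing sequence, as a sketch (the reference and the key
+// fingerprint below are placeholder values):
+//
+//	mech, err := signature.NewGPGSigningMechanism()
+//	if err != nil {
+//		return err
+//	}
+//	defer mech.Close()
+//	sig, err := signature.SignDockerManifest(manifestBytes,
+//		"docker.io/library/busybox:latest", mech, "0123456789ABCDEF")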
+ +package signature + +import ( + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/opencontainers/go-digest" +) + +// SignDockerManifest returns a signature for manifest as the specified dockerReference, +// using mech and keyIdentity. +func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { + manifestDigest, err := manifest.Digest(m) + if err != nil { + return nil, err + } + sig := newUntrustedSignature(manifestDigest, dockerReference) + return sig.sign(mech, keyIdentity) +} + +// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, +// using mech. +func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, + expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { + expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference) + if err != nil { + return nil, err + } + sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + if keyIdentity != expectedKeyIdentity { + return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} + } + return nil + }, + validateSignedDockerReference: func(signedDockerReference string) error { + signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) + if err != nil { + return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)} + } + if signedRef.String() != expectedRef.String() { + return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", + signedDockerReference, expectedDockerReference)} + } + return nil + }, + validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { + matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) + if err != nil { + return err + } + if !matches { + return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} + } + return nil + }, + }) + if err != nil { + return nil, err + } + return sig, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/json.go b/vendor/github.com/containers/image/v5/signature/json.go new file mode 100644 index 0000000000..9e592863da --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/json.go @@ -0,0 +1,88 @@ +package signature + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +// jsonFormatError is returned when JSON does not match expected format. +type jsonFormatError string + +func (err jsonFormatError) Error() string { + return string(err) +} + +// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to +// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected. +// +// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, +// we could use reflection to automate this. Later? 
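+//
+// A resolver sketch (hypothetical object with a single known key "name";
+// any other key makes the whole unmarshal fail):
+//
+//	var name string
+//	err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+//		if key == "name" {
+//			return &name
+//		}
+//		return nil
+//	})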
+func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
+	seenKeys := map[string]struct{}{}
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return jsonFormatError(err.Error())
+	}
+	if t != json.Delim('{') {
+		return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+	}
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			return jsonFormatError(err.Error())
+		}
+		if t == json.Delim('}') {
+			break
+		}
+
+		key, ok := t.(string)
+		if !ok {
+			// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
+			return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+		}
+		if _, ok := seenKeys[key]; ok {
+			return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+		}
+		seenKeys[key] = struct{}{}
+
+		valuePtr := fieldResolver(key)
+		if valuePtr == nil {
+			return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+		}
+		// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
+		if err := dec.Decode(valuePtr); err != nil {
+			return jsonFormatError(err.Error())
+		}
+	}
+	if _, err := dec.Token(); err != io.EOF {
+		return jsonFormatError("Unexpected data after JSON object")
+	}
+	return nil
+}
+
+// paranoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
+// must be present exactly once, and no other fields are accepted.
+func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
+	seenKeys := map[string]struct{}{}
+	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+		if valuePtr, ok := exactFields[key]; ok {
+			seenKeys[key] = struct{}{}
+			return valuePtr
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+	for key := range exactFields {
+		if _, ok := seenKeys[key]; !ok {
+			return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go
new file mode 100644
index 0000000000..bdf26c531f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/mechanism.go
@@ -0,0 +1,85 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"strings"
+
+	"golang.org/x/crypto/openpgp"
+)
+
+// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
+// Each mechanism should eventually be closed by calling Close().
+// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
+// eliminate ambiguities, support CA signatures and perhaps other key properties)
+type SigningMechanism interface {
+	// Close removes resources associated with the mechanism, if any.
+	Close() error
+	// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
+	SupportsSigning() error
+	// Sign creates a (non-detached) signature of input using keyIdentity.
+	// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+	Sign(input []byte, keyIdentity string) ([]byte, error)
+	// Verify parses unverifiedSignature and returns the content and the signer's identity
+	Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
+	// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+	// along with a short identifier of the key used for signing.
+	// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+	// is NOT the same as a "key identity" used in other calls of this interface, and
+	// the values may have no recognizable relationship if the public key is not available.
+	UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
+}
+
+// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
+type SigningNotSupportedError string
+
+func (err SigningNotSupportedError) Error() string {
+	return string(err)
+}
+
+// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default
+// GPG configuration ($GNUPGHOME / ~/.gnupg)
+// The caller must call .Close() on the returned SigningMechanism.
+func NewGPGSigningMechanism() (SigningMechanism, error) {
+	return newGPGSigningMechanismInDirectory("")
+}
+
+// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+	return newEphemeralGPGSigningMechanism(blob)
+}
+
+// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls of this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+	// This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
+	md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	if !md.IsSigned {
+		return nil, "", errors.New("The input is not a signature")
+	}
+	content, err := ioutil.ReadAll(md.UnverifiedBody)
+	if err != nil {
+		// Coverage: An error during reading the body can happen only if
+		// 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
+		// to decrypt the contents anyway), or
+		// 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t.
+		return nil, "", err
+	}
+
+	// Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints
+	// (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)!
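+	// (For illustration: a hypothetical SignedByKeyId of 0x89abcdef01234567
+	// is formatted as "89ABCDEF01234567".)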
+ return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go new file mode 100644 index 0000000000..4825ab27c6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -0,0 +1,175 @@ +// +build !containers_image_openpgp + +package signature + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + + "github.com/mtrmac/gpgme" +) + +// A GPG/OpenPGP signing mechanism, implemented using gpgme. +type gpgmeSigningMechanism struct { + ctx *gpgme.Context + ephemeralDir string // If not "", a directory to be removed on Close() +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. +// The caller must call .Close() on the returned SigningMechanism. +func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { + ctx, err := newGPGMEContext(optionalDir) + if err != nil { + return nil, err + } + return &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: "", + }, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") + if err != nil { + return nil, nil, err + } + removeDir := true + defer func() { + if removeDir { + os.RemoveAll(dir) + } + }() + ctx, err := newGPGMEContext(dir) + if err != nil { + return nil, nil, err + } + mech := &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: dir, + } + keyIdentities, err := mech.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + + removeDir = false + return mech, keyIdentities, nil +} + +// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. +func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { + ctx, err := gpgme.New() + if err != nil { + return nil, err + } + if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { + return nil, err + } + if optionalDir != "" { + err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) + if err != nil { + return nil, err + } + } + ctx.SetArmor(false) + ctx.SetTextMode(false) + return ctx, nil +} + +func (m *gpgmeSigningMechanism) Close() error { + if m.ephemeralDir != "" { + os.RemoveAll(m.ephemeralDir) // Ignore an error, if any + } + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); +// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. 
+func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + inputData, err := gpgme.NewDataBytes(blob) + if err != nil { + return nil, err + } + res, err := m.ctx.Import(inputData) + if err != nil { + return nil, err + } + keyIdentities := []string{} + for _, i := range res.Imports { + if i.Result == nil { + keyIdentities = append(keyIdentities, i.Fingerprint) + } + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *gpgmeSigningMechanism) SupportsSigning() error { + return nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + key, err := m.ctx.GetKey(keyIdentity, true) + if err != nil { + return nil, err + } + inputData, err := gpgme.NewDataBytes(input) + if err != nil { + return nil, err + } + var sigBuffer bytes.Buffer + sigData, err := gpgme.NewDataWriter(&sigBuffer) + if err != nil { + return nil, err + } + if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { + return nil, err + } + return sigBuffer.Bytes(), nil +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity +func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + signedBuffer := bytes.Buffer{} + signedData, err := gpgme.NewDataWriter(&signedBuffer) + if err != nil { + return nil, "", err + } + unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) + if err != nil { + return nil, "", err + } + _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) + if err != nil { + return nil, "", err + } + if len(sigs) != 1 { + return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))} + } + sig := sigs[0] + // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves + if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { + // FIXME: Better error reporting eventually + return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)} + } + return signedBuffer.Bytes(), sig.Fingerprint, nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls ot this interface, and +// the values may have no recognizable relationship if the public key is not available. 
+func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + return gpgUntrustedSignatureContents(untrustedSignature) +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go new file mode 100644 index 0000000000..eccd610c9d --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -0,0 +1,159 @@ +// +build containers_image_openpgp + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "time" + + "github.com/containers/storage/pkg/homedir" + "golang.org/x/crypto/openpgp" +) + +// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. +type openpgpSigningMechanism struct { + keyring openpgp.EntityList +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. +// The caller must call .Close() on the returned SigningMechanism. +func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + + gpgHome := optionalDir + if gpgHome == "" { + gpgHome = os.Getenv("GNUPGHOME") + if gpgHome == "" { + gpgHome = path.Join(homedir.Get(), ".gnupg") + } + } + + pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + } else { + _, err := m.importKeysFromBytes(pubring) + if err != nil { + return nil, err + } + } + return m, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + keyIdentities, err := m.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + return m, keyIdentities, nil +} + +func (m *openpgpSigningMechanism) Close() error { + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) + if err != nil { + k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) + if e2 != nil { + return nil, err // The original error -- FIXME: is this better? + } + keyring = k + } + + keyIdentities := []string{} + for _, entity := range keyring { + if entity.PrimaryKey == nil { + // Coverage: This should never happen, openpgp.ReadEntity fails with a + // openpgp.errors.StructuralError instead of returning an entity with this + // field set to nil. + continue + } + // Uppercase the fingerprint to be compatible with gpgme + keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) + m.keyring = append(m.keyring, entity) + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. 
+func (m *openpgpSigningMechanism) SupportsSigning() error {
+	return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+	return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+	md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	if !md.IsSigned {
+		return nil, "", errors.New("not signed")
+	}
+	content, err := ioutil.ReadAll(md.UnverifiedBody)
+	if err != nil {
+		// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
+		// (and possibly also signed, but it _must_ be encrypted) and the signing
+		// “modification detection code” detects a mismatch. But in that case,
+		// we would expect the signature verification to fail as well, and that is checked
+		// first. Besides, we are not supplying any decryption keys, so we really
+		// can never reach this “encrypted data MDC mismatch” path.
+		return nil, "", err
+	}
+	if md.SignatureError != nil {
+		return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
+	}
+	if md.SignedBy == nil {
+		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
+	}
+	if md.Signature != nil {
+		if md.Signature.SigLifetimeSecs != nil {
+			expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
+			if time.Now().After(expiry) {
+				return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+			}
+		}
+	} else if md.SignatureV3 == nil {
+		// Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
+		// or sets md.SignatureError.
+		return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
+	}
+
+	// Uppercase the fingerprint to be compatible with gpgme
+	return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls of this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+	return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
new file mode 100644
index 0000000000..3eee70bc2a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -0,0 +1,688 @@
+// policy_config.go handles creation of policy objects, either by parsing JSON
+// or by programs building them programmatically.
+
+// The New* constructors are intended to be a stable API. FIXME: after an independent review.
+
+// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
+
+// We can't just blindly call json.Unmarshal because that would silently ignore
+// typos, and that would just not do for security policy.
+
+// FIXME? This is by no means a user-friendly parser: No location information in error messages, no other context.
+// But at least it is not worse than blind json.Unmarshal()…
+
+package signature
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
+var systemDefaultPolicyPath = builtinDefaultPolicyPath
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/etc/containers/policy.json"
+
+// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
+type InvalidPolicyFormatError string
+
+func (err InvalidPolicyFormatError) Error() string {
+	return string(err)
+}
+
+// DefaultPolicy returns the default policy of the system.
+// Most applications should be using this method to get the policy configured
+// by the system administrator.
+// sys should usually be nil, can be set to override the default.
+// NOTE: When this function returns an error, report it to the user and abort.
+// DO NOT hard-code fallback policies in your application.
+func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
+	return NewPolicyFromFile(defaultPolicyPath(sys))
+}
+
+// defaultPolicyPath returns a path to the default policy of the system.
+func defaultPolicyPath(sys *types.SystemContext) string {
+	if sys != nil {
+		if sys.SignaturePolicyPath != "" {
+			return sys.SignaturePolicyPath
+		}
+		if sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+		}
+	}
+	return systemDefaultPolicyPath
+}
+
+// NewPolicyFromFile returns a policy configured in the specified file.
+func NewPolicyFromFile(fileName string) (*Policy, error) {
+	contents, err := ioutil.ReadFile(fileName)
+	if err != nil {
+		return nil, err
+	}
+	policy, err := NewPolicyFromBytes(contents)
+	if err != nil {
+		return nil, errors.Wrapf(err, "invalid policy in %q", fileName)
+	}
+	return policy, nil
+}
+
+// NewPolicyFromBytes returns a policy parsed from the specified blob.
+// Use this function instead of calling json.Unmarshal directly.
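+//
+// A minimal input accepted by this parser, for illustration:
+//
+//	{"default": [{"type": "insecureAcceptAnything"}]}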
+func NewPolicyFromBytes(data []byte) (*Policy, error) { + p := Policy{} + if err := json.Unmarshal(data, &p); err != nil { + return nil, InvalidPolicyFormatError(err.Error()) + } + return &p, nil +} + +// Compile-time check that Policy implements json.Unmarshaler. +var _ json.Unmarshaler = (*Policy)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (p *Policy) UnmarshalJSON(data []byte) error { + *p = Policy{} + transports := policyTransportsMap{} + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + switch key { + case "default": + return &p.Default + case "transports": + return &transports + default: + return nil + } + }); err != nil { + return err + } + + if p.Default == nil { + return InvalidPolicyFormatError("Default policy is missing") + } + p.Transports = map[string]PolicyTransportScopes(transports) + return nil +} + +// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member. +type policyTransportsMap map[string]PolicyTransportScopes + +// Compile-time check that policyTransportsMap implements json.Unmarshaler. +var _ json.Unmarshaler = (*policyTransportsMap)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { + // We can't unmarshal directly into map values because it is not possible to take an address of a map value. + // So, use a temporary map of pointers-to-slices and convert. + tmpMap := map[string]*PolicyTransportScopes{} + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + // transport can be nil + transport := transports.Get(key) + // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. + if _, ok := tmpMap[key]; ok { + return nil + } + ptsWithTransport := policyTransportScopesWithTransport{ + transport: transport, + dest: &PolicyTransportScopes{}, // This allocates a new instance on each call. + } + tmpMap[key] = ptsWithTransport.dest + return &ptsWithTransport + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyTransportScopes "implements"" json.Unmarshaler. +// we want to only use policyTransportScopesWithTransport +var _ json.Unmarshaler = (*PolicyTransportScopes)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error { + return errors.New("Do not try to unmarshal PolicyTransportScopes directly") +} + +// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes +// while validating using a specific ImageTransport if not nil. +type policyTransportScopesWithTransport struct { + transport types.ImageTransport + dest *PolicyTransportScopes +} + +// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler. +var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { + // We can't unmarshal directly into map values because it is not possible to take an address of a map value. + // So, use a temporary map of pointers-to-slices and convert. 
+ tmpMap := map[string]*PolicyRequirements{} + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. + if _, ok := tmpMap[key]; ok { + return nil + } + if key != "" && m.transport != nil { + if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil { + return nil + } + } + ptr := &PolicyRequirements{} // This allocates a new instance on each call. + tmpMap[key] = ptr + return ptr + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m.dest)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyRequirements implements json.Unmarshaler. +var _ json.Unmarshaler = (*PolicyRequirements)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { + reqJSONs := []json.RawMessage{} + if err := json.Unmarshal(data, &reqJSONs); err != nil { + return err + } + if len(reqJSONs) == 0 { + return InvalidPolicyFormatError("List of verification policy requirements must not be empty") + } + res := make([]PolicyRequirement, len(reqJSONs)) + for i, reqJSON := range reqJSONs { + req, err := newPolicyRequirementFromJSON(reqJSON) + if err != nil { + return err + } + res[i] = req + } + *m = res + return nil +} + +// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. +func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { + var typeField prCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyRequirement + switch typeField.Type { + case prTypeInsecureAcceptAnything: + res = &prInsecureAcceptAnything{} + case prTypeReject: + res = &prReject{} + case prTypeSignedBy: + res = &prSignedBy{} + case prTypeSignedBaseLayer: + res = &prSignedBaseLayer{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. +func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { + return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} +} + +// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. +func NewPRInsecureAcceptAnything() PolicyRequirement { + return newPRInsecureAcceptAnything() +} + +// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. +var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { + *pr = prInsecureAcceptAnything{} + var tmp prInsecureAcceptAnything + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeInsecureAcceptAnything { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *pr = *newPRInsecureAcceptAnything() + return nil +} + +// newPRReject is NewPRReject, except it returns the private type. +func newPRReject() *prReject { + return &prReject{prCommon{Type: prTypeReject}} +} + +// NewPRReject returns a new "reject" PolicyRequirement. 
+func NewPRReject() PolicyRequirement {
+	return newPRReject()
+}
+
+// Compile-time check that prReject implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prReject)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prReject) UnmarshalJSON(data []byte) error {
+	*pr = prReject{}
+	var tmp prReject
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prTypeReject {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*pr = *newPRReject()
+	return nil
+}
+
+// newPRSignedBy returns a new prSignedBy if parameters are valid.
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+	if !keyType.IsValid() {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
+	}
+	if len(keyPath) > 0 && len(keyData) > 0 {
+		return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+	}
+	if signedIdentity == nil {
+		return nil, InvalidPolicyFormatError("signedIdentity not specified")
+	}
+	return &prSignedBy{
+		prCommon:       prCommon{Type: prTypeSignedBy},
+		KeyType:        keyType,
+		KeyPath:        keyPath,
+		KeyData:        keyData,
+		SignedIdentity: signedIdentity,
+	}, nil
+}
+
+// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
+func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+	return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
+func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
+}
+
+// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
+func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+	return newPRSignedBy(keyType, "", keyData, signedIdentity)
+}
+
+// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
+func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return newPRSignedByKeyData(keyType, keyData, signedIdentity)
+}
+
+// Compile-time check that prSignedBy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSignedBy) UnmarshalJSON(data []byte) error { + *pr = prSignedBy{} + var tmp prSignedBy + var gotKeyPath, gotKeyData = false, false + var signedIdentity json.RawMessage + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + switch key { + case "type": + return &tmp.Type + case "keyType": + return &tmp.KeyType + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBy { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var res *prSignedBy + var err error + switch { + case gotKeyPath && gotKeyData: + return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously") + case gotKeyPath && !gotKeyData: + res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity) + case !gotKeyPath && gotKeyData: + res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyData: + return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified") + default: // Coverage: This should never happen + return errors.Errorf("Impossible keyPath/keyData presence combination!?") + } + if err != nil { + return err + } + *pr = *res + + return nil +} + +// IsValid returns true iff kt is a recognized value +func (kt sbKeyType) IsValid() bool { + switch kt { + case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys, + SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + return true + default: + return false + } +} + +// Compile-time check that sbKeyType implements json.Unmarshaler. +var _ json.Unmarshaler = (*sbKeyType)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (kt *sbKeyType) UnmarshalJSON(data []byte) error { + *kt = sbKeyType("") + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if !sbKeyType(s).IsValid() { + return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s)) + } + *kt = sbKeyType(s) + return nil +} + +// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type. +func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) { + if baseLayerIdentity == nil { + return nil, InvalidPolicyFormatError("baseLayerIdentity not specified") + } + return &prSignedBaseLayer{ + prCommon: prCommon{Type: prTypeSignedBaseLayer}, + BaseLayerIdentity: baseLayerIdentity, + }, nil +} + +// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement. +func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedBaseLayer(baseLayerIdentity) +} + +// Compile-time check that prSignedBaseLayer implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBaseLayer)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
+	*pr = prSignedBaseLayer{}
+	var tmp prSignedBaseLayer
+	var baseLayerIdentity json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":              &tmp.Type,
+		"baseLayerIdentity": &baseLayerIdentity,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prTypeSignedBaseLayer {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
+	if err != nil {
+		return err
+	}
+	res, err := newPRSignedBaseLayer(bli)
+	if err != nil {
+		// Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
+		return err
+	}
+	*pr = *res
+	return nil
+}
+
+// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
+func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
+	var typeField prmCommon
+	if err := json.Unmarshal(data, &typeField); err != nil {
+		return nil, err
+	}
+	var res PolicyReferenceMatch
+	switch typeField.Type {
+	case prmTypeMatchExact:
+		res = &prmMatchExact{}
+	case prmTypeMatchRepoDigestOrExact:
+		res = &prmMatchRepoDigestOrExact{}
+	case prmTypeMatchRepository:
+		res = &prmMatchRepository{}
+	case prmTypeExactReference:
+		res = &prmExactReference{}
+	case prmTypeExactRepository:
+		res = &prmExactRepository{}
+	default:
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
+	}
+	if err := json.Unmarshal(data, &res); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
+func newPRMMatchExact() *prmMatchExact {
+	return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
+}
+
+// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
+func NewPRMMatchExact() PolicyReferenceMatch {
+	return newPRMMatchExact()
+}
+
+// Compile-time check that prmMatchExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
+	*prm = prmMatchExact{}
+	var tmp prmMatchExact
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeMatchExact {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*prm = *newPRMMatchExact()
+	return nil
+}
+
+// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
+func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
+	return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
+}
+
+// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
+func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
+	return newPRMMatchRepoDigestOrExact()
+}
+
+// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
+	*prm = prmMatchRepoDigestOrExact{}
+	var tmp prmMatchRepoDigestOrExact
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeMatchRepoDigestOrExact {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*prm = *newPRMMatchRepoDigestOrExact()
+	return nil
+}
+
+// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
+func newPRMMatchRepository() *prmMatchRepository {
+	return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
+}
+
+// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
+func NewPRMMatchRepository() PolicyReferenceMatch {
+	return newPRMMatchRepository()
+}
+
+// Compile-time check that prmMatchRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
+	*prm = prmMatchRepository{}
+	var tmp prmMatchRepository
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeMatchRepository {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*prm = *newPRMMatchRepository()
+	return nil
+}
+
+// newPRMExactReference is NewPRMExactReference, except it returns the private type.
+func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
+	ref, err := reference.ParseNormalizedNamed(dockerReference)
+	if err != nil {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
+	}
+	if reference.IsNameOnly(ref) {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
+	}
+	return &prmExactReference{
+		prmCommon:       prmCommon{Type: prmTypeExactReference},
+		DockerReference: dockerReference,
+	}, nil
+}
+
+// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
+func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
+	return newPRMExactReference(dockerReference)
+}
+
+// Compile-time check that prmExactReference implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactReference)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
+	*prm = prmExactReference{}
+	var tmp prmExactReference
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":            &tmp.Type,
+		"dockerReference": &tmp.DockerReference,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactReference {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+
+	res, err := newPRMExactReference(tmp.DockerReference)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
+
+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+	}
+	return &prmExactRepository{
+		prmCommon:        prmCommon{Type: prmTypeExactRepository},
+		DockerRepository: dockerRepository,
+	}, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+	return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+	*prm = prmExactRepository{}
+	var tmp prmExactRepository
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":             &tmp.Type,
+		"dockerRepository": &tmp.DockerRepository,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactRepository {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+
+	res, err := newPRMExactRepository(tmp.DockerRepository)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
new file mode 100644
index 0000000000..e94de2a9ca
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -0,0 +1,289 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+	return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+	sarAccepted signatureAcceptanceResult = "sarAccepted"
+	sarRejected signatureAcceptanceResult = "sarRejected"
+	sarUnknown  signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+	// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+	// costly initialization like creating temporary GPG home directories and reading files.
+	// Setup() (someState, error)
+	// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+	// isSignatureAuthorAccepted, given an image and a signature blob, returns:
+	// - sarAccepted if the signature has been verified against the appropriate public key
+	//   (where "appropriate public key" may depend on the contents of the signature);
+	//   in that case a parsed Signature should be returned.
+	// - sarRejected if the signature has not been verified;
+	//   in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+	//   succeeded but the result was rejection.
+	// - sarUnknown if this PolicyRequirement does not deal with signatures.
+	//   NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+	//   Returning sarUnknown and a non-nil error value is invalid.
+	// WARNING: This makes the signature contents acceptable for further processing,
+	// but it does not necessarily mean that the contents of the signature are
+	// consistent with local policy.
+	// For example:
+	// - Do not use a true value to determine whether to run
+	//   a container based on this image; use IsRunningImageAllowed instead.
+	// - Just because a signature is accepted does not automatically mean the contents of the
+	//   signature are authorized to run code as root, or to affect system or cluster configuration.
+	isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+	// isRunningImageAllowed returns true if the requirement allows running an image.
+	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+	// succeeded but the result was rejection.
+	// WARNING: This validates signatures and the manifest, but does not download or validate the
+	// layers. Users must validate that the layers match their expected digests.
+	isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+type PolicyReferenceMatch interface {
+	// matchesDockerReference decides whether a specific image identity is accepted for an image
+	// (or, usually, for the image's Reference().DockerReference()). Note that
+	// image.Reference().DockerReference() may be nil.
+	matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+}
+
+// PolicyContext encapsulates a policy and possible cached state
+// for speeding up its evaluation.
+type PolicyContext struct {
+	Policy *Policy
+	state  policyContextState // Internal consistency checking
+}
+
+// policyContextState is used internally to verify the users are not misusing a PolicyContext.
+type policyContextState string
+
+const (
+	pcInvalid      policyContextState = ""
+	pcInitializing policyContextState = "Initializing"
+	pcReady        policyContextState = "Ready"
+	pcInUse        policyContextState = "InUse"
+	pcDestroying   policyContextState = "Destroying"
+	pcDestroyed    policyContextState = "Destroyed"
+)
+
+// changeState changes pc.state, or fails if the state is unexpected
+func (pc *PolicyContext) changeState(expected, new policyContextState) error {
+	if pc.state != expected {
+		return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+	}
+	pc.state = new
+	return nil
+}
+
+// NewPolicyContext sets up and initializes a context for the specified policy.
+// The policy must not be modified while the context exists. FIXME: make a deep copy?
+// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
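+//
+// Typical lifecycle, as a sketch (Destroy's error is ignored by the defer):
+//
+//	pc, err := signature.NewPolicyContext(policy)
+//	if err != nil {
+//		return err
+//	}
+//	defer pc.Destroy()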
+func NewPolicyContext(policy *Policy) (*PolicyContext, error) { + pc := &PolicyContext{Policy: policy, state: pcInitializing} + // FIXME: initialize + if err := pc.changeState(pcInitializing, pcReady); err != nil { + // Huh?! This should never fail, we didn't give the pointer to anybody. + // Just give up and leave unclean state around. + return nil, err + } + return pc, nil +} + +// Destroy should be called when the user of the context is done with it. +func (pc *PolicyContext) Destroy() error { + if err := pc.changeState(pcReady, pcDestroying); err != nil { + return err + } + // FIXME: destroy + return pc.changeState(pcDestroying, pcDestroyed) +} + +// policyIdentityLogName returns a string description of the image identity for policy purposes. +// ONLY use this for log messages, not for any decisions! +func policyIdentityLogName(ref types.ImageReference) string { + return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity() +} + +// requirementsForImageRef selects the appropriate requirements for ref. +func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements { + // Do we have a PolicyTransportScopes for this transport? + transportName := ref.Transport().Name() + if transportScopes, ok := pc.Policy.Transports[transportName]; ok { + // Look for a full match. + identity := ref.PolicyConfigurationIdentity() + if req, ok := transportScopes[identity]; ok { + logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity) + return req + } + + // Look for a match of the possible parent namespaces. + for _, name := range ref.PolicyConfigurationNamespaces() { + if req, ok := transportScopes[name]; ok { + logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name) + return req + } + } + + // Look for a default match for the transport. + if req, ok := transportScopes[""]; ok { + logrus.Debugf(` Using transport "%s" policy section ""`, transportName) + return req + } + } + + logrus.Debugf(" Using default policy section") + return pc.Policy.Default +} + +// GetSignaturesWithAcceptedAuthor returns those signatures from an image +// for which the policy accepts the author (and which have been successfully +// verified). +// NOTE: This may legitimately return an empty list and no error, if the image +// has no signatures or only invalid signatures. +// WARNING: This makes the signature contents acceptable for further processing, +// but it does not necessarily mean that the contents of the signature are +// consistent with local policy. +// For example: +// - Do not use the existence of an accepted signature to determine whether to run +// a container based on this image; use IsRunningImageAllowed instead. +// - Just because a signature is accepted does not automatically mean the contents of the +// signature are authorized to run code as root, or to affect system or cluster configuration.
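To make the lookup order in requirementsForImageRef concrete, here is a hypothetical policy (the scope strings are illustrative; the exact identity and namespace values come from the transport's PolicyConfigurationIdentity and PolicyConfigurationNamespaces):

package main

import "github.com/containers/image/v5/signature"

func main() {
	// For an image whose PolicyConfigurationIdentity is, say, "registry.example.com/team/app:v1",
	// requirementsForImageRef consults, in order:
	//   1. the exact identity:       "registry.example.com/team/app:v1"
	//   2. parent namespaces:        "registry.example.com/team/app", "registry.example.com/team", "registry.example.com"
	//   3. the transport-wide scope: ""
	//   4. policy.Default
	policy := &signature.Policy{
		Default: signature.PolicyRequirements{ /* consulted last */ },
		Transports: map[string]signature.PolicyTransportScopes{
			"docker": {
				"registry.example.com/team": { /* matched as a parent namespace */ },
				"":                          { /* transport-wide fallback */ },
			},
		},
	}
	_ = policy
}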
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) { + if err := pc.changeState(pcReady, pcInUse); err != nil { + return nil, err + } + defer func() { + if err := pc.changeState(pcInUse, pcReady); err != nil { + sigs = nil + finalErr = err + } + }() + + logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference())) + reqs := pc.requirementsForImageRef(image.Reference()) + + // FIXME: rename Signatures to UnverifiedSignatures + unverifiedSignatures, err := image.Signatures(ctx) + if err != nil { + return nil, err + } + + res := make([]*Signature, 0, len(unverifiedSignatures)) + for sigNumber, sig := range unverifiedSignatures { + var acceptedSig *Signature // non-nil if accepted + rejected := false + // FIXME? Say more about the contents of the signature, i.e. parse it even before verification?! + logrus.Debugf("Evaluating signature %d:", sigNumber) + interpretingReqs: + for reqNumber, req := range reqs { + // FIXME: Log the requirement itself? For now, we use just the number. + // FIXME: supply state + switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res { + case sarAccepted: + if as == nil { // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber) + rejected = true + break interpretingReqs + } + logrus.Debugf(" Requirement %d: signature accepted", reqNumber) + if acceptedSig == nil { + acceptedSig = as + } else if *as != *acceptedSig { // Coverage: this should never happen + // Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents? + logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber) + rejected = true + acceptedSig = nil + break interpretingReqs + } + case sarRejected: + logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + case sarUnknown: + if err != nil { // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + } + logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber) + default: // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res)) + rejected = true + break interpretingReqs + } + } + // This also handles the (invalid) case of empty reqs, by rejecting the signature. + if acceptedSig != nil && !rejected { + logrus.Debugf(" Overall: OK, signature accepted") + res = append(res, acceptedSig) + } else { + logrus.Debugf(" Overall: Signature not accepted") + } + } + return res, nil +} + +// IsRunningImageAllowed returns true iff the policy allows running the image. +// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation +// succeeded but the result was rejection. +// WARNING: This validates signatures and the manifest, but does not download or validate the +// layers. Users must validate that the layers match their expected digests.
+func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) { + if err := pc.changeState(pcReady, pcInUse); err != nil { + return false, err + } + defer func() { + if err := pc.changeState(pcInUse, pcReady); err != nil { + res = false + finalErr = err + } + }() + + logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) + reqs := pc.requirementsForImageRef(image.Reference()) + + if len(reqs) == 0 { + return false, PolicyRequirementError("List of verification policy requirements must not be empty") + } + + for reqNumber, req := range reqs { + // FIXME: supply state + allowed, err := req.isRunningImageAllowed(ctx, image) + if !allowed { + logrus.Debugf("Requirement %d: denied, done", reqNumber) + return false, err + } + logrus.Debugf(" Requirement %d: allowed", reqNumber) + } + // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. + logrus.Debugf("Overall: allowed") + return true, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go new file mode 100644 index 0000000000..55cdd3054f --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go @@ -0,0 +1,20 @@ +// Policy evaluation for prSignedBaseLayer. + +package signature + +import ( + "context" + + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarUnknown, nil, nil +} + +func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + // FIXME? Reject this at policy parsing time already? + logrus.Errorf("signedBaseLayer not implemented yet!") + return false, PolicyRequirementError("signedBaseLayer not implemented yet!") +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go new file mode 100644 index 0000000000..26cca4759e --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -0,0 +1,130 @@ +// Policy evaluation for prSignedBy. + +package signature + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + switch pr.KeyType { + case SBKeyTypeGPGKeys: + case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + // FIXME? Reject this at policy parsing time already? 
+ return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType)) + default: + // This should never happen, newPRSignedBy ensures KeyType.IsValid() + return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType)) + } + + if pr.KeyPath != "" && pr.KeyData != nil { + return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`) + } + // FIXME: move this to per-context initialization + var data []byte + if pr.KeyData != nil { + data = pr.KeyData + } else { + d, err := ioutil.ReadFile(pr.KeyPath) + if err != nil { + return sarRejected, nil, err + } + data = d + } + + // FIXME: move this to per-context initialization + mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data) + if err != nil { + return sarRejected, nil, err + } + defer mech.Close() + if len(trustedIdentities) == 0 { + return sarRejected, nil, PolicyRequirementError("No public keys imported") + } + + signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + for _, trustedIdentity := range trustedIdentities { + if keyIdentity == trustedIdentity { + return nil + } + } + // Coverage: We use a private GPG home directory and only import trusted keys, so this should + // not be reachable. + return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity)) + }, + validateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref)) + } + return nil + }, + validateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, nil, err + } + + return sarAccepted, signature, nil +} + +func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + sigs, err := image.Signatures(ctx) + if err != nil { + return false, err + } + var rejections []error + for _, s := range sigs { + var reason error + switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value.
+ fallthrough + default: + reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + summary = PolicyRequirementError("A signature was required, but no signature exists") + case 1: + summary = rejections[0] + default: + var msgs []string + for _, e := range rejections { + msgs = append(msgs, e.Error()) + } + summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", + strings.Join(msgs, "; "))) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go new file mode 100644 index 0000000000..f949088b5f --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go @@ -0,0 +1,29 @@ +// Policy evaluation for the various simple PolicyRequirement types. + +package signature + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // prInsecureAcceptAnything semantics: Every image is allowed to run, + // but this does not consider the signature as verified. + return sarUnknown, nil, nil +} + +func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + return true, nil +} + +func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) +} + +func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go new file mode 100644 index 0000000000..a148ede523 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go @@ -0,0 +1,101 @@ +// PolicyReferenceMatch implementations. + +package signature + +import ( + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images. 
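A small aside on the reference.IsNameOnly checks used by the matchers that follow (a sketch; the reference strings are invented):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// ParseNormalizedNamed expands short names; IsNameOnly reports whether the
	// result still lacks both a tag and a digest, which the exact matchers reject.
	nameOnly, _ := reference.ParseNormalizedNamed("busybox")      // docker.io/library/busybox
	tagged, _ := reference.ParseNormalizedNamed("busybox:latest") // docker.io/library/busybox:latest
	fmt.Println(reference.IsNameOnly(nameOnly), reference.IsNameOnly(tagged)) // true false
}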
+func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) { + r1 := image.Reference().DockerReference() + if r1 == nil { + return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity", + transports.ImageName(image.Reference()))) + } + r2, err := reference.ParseNormalizedNamed(s2) + if err != nil { + return nil, nil, err + } + return r1, r2, nil +} + +func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + // Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now. + if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) { + return false + } + return signature.String() == intended.String() +} + +func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + + // Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now. + if reference.IsNameOnly(signature) { + return false + } + switch intended.(type) { + case reference.NamedTagged: // Includes the case when intended has both a tag and a digest. + return signature.String() == intended.String() + case reference.Canonical: + // We don’t actually compare the manifest digest against the signature here; that happens in UnparsedImage.Manifest and in prSignedBy. + // Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest, + // we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms) + return signature.Name() == intended.Name() + default: // !reference.IsNameOnly(intended) + return false + } +} + +func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + return signature.Name() == intended.Name() +} + +// parseDockerReferences converts two reference strings into parsed entities, failing on any error +func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) { + r1, err := reference.ParseNormalizedNamed(s1) + if err != nil { + return nil, nil, err + } + r2, err := reference.ParseNormalizedNamed(s2) + if err != nil { + return nil, nil, err + } + return r1, r2, nil +} + +func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference) + if err != nil { + return false + } + // prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
+ if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) { + return false + } + return signature.String() == intended.String() +} + +func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference) + if err != nil { + return false + } + return signature.Name() == intended.Name() +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go new file mode 100644 index 0000000000..d3b33bb7ac --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_types.go @@ -0,0 +1,152 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +// This defines types used to represent a signature verification policy in memory. +// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements +// built using the constructor functions provided in policy_config.go. + +package signature + +// NOTE: Keep this in sync with docs/containers-policy.json.5.md! + +// Policy defines requirements for considering a signature, or an image, valid. +type Policy struct { + // Default applies to any image which does not have a matching policy in Transports. + // Note that this can happen even if a matching PolicyTransportScopes exists in Transports + // if the image matches none of the scopes. + Default PolicyRequirements `json:"default"` + Transports map[string]PolicyTransportScopes `json:"transports"` +} + +// PolicyTransportScopes defines policies for images for a specific transport, +// for various scopes, the map keys. +// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.); +// there is one scope precisely matching a single image, and namespace scopes as prefixes +// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]]) +// The empty scope, if it exists, is considered a parent namespace of all other scopes. +// Most specific scope wins, duplication is prohibited (hard failure). +type PolicyTransportScopes map[string]PolicyRequirements + +// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature). +// Must not be empty, frequently will only contain a single element. +type PolicyRequirements []PolicyRequirement + +// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image. +// The type is public, but its definition is private. + +// prCommon is the common type field in a JSON encoding of PolicyRequirement. +type prCommon struct { + Type prTypeIdentifier `json:"type"` +} + +// prTypeIdentifier is a string designating a kind of a PolicyRequirement. +type prTypeIdentifier string + +const ( + prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything" + prTypeReject prTypeIdentifier = "reject" + prTypeSignedBy prTypeIdentifier = "signedBy" + prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer" +) + +// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything: +// every image is allowed to run. +// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted). +// FIXME? Better name? +type prInsecureAcceptAnything struct { + prCommon +} + +// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected. +type prReject struct { + prCommon +} + +// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity +type prSignedBy struct { + prCommon + + // KeyType specifies what kind of key reference KeyPath/KeyData is. + // Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs” + // FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only + KeyType sbKeyType `json:"keyType"` + + // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified. + KeyPath string `json:"keyPath,omitempty"` + // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified. + KeyData []byte `json:"keyData,omitempty"` + + // SignedIdentity specifies what image identity the signature must be claiming about the image. + // Defaults to "match-exact" if not specified. + SignedIdentity PolicyReferenceMatch `json:"signedIdentity"` +} + +// sbKeyType are the allowed values for prSignedBy.KeyType +type sbKeyType string + +const ( + // SBKeyTypeGPGKeys refers to keys contained in a GPG keyring + SBKeyTypeGPGKeys sbKeyType = "GPGKeys" + // SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring + SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys" + // SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates + // FIXME: PEM, DER? + SBKeyTypeX509Certificates sbKeyType = "X509Certificates" + // SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs + // FIXME: PEM, DER? + SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs" +) + +// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image. +type prSignedBaseLayer struct { + prCommon + // BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful. + BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"` +} + +// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement. +// The type is public, but its implementation is private. + +// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch. +type prmCommon struct { + Type prmTypeIdentifier `json:"type"` +} + +// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch. +type prmTypeIdentifier string + +const ( + prmTypeMatchExact prmTypeIdentifier = "matchExact" + prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact" + prmTypeMatchRepository prmTypeIdentifier = "matchRepository" + prmTypeExactReference prmTypeIdentifier = "exactReference" + prmTypeExactRepository prmTypeIdentifier = "exactRepository" +) + +// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
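Tying the JSON tags above together, a hypothetical policy.json fragment that these types would deserialize (registry and key path invented for illustration):

{
  "default": [{"type": "reject"}],
  "transports": {
    "docker": {
      "registry.example.com/team": [
        {
          "type": "signedBy",
          "keyType": "GPGKeys",
          "keyPath": "/etc/pki/containers/team.gpg",
          "signedIdentity": {"type": "matchRepoDigestOrExact"}
        }
      ]
    }
  }
}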
+type prmMatchExact struct { + prmCommon +} + +// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly, +// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest +type prmMatchRepoDigestOrExact struct { + prmCommon +} + +// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag. +type prmMatchRepository struct { + prmCommon +} + +// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly. +type prmExactReference struct { + prmCommon + DockerReference string `json:"dockerReference"` +} + +// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag. +type prmExactRepository struct { + prmCommon + DockerRepository string `json:"dockerRepository"` +} diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go new file mode 100644 index 0000000000..44e70b3b96 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/signature.go @@ -0,0 +1,279 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json! + +package signature + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/containers/image/v5/version" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +const ( + signatureType = "atomic container signature" +) + +// InvalidSignatureError is returned when parsing an invalid signature. +type InvalidSignatureError struct { + msg string +} + +func (err InvalidSignatureError) Error() string { + return err.msg +} + +// Signature is a parsed content of a signature. +// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below. +type Signature struct { + DockerManifestDigest digest.Digest + DockerReference string // FIXME: more precise type? +} + +// untrustedSignature is a parsed content of a signature. +type untrustedSignature struct { + UntrustedDockerManifestDigest digest.Digest + UntrustedDockerReference string // FIXME: more precise type? + UntrustedCreatorID *string + // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision, + // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). + // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, + // we would add another field, UntrustedTimestampNS int64. + UntrustedTimestamp *int64 +} + +// UntrustedSignatureInformation is information available in an untrusted signature. +// This may be useful when debugging signature verification failures, +// or when managing a set of signatures on a single image. +// +// WARNING: Do not use the contents of this for ANY security decisions, +// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +type UntrustedSignatureInformation struct { + UntrustedDockerManifestDigest digest.Digest + UntrustedDockerReference string // FIXME: more precise type? + UntrustedCreatorID *string + UntrustedTimestamp *time.Time + UntrustedShortKeyIdentifier string +} + +// newUntrustedSignature returns an untrustedSignature object with +// the specified primary contents and appropriate metadata. +func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution. + creatorID := "atomic " + version.Version + timestamp := time.Now().Unix() + return untrustedSignature{ + UntrustedDockerManifestDigest: dockerManifestDigest, + UntrustedDockerReference: dockerReference, + UntrustedCreatorID: &creatorID, + UntrustedTimestamp: &timestamp, + } +} + +// Compile-time check that untrustedSignature implements json.Marshaler +var _ json.Marshaler = (*untrustedSignature)(nil) + +// MarshalJSON implements the json.Marshaler interface. +func (s untrustedSignature) MarshalJSON() ([]byte, error) { + if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { + return nil, errors.New("Unexpected empty signature content") + } + critical := map[string]interface{}{ + "type": signatureType, + "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, + "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, + } + optional := map[string]interface{}{} + if s.UntrustedCreatorID != nil { + optional["creator"] = *s.UntrustedCreatorID + } + if s.UntrustedTimestamp != nil { + optional["timestamp"] = *s.UntrustedTimestamp + } + signature := map[string]interface{}{ + "critical": critical, + "optional": optional, + } + return json.Marshal(signature) +} + +// Compile-time check that untrustedSignature implements json.Unmarshaler +var _ json.Unmarshaler = (*untrustedSignature)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *untrustedSignature) UnmarshalJSON(data []byte) error { + err := s.strictUnmarshalJSON(data) + if err != nil { + if _, ok := err.(jsonFormatError); ok { + err = InvalidSignatureError{msg: err.Error()} + } + } + return err +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type. +// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { + var critical, optional json.RawMessage + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "critical": &critical, + "optional": &optional, + }); err != nil { + return err + } + + var creatorID string + var timestamp float64 + var gotCreatorID, gotTimestamp = false, false + if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} { + switch key { + case "creator": + gotCreatorID = true + return &creatorID + case "timestamp": + gotTimestamp = true + return &timestamp + default: + var ignore interface{} + return &ignore + } + }); err != nil { + return err + } + if gotCreatorID { + s.UntrustedCreatorID = &creatorID + } + if gotTimestamp { + intTimestamp := int64(timestamp) + if float64(intTimestamp) != timestamp { + return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"} + } + s.UntrustedTimestamp = &intTimestamp + } + + var t string + var image, identity json.RawMessage + if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{ + "type": &t, + "image": &image, + "identity": &identity, + }); err != nil { + return err + } + if t != signatureType { + return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)} + } + + var digestString string + if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{ + "docker-manifest-digest": &digestString, + }); err != nil { + return err + } + s.UntrustedDockerManifestDigest = digest.Digest(digestString) + + return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{ + "docker-reference": &s.UntrustedDockerReference, + }) +} + +// Sign formats the signature and returns a blob signed using mech and keyIdentity +// (If it seems surprising that this is a method on untrustedSignature, note that there +// isn’t a good reason to think that a key used by the user is trusted by any component +// of the system just because it is a private key — actually the presence of a private key +// on the system increases the likelihood of a successful attack on that private key +// on that particular system.) +func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) { + json, err := json.Marshal(s) + if err != nil { + return nil, err + } + + return mech.Sign(json, keyIdentity) +} + +// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable. +// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies +// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature +// because the functions have the same or similar types, so there is a risk of exchanging the functions; +// named members of this struct are more explicit.
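For reference, the JSON payload that MarshalJSON produces and strictUnmarshalJSON consumes above looks like this (the digest, reference, and creator string are invented for illustration):

{
  "critical": {
    "identity": {"docker-reference": "registry.example.com/team/app:v1"},
    "image": {"docker-manifest-digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
    "type": "atomic container signature"
  },
  "optional": {
    "creator": "atomic 5.0.0",
    "timestamp": 1572531600
  }
}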
+type signatureAcceptanceRules struct { + validateKeyIdentity func(string) error + validateSignedDockerReference func(string) error + validateSignedDockerManifestDigest func(digest.Digest) error +} + +// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components +// match expected values, both as specified by rules, and returns it +func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) { + signed, keyIdentity, err := mech.Verify(unverifiedSignature) + if err != nil { + return nil, err + } + if err := rules.validateKeyIdentity(keyIdentity); err != nil { + return nil, err + } + + var unmatchedSignature untrustedSignature + if err := json.Unmarshal(signed, &unmatchedSignature); err != nil { + return nil, InvalidSignatureError{msg: err.Error()} + } + if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil { + return nil, err + } + if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil { + return nil, err + } + // signatureAcceptanceRules have accepted this value. + return &Signature{ + DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest, + DockerReference: unmatchedSignature.UntrustedDockerReference, + }, nil +} + +// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature, +// WITHOUT doing any cryptographic verification. +// This may be useful when debugging signature verification failures, +// or when managing a set of signatures on a single image. +// +// WARNING: Do not use the contents of this for ANY security decisions, +// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable. +// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) { + // NOTE: This should eventually do format autodetection.
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) + if err != nil { + return nil, err + } + defer mech.Close() + + untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes) + if err != nil { + return nil, err + } + var untrustedDecodedContents untrustedSignature + if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil { + return nil, InvalidSignatureError{msg: err.Error()} + } + + var timestamp *time.Time // = nil + if untrustedDecodedContents.UntrustedTimestamp != nil { + ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0) + timestamp = &ts + } + return &UntrustedSignatureInformation{ + UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest, + UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference, + UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID, + UntrustedTimestamp: timestamp, + UntrustedShortKeyIdentifier: shortKeyIdentifier, + }, nil +} diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go similarity index 80% rename from vendor/github.com/containers/image/storage/storage_image.go rename to vendor/github.com/containers/image/v5/storage/storage_image.go index 946a85f7b1..2b89f329f5 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -6,6 +6,7 @@ import ( "bytes" "context" "encoding/json" + stderrors "errors" "fmt" "io" "io/ioutil" @@ -14,12 +15,12 @@ import ( "sync" "sync/atomic" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/internal/tmpdir" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache/none" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" @@ -32,38 +33,38 @@ import ( var ( // ErrBlobDigestMismatch is returned when PutBlob() is given a blob // with a digest-based name that doesn't match its contents. - ErrBlobDigestMismatch = errors.New("blob digest mismatch") + ErrBlobDigestMismatch = stderrors.New("blob digest mismatch") // ErrBlobSizeMismatch is returned when PutBlob() is given a blob // with an expected size that doesn't match the reader. - ErrBlobSizeMismatch = errors.New("blob size mismatch") - // ErrNoManifestLists is returned when GetManifest() is called. - // with a non-nil instanceDigest. - ErrNoManifestLists = errors.New("manifest lists are not supported by this transport") + ErrBlobSizeMismatch = stderrors.New("blob size mismatch") // ErrNoSuchImage is returned when we attempt to access an image which // doesn't exist in the storage area. 
ErrNoSuchImage = storage.ErrNotAnImage ) type storageImageSource struct { - imageRef storageReference - image *storage.Image - layerPosition map[digest.Digest]int // Where we are in reading a blob's layers - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + imageRef storageReference + image *storage.Image + layerPosition map[digest.Digest]int // Where we are in reading a blob's layers + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice } type storageImageDestination struct { - imageRef storageReference - directory string // Temporary directory where we store blobs until Commit() time - nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - signatures []byte // Signature contents, temporary - putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions - blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes - filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + imageRef storageReference + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + signatures []byte // Signature contents, temporary + signatureses map[digest.Digest][]byte // Instance signature contents, temporary + putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice } type storageImageCloser struct { @@ -72,26 +73,33 @@ type storageImageCloser struct { } // manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions. 
-// If a specific manifest digest is explicitly requested by the user, the key retruned function should be used preferably; +// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably; // for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey func manifestBigDataKey(digest digest.Digest) string { return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String() } +// signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions. +func signatureBigDataKey(digest digest.Digest) string { + return "signature-" + digest.Encoded() +} + // newImageSource sets up an image for reading. -func newImageSource(imageRef storageReference) (*storageImageSource, error) { +func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) { // First, locate the image. - img, err := imageRef.resolveImage() + img, err := imageRef.resolveImage(sys) if err != nil { return nil, err } // Build the reader object. image := &storageImageSource{ - imageRef: imageRef, - image: img, - layerPosition: make(map[digest.Digest]int), - SignatureSizes: []int{}, + imageRef: imageRef, + image: img, + layerPosition: make(map[digest.Digest]int), + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), } if img.Metadata != "" { if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { @@ -182,7 +190,12 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC // GetManifest() reads the image's manifest. func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { if instanceDigest != nil { - return nil, "", ErrNoManifestLists + key := manifestBigDataKey(*instanceDigest) + blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) + if err != nil { + return nil, "", errors.Wrapf(err, "error reading manifest for image instance %q", *instanceDigest) + } + return blob, manifest.GuessMIMEType(blob), err } if len(s.cachedManifest) == 0 { // The manifest is stored as a big data item. @@ -214,11 +227,14 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di // LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of // the image, after they've been decompressed.
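A quick illustration of the two key helpers above (assuming storage.ImageDigestManifestBigDataNamePrefix is "manifest"; the digest is the well-known SHA-256 of the empty string):

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	d := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	// manifestBigDataKey: prefix + "-" + digest.String() (keeps the algorithm prefix)
	fmt.Println("manifest-" + d.String()) // manifest-sha256:e3b0...
	// signatureBigDataKey: "signature-" + digest.Encoded() (hex only, no algorithm)
	fmt.Println("signature-" + d.Encoded()) // signature-e3b0...
}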
-func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - manifestBlob, manifestType, err := s.GetManifest(ctx, nil) +func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest) if err != nil { return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID) } + if manifest.MIMETypeIsMultiImage(manifestType) { + return nil, errors.Errorf("can't copy layers for a manifest list (shouldn't be attempted)") + } man, err := manifest.FromBlob(manifestBlob, manifestType) if err != nil { return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID) } @@ -292,25 +308,33 @@ func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos [] // GetSignatures() parses the image's signatures blob into a slice of byte slices. func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) { - if instanceDigest != nil { - return nil, ErrNoManifestLists - } var offset int sigslice := [][]byte{} signature := []byte{} - if len(s.SignatureSizes) > 0 { - signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, "signatures") + signatureSizes := s.SignatureSizes + key := "signatures" + instance := "default instance" + if instanceDigest != nil { + signatureSizes = s.SignaturesSizes[*instanceDigest] + key = signatureBigDataKey(*instanceDigest) + instance = instanceDigest.Encoded() + } + if len(signatureSizes) > 0 { + signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) if err != nil { - return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.image.ID) + return nil, errors.Wrapf(err, "error looking up signatures data for image %q (%s)", s.image.ID, instance) } signature = signatureBlob } - for _, length := range s.SignatureSizes { + for _, length := range signatureSizes { + if offset+length > len(signature) { + return nil, errors.Errorf("error looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, offset+length, len(signature)) + } sigslice = append(sigslice, signature[offset:offset+length]) offset += length } if offset != len(signature) { - return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset) + return nil, errors.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signature)-offset) } return sigslice, nil } @@ -323,12 +347,14 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e return nil, errors.Wrapf(err, "error creating a temporary directory") } image := &storageImageDestination{ - imageRef: imageRef, - directory: directory, - blobDiffIDs: make(map[digest.Digest]digest.Digest), - fileSizes: make(map[digest.Digest]int64), - filenames: make(map[digest.Digest]string), - SignatureSizes: []int{}, + imageRef: imageRef, + directory: directory, + signatureses: make(map[digest.Digest][]byte), + blobDiffIDs: make(map[digest.Digest]digest.Digest), + fileSizes: make(map[digest.Digest]int64), + filenames: make(map[digest.Digest]string), + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), } return image, nil } @@ -345,9 +371,9 @@ func (s *storageImageDestination) Close() error { } func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression { - // We ultimately have to
decompress layers to populate trees on disk, - // so callers shouldn't bother compressing them before handing them to - // us, if they're not already compressed. + // We ultimately have to decompress layers to populate trees on disk + // and need to explicitly ask for it here, so that the layers' MIME + // types can be set accordingly. return types.PreserveOriginal } @@ -404,10 +430,10 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, } // Ensure that any information that we were given about the blob is correct. if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { - return errorBlobInfo, ErrBlobDigestMismatch + return errorBlobInfo, errors.WithStack(ErrBlobDigestMismatch) } if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { - return errorBlobInfo, ErrBlobSizeMismatch + return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch) } // Record information about the blob. s.putBlobMutex.Lock() @@ -579,7 +605,34 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er return nil, errors.New("blob not found") } -func (s *storageImageDestination) Commit(ctx context.Context) error { +func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + if len(s.manifest) == 0 { + return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") + } + toplevelManifest, _, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return errors.Wrapf(err, "error retrieving top-level manifest") + } + // If the name we're saving to includes a digest, then check that the + // manifests that we're about to save all either match the one from the + // unparsedToplevel, or match the digest in the name that we're using. + if s.imageRef.named != nil { + if digested, ok := s.imageRef.named.(reference.Digested); ok { + matches, err := manifest.MatchesDigest(s.manifest, digested.Digest()) + if err != nil { + return err + } + if !matches { + matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest()) + if err != nil { + return err + } + } + if !matches { + return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest()) + } + } + } // Find the list of layer blobs. if len(s.manifest) == 0 { return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") @@ -747,7 +800,8 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) } } - // Set the reference's name on the image. + // Set the reference's name on the image. We don't need to worry about avoiding duplicate + // values because SetNames() will deduplicate the list that we pass to it. if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { names := []string{} if name != nil { @@ -765,26 +819,43 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { } logrus.Debugf("set names of image %q to %v", img.ID, names) } - // Save the manifest. Allow looking it up by digest by using the key convention defined by the Store. + // Save the unparsedToplevel's manifest. 
+ if len(toplevelManifest) != 0 { + manifestDigest, err := manifest.Digest(toplevelManifest) + if err != nil { + return errors.Wrapf(err, "error digesting top-level manifest") + } + key := manifestBigDataKey(manifestDigest) + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err) + return errors.Wrapf(err, "error saving top-level manifest for image %q", img.ID) + } + } + // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store. // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. manifestDigest, err := manifest.Digest(s.manifest) if err != nil { return errors.Wrapf(err, "error computing manifest digest") } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil { + key := manifestBigDataKey(manifestDigest) + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return err + return errors.Wrapf(err, "error saving manifest for image %q", img.ID) } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil { + key = storage.ImageDigestBigDataKey + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return err + return errors.Wrapf(err, "error saving manifest for image %q", img.ID) } // Save the signatures, if we have any. if len(s.signatures) > 0 { @@ -793,7 +864,17 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return err + return errors.Wrapf(err, "error saving signatures for image %q", img.ID) + } + } + for instanceDigest, signatures := range s.signatureses { + key := signatureBigDataKey(instanceDigest) + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return errors.Wrapf(err, "error saving signatures for image %q", img.ID) } } // Save our metadata. 
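A sketch of the big-data items an image record roughly ends up with after Commit() when a manifest list was copied (digests invented for illustration):

// "manifest-sha256:<list digest>"      the top-level list from unparsedToplevel
// "manifest-sha256:<instance digest>"  the instance manifest actually pulled (s.manifest)
// "manifest"                           storage.ImageDigestBigDataKey copy of s.manifest
// "signatures"                         signatures recorded with a nil instanceDigest
// "signature-<instance hex>"           per-instance signatures from s.signatureses
//
// SignatureSizes/SignaturesSizes in the image metadata record how to re-split
// each concatenated signature blob.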
@@ -803,7 +884,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) - return err + return errors.Wrapf(err, "error encoding metadata for image %q", img.ID) } if len(metadata) != 0 { if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { @@ -811,7 +892,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) - return err + return errors.Wrapf(err, "error saving metadata for image %q", img.ID) } logrus.Debugf("saved image metadata %q", string(metadata)) } @@ -830,21 +911,10 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string { } // PutManifest writes the manifest to the destination. -func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error { - if s.imageRef.named != nil { - if digested, ok := s.imageRef.named.(reference.Digested); ok { - matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest()) - if err != nil { - return err - } - if !matches { - return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest()) - } - } - } - - s.manifest = make([]byte, len(manifestBlob)) - copy(s.manifest, manifestBlob) +func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { + newBlob := make([]byte, len(manifestBlob)) + copy(newBlob, manifestBlob) + s.manifest = newBlob return nil } @@ -873,7 +943,7 @@ func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { } // PutSignatures records the image's signatures for committing as a single data blob. 
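Reduced to a standalone sketch, the size-list bookkeeping shared by PutSignatures (below) and GetSignatures (above) works like this:

package main

import "fmt"

func main() {
	// Concatenate the way PutSignatures does, recording each length...
	sigs := [][]byte{[]byte("first"), []byte("second-sig")}
	var blob []byte
	var sizes []int
	for _, sig := range sigs {
		sizes = append(sizes, len(sig))
		blob = append(blob, sig...)
	}
	// ...then re-split the way GetSignatures does, walking the offsets.
	var out [][]byte
	offset := 0
	for _, n := range sizes {
		out = append(out, blob[offset:offset+n])
		offset += n
	}
	fmt.Printf("%q\n", out) // ["first" "second-sig"]
}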
-func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { +func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { sizes := []int{} sigblob := []byte{} for _, sig := range signatures { @@ -883,8 +953,21 @@ func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures copy(newblob[len(sigblob):], sig) sigblob = newblob } - s.signatures = sigblob - s.SignatureSizes = sizes + if instanceDigest == nil { + s.signatures = sigblob + s.SignatureSizes = sizes + } + if instanceDigest == nil && len(s.manifest) > 0 { + manifestDigest, err := manifest.Digest(s.manifest) + if err != nil { + return err + } + instanceDigest = &manifestDigest + } + if instanceDigest != nil { + s.signatureses[*instanceDigest] = sigblob + s.SignaturesSizes[*instanceDigest] = sizes + } return nil } @@ -940,7 +1023,7 @@ func (s *storageImageCloser) Size() (int64, error) { // newImage creates an image that also knows its size func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) { - src, err := newImageSource(s) + src, err := newImageSource(ctx, sys, s) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go similarity index 69% rename from vendor/github.com/containers/image/storage/storage_reference.go rename to vendor/github.com/containers/image/v5/storage/storage_reference.go index c046d9f22e..4e137ad1b1 100644 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -6,9 +6,12 @@ import ( "context" "strings" - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -51,9 +54,79 @@ func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { return false } +// imageMatchesSystemContext checks if the passed-in image both contains a +// manifest that matches the passed-in digest, and identifies itself as being +// appropriate for running on the system that matches sys. +// If we somehow ended up sharing the same storage among multiple types of +// systems, and managed to download multiple images from the same manifest +// list, their image records will all contain copies of the manifest list, and +// this check will help us decide which of them we want to return when we've +// been asked to resolve an image reference that uses the list's digest to a +// specific image ID. +func imageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool { + // First, check if the image record has a manifest that matches the + // specified digest. + key := manifestBigDataKey(manifestDigest) + manifestBytes, err := store.ImageBigData(img.ID, key) + if err != nil { + return false + } + // The manifest is either a list, or not a list. If it's a list, find + // the digest of the instance that matches the current system, and try + // to load that manifest from the image record, and use it. 
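+	// (Illustrative aside, not upstream text: MIMETypeIsMultiImage reports true for list types such as
+	//	manifest.DockerV2ListMediaType ("application/vnd.docker.distribution.manifest.list.v2+json") and
+	//	imgspecv1.MediaTypeImageIndex ("application/vnd.oci.image.index.v1+json"),
+	// and false for single-image manifest types; that is what drives the branch below.)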
+	manifestType := manifest.GuessMIMEType(manifestBytes)
+	if manifest.MIMETypeIsMultiImage(manifestType) {
+		list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+		if err != nil {
+			return false
+		}
+		manifestDigest, err = list.ChooseInstance(sys)
+		if err != nil {
+			return false
+		}
+		key = manifestBigDataKey(manifestDigest)
+		manifestBytes, err = store.ImageBigData(img.ID, key)
+		if err != nil {
+			return false
+		}
+		manifestType = manifest.GuessMIMEType(manifestBytes)
+	}
+	// Parse the manifest, and inspect it using the image's configuration blob.
+	m, err := manifest.FromBlob(manifestBytes, manifestType)
+	if err != nil {
+		return false
+	}
+	getConfig := func(blobInfo types.BlobInfo) ([]byte, error) {
+		return store.ImageBigData(img.ID, blobInfo.Digest.String())
+	}
+	ii, err := m.Inspect(getConfig)
+	if err != nil {
+		return false
+	}
+	// Build a dummy index containing one instance and information about
+	// the image's target system from the image's configuration.
+	index := manifest.OCI1IndexFromComponents([]imgspecv1.Descriptor{{
+		MediaType: imgspecv1.MediaTypeImageManifest,
+		Digest:    manifestDigest,
+		Size:      int64(len(manifestBytes)),
+		Platform: &imgspecv1.Platform{
+			OS:           ii.Os,
+			Architecture: ii.Architecture,
+		},
+	}}, nil)
+	// Check that ChooseInstance() would select this image for this system,
+	// from a list of images.
+	instanceDigest, err := index.ChooseInstance(sys)
+	if err != nil {
+		return false
+	}
+	// Double-check that we can read the runnable image's manifest from the
+	// image record.
+	key = manifestBigDataKey(instanceDigest)
+	_, err = store.ImageBigData(img.ID, key)
+	return err == nil
+}
+
 // Resolve the reference's name to an image ID in the store, if there's already
 // one present with the same name or ID, and return the image.
-func (s *storageReference) resolveImage() (*storage.Image, error) {
+func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
 	var loadedImage *storage.Image
 	if s.id == "" && s.named != nil {
 		// Look for an image that has the expanded reference name as an explicit Name value.
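To make the platform-selection step above concrete: a minimal sketch of how ChooseInstance picks an instance from a synthesized OCI index, mirroring the imageMatchesSystemContext logic. This is an illustrative standalone function, not part of the diff; it assumes the same imports (manifest, imgspecv1, types, digest) as the file above.

// chooseForSystem reports which manifest digest an OCI index built from a single
// descriptor would resolve to for the given system context.
func chooseForSystem(sys *types.SystemContext, manifestDigest digest.Digest, size int64, os, arch string) (digest.Digest, error) {
	index := manifest.OCI1IndexFromComponents([]imgspecv1.Descriptor{{
		MediaType: imgspecv1.MediaTypeImageManifest,
		Digest:    manifestDigest,
		Size:      size,
		Platform:  &imgspecv1.Platform{OS: os, Architecture: arch},
	}}, nil)
	// ChooseInstance prefers sys.OSChoice/sys.ArchitectureChoice when set,
	// falling back to the runtime's GOOS/GOARCH.
	return index.ChooseInstance(sys)
}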
@@ -72,9 +145,10 @@ func (s *storageReference) resolveImage() (*storage.Image, error) { if err == nil && len(images) > 0 { for _, image := range images { if imageMatchesRepo(image, s.named) { - loadedImage = image - s.id = image.ID - break + if loadedImage == nil || imageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) { + loadedImage = image + s.id = image.ID + } } } } @@ -202,7 +276,7 @@ func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext } func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - img, err := s.resolveImage() + img, err := s.resolveImage(sys) if err != nil { return err } @@ -217,7 +291,7 @@ func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemCont } func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(s) + return newImageSource(ctx, sys, s) } func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go similarity index 98% rename from vendor/github.com/containers/image/storage/storage_transport.go rename to vendor/github.com/containers/image/v5/storage/storage_transport.go index c9a05e6c01..62a091da4c 100644 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -7,14 +7,13 @@ import ( "path/filepath" "strings" - "github.com/pkg/errors" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -288,7 +287,7 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe } if sref, ok := ref.(*storageReference); ok { tmpRef := *sref - if img, err := tmpRef.resolveImage(); err == nil { + if img, err := tmpRef.resolveImage(&types.SystemContext{}); err == nil { return img, nil } } diff --git a/vendor/github.com/containers/image/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go similarity index 84% rename from vendor/github.com/containers/image/tarball/doc.go rename to vendor/github.com/containers/image/v5/tarball/doc.go index a6ced5a0ee..ead2d4263d 100644 --- a/vendor/github.com/containers/image/tarball/doc.go +++ b/vendor/github.com/containers/image/v5/tarball/doc.go @@ -7,11 +7,10 @@ // import ( // "fmt" // -// cp "github.com/containers/image/copy" -// "github.com/containers/image/tarball" -// "github.com/containers/image/transports/alltransports" -// -// imgspecv1 "github.com/containers/image/transports/alltransports" +// cp "github.com/containers/image/v5/copy" +// "github.com/containers/image/v5/tarball" +// "github.com/containers/image/v5/transports/alltransports" +// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" // ) // // func imageFromTarball() { diff --git a/vendor/github.com/containers/image/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go similarity index 95% rename from 
vendor/github.com/containers/image/tarball/tarball_reference.go rename to vendor/github.com/containers/image/v5/tarball/tarball_reference.go index fc1230a896..00150c53bb 100644 --- a/vendor/github.com/containers/image/tarball/tarball_reference.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -6,9 +6,9 @@ import ( "os" "strings" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go similarity index 91% rename from vendor/github.com/containers/image/tarball/tarball_src.go rename to vendor/github.com/containers/image/v5/tarball/tarball_src.go index 76e3e755ff..694ad17bd1 100644 --- a/vendor/github.com/containers/image/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -12,7 +12,7 @@ import ( "strings" "time" - "github.com/containers/image/types" + "github.com/containers/image/v5/types" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" imgspecs "github.com/opencontainers/image-spec/specs-go" @@ -248,9 +248,8 @@ func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *d } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as there can be no secondary manifests. func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { if instanceDigest != nil { return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) @@ -262,7 +261,14 @@ func (is *tarballImageSource) Reference() types.ImageReference { return &is.reference } -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (*tarballImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. 
+func (*tarballImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { return nil, nil } diff --git a/vendor/github.com/containers/image/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go similarity index 94% rename from vendor/github.com/containers/image/tarball/tarball_transport.go rename to vendor/github.com/containers/image/v5/tarball/tarball_transport.go index 72558b5e83..113545cb79 100644 --- a/vendor/github.com/containers/image/tarball/tarball_transport.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -7,8 +7,8 @@ import ( "os" "strings" - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" ) const ( diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go similarity index 75% rename from vendor/github.com/containers/image/transports/alltransports/alltransports.go rename to vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index 3a988f3f8d..2110a091d2 100644 --- a/vendor/github.com/containers/image/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -6,17 +6,17 @@ import ( // register all known transports // NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating // a transport. - _ "github.com/containers/image/directory" - _ "github.com/containers/image/docker" - _ "github.com/containers/image/docker/archive" - _ "github.com/containers/image/oci/archive" - _ "github.com/containers/image/oci/layout" - _ "github.com/containers/image/openshift" - _ "github.com/containers/image/tarball" + _ "github.com/containers/image/v5/directory" + _ "github.com/containers/image/v5/docker" + _ "github.com/containers/image/v5/docker/archive" + _ "github.com/containers/image/v5/oci/archive" + _ "github.com/containers/image/v5/oci/layout" + _ "github.com/containers/image/v5/openshift" + _ "github.com/containers/image/v5/tarball" // The ostree transport is registered by ostree*.go // The storage transport is registered by storage*.go - "github.com/containers/image/transports" - "github.com/containers/image/types" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/image/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go similarity index 71% rename from vendor/github.com/containers/image/transports/alltransports/docker_daemon.go rename to vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go index 6d2ba4b304..82224052e6 100644 --- a/vendor/github.com/containers/image/transports/alltransports/docker_daemon.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go @@ -4,5 +4,5 @@ package alltransports import ( // Register the docker-daemon transport - _ "github.com/containers/image/docker/daemon" + _ "github.com/containers/image/v5/docker/daemon" ) diff --git a/vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go similarity index 75% rename from 
vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go rename to vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go index 27f3850f1c..d137007991 100644 --- a/vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go @@ -2,7 +2,7 @@ package alltransports -import "github.com/containers/image/transports" +import "github.com/containers/image/v5/transports" func init() { transports.Register(transports.NewStubTransport("docker-daemon")) diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go similarity index 71% rename from vendor/github.com/containers/image/transports/alltransports/ostree.go rename to vendor/github.com/containers/image/v5/transports/alltransports/ostree.go index cc4d69fe89..72432d1ef8 100644 --- a/vendor/github.com/containers/image/transports/alltransports/ostree.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go @@ -4,5 +4,5 @@ package alltransports import ( // Register the ostree transport - _ "github.com/containers/image/ostree" + _ "github.com/containers/image/v5/ostree" ) diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go similarity index 73% rename from vendor/github.com/containers/image/transports/alltransports/ostree_stub.go rename to vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go index fb5b96e549..f4a862bd4e 100644 --- a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go @@ -2,7 +2,7 @@ package alltransports -import "github.com/containers/image/transports" +import "github.com/containers/image/v5/transports" func init() { transports.Register(transports.NewStubTransport("ostree")) diff --git a/vendor/github.com/containers/image/transports/alltransports/storage.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go similarity index 71% rename from vendor/github.com/containers/image/transports/alltransports/storage.go rename to vendor/github.com/containers/image/v5/transports/alltransports/storage.go index a867c66446..7041eb876a 100644 --- a/vendor/github.com/containers/image/transports/alltransports/storage.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go @@ -4,5 +4,5 @@ package alltransports import ( // Register the storage transport - _ "github.com/containers/image/storage" + _ "github.com/containers/image/v5/storage" ) diff --git a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go similarity index 75% rename from vendor/github.com/containers/image/transports/alltransports/storage_stub.go rename to vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go index 4ac684e58f..67f0291cc0 100644 --- a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go @@ -2,7 +2,7 @@ package alltransports -import "github.com/containers/image/transports" +import "github.com/containers/image/v5/transports" func init() { 
 	transports.Register(transports.NewStubTransport("containers-storage"))
diff --git a/vendor/github.com/containers/image/v5/transports/stub.go b/vendor/github.com/containers/image/v5/transports/stub.go
new file mode 100644
index 0000000000..2c186a90cc
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/stub.go
@@ -0,0 +1,36 @@
+package transports
+
+import (
+	"fmt"
+
+	"github.com/containers/image/v5/types"
+)
+
+// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+type stubTransport string
+
+// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+func NewStubTransport(name string) types.ImageTransport {
+	return stubTransport(name)
+}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (s stubTransport) Name() string {
+	return string(s)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s))
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// Allowing any reference in here allows tools with some transports stubbed-out to still
+	// use signature verification policies which refer to these stubbed-out transports.
+	// See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON.
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go
new file mode 100644
index 0000000000..46ee3710fc
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/transports.go
@@ -0,0 +1,90 @@
+package transports
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+
+	"github.com/containers/image/v5/types"
+)
+
+// knownTransports is a registry of known ImageTransport instances.
+type knownTransports struct {
+	transports map[string]types.ImageTransport
+	mu         sync.Mutex
+}
+
+func (kt *knownTransports) Get(k string) types.ImageTransport {
+	kt.mu.Lock()
+	t := kt.transports[k]
+	kt.mu.Unlock()
+	return t
+}
+
+func (kt *knownTransports) Remove(k string) {
+	kt.mu.Lock()
+	delete(kt.transports, k)
+	kt.mu.Unlock()
+}
+
+func (kt *knownTransports) Add(t types.ImageTransport) {
+	kt.mu.Lock()
+	defer kt.mu.Unlock()
+	name := t.Name()
+	if t := kt.transports[name]; t != nil {
+		panic(fmt.Sprintf("Duplicate image transport name %s", name))
+	}
+	kt.transports[name] = t
+}
+
+var kt *knownTransports
+
+func init() {
+	kt = &knownTransports{
+		transports: make(map[string]types.ImageTransport),
+	}
+}
+
+// Get returns the transport specified by name or nil when unavailable.
+func Get(name string) types.ImageTransport {
+	return kt.Get(name)
+}
+
+// Delete deletes a transport from the registered transports.
+func Delete(name string) {
+	kt.Remove(name)
+}
+
+// Register registers a transport.
+func Register(t types.ImageTransport) {
+	kt.Add(t)
+}
+
+// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that
+// ParseImageName(ImageName(reference)) returns an equivalent reference.
+//
+// This is the generally recommended way to refer to images in the UI.
+//
+// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+func ImageName(ref types.ImageReference) string {
+	return ref.Transport().Name() + ":" + ref.StringWithinTransport()
+}
+
+// ListNames returns a list of non-deprecated transport names.
+// Deprecated transports can be used, but are not presented to users.
+func ListNames() []string {
+	kt.mu.Lock()
+	defer kt.mu.Unlock()
+	deprecated := map[string]bool{
+		"atomic": true,
+	}
+	var names []string
+	for _, transport := range kt.transports {
+		if !deprecated[transport.Name()] {
+			names = append(names, transport.Name())
+		}
+	}
+	sort.Strings(names)
+	return names
+}
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
new file mode 100644
index 0000000000..2db8c78273
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -0,0 +1,555 @@
+package types
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/containers/image/v5/docker/reference"
+	compression "github.com/containers/image/v5/pkg/compression/types"
+	digest "github.com/opencontainers/go-digest"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageTransport is a top-level namespace for ways to store/load an image.
+// It should generally correspond to ImageSource/ImageDestination implementations.
+//
+// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
+// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
+// (or, even, IPv4 or IPv6).
+//
+// OTOH all images using the same transport should (apart from versions of the image format) be interoperable.
+// For example, several different ImageTransport implementations may be based on local filesystem paths,
+// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
+//
+// See also transports.KnownTransports.
+type ImageTransport interface {
+	// Name returns the name of the transport, which must be unique among other transports.
+	Name() string
+	// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+	ParseReference(reference string) (ImageReference, error)
+	// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+	// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+	// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+	// scope passed to this function will not be "", that value is always allowed.
+	ValidatePolicyConfigurationScope(scope string) error
+}
+
+// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport.
+// +// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening +// within an ImageTransport.ParseReference() or equivalent API creating the reference object. +// That's also why the various identification/formatting methods of this type do not support returning errors. +// +// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside +// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. +type ImageReference interface { + Transport() ImageTransport + // StringWithinTransport returns a string representation of the reference, which MUST be such that + // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. + // NOTE: The returned string is not promised to be equal to the original input to ParseReference; + // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. + // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; + // instead, see transports.ImageName(). + StringWithinTransport() string + + // DockerReference returns a Docker reference associated with this reference + // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, + // not e.g. after redirect or alias processing), or nil if unknown/not applicable. + DockerReference() reference.Named + + // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. + // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; + // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical + // (i.e. various references with exactly the same semantics should return the same configuration identity) + // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but + // not required/guaranteed that it will be a valid input to Transport().ParseReference(). + // Returns "" if configuration identities for these references are not supported. + PolicyConfigurationIdentity() string + + // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search + // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed + // in order, terminating on first match, and an implicit "" is always checked at the end. + // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), + // and each following element to be a prefix of the element preceding it. + PolicyConfigurationNamespaces() []string + + // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. + // The caller must call .Close() on the returned ImageCloser. + // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, + // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. + // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. + NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error) + // NewImageSource returns a types.ImageSource for this reference. + // The caller must call .Close() on the returned ImageSource. 
+	NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error)
+	// NewImageDestination returns a types.ImageDestination for this reference.
+	// The caller must call .Close() on the returned ImageDestination.
+	NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error)
+
+	// DeleteImage deletes the named image from the registry, if supported.
+	DeleteImage(ctx context.Context, sys *SystemContext) error
+}
+
+// LayerCompression indicates if layers must be compressed, decompressed or preserved
+type LayerCompression int
+
+const (
+	// PreserveOriginal indicates the layer must be preserved, i.e.
+	// no compression or decompression.
+	PreserveOriginal LayerCompression = iota
+	// Decompress indicates the layer must be decompressed
+	Decompress
+	// Compress indicates the layer must be compressed
+	Compress
+)
+
+// BlobInfo collects known information about a blob (layer/config).
+// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
+type BlobInfo struct {
+	Digest      digest.Digest // "" if unknown.
+	Size        int64         // -1 if unknown
+	URLs        []string
+	Annotations map[string]string
+	MediaType   string
+	// CompressionOperation is used in Image.UpdateLayerInfos to instruct
+	// whether the original layer should be preserved or (de)compressed. The
+	// field defaults to preserve the original layer.
+	CompressionOperation LayerCompression
+	// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
+	// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
+	// set when `CompressionOperation == Compress`.
+	CompressionAlgorithm *compression.Algorithm
+}
+
+// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+	Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+	Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+	Digest   digest.Digest
+	Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+//   One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+//   This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+//   or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+//   It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+//   to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+//   This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+//   compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+//   The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+//   recording transport-specific information that allows the transport to reuse the blob in the future;
+//   then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+//   Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
+//   can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+// None of the methods return an error indication: errors in reading from, or writing to, the cache should never be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+type BlobInfoCache interface {
+	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+	// May return anyDigest if it is known to be uncompressed.
+	// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+	UncompressedDigest(anyDigest digest.Digest) digest.Digest
+	// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+	// It’s allowed for anyDigest == uncompressed.
+	// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+	RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+	// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+	// and can be reused given the opaque location data.
+	RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+	// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+	// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+	//
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+	// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+	// uncompressed digest.
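+	// (Illustrative usage, not upstream text: a transport's TryReusingBlob might consult the cache roughly as
+	//	for _, candidate := range cache.CandidateLocations(transport, scope, info.Digest, canSubstitute) {
+	//		// probe candidate.Location; on success, reuse the blob under candidate.Digest
+	//	}
+	// where transport and scope identify the transport-defined reuse scope.)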
+ CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate +} + +// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). +// This is primarily useful for copying images around; for examining their properties, Image (below) +// is usually more useful. +// Each ImageSource should eventually be closed by calling Close(). +// +// WARNING: Various methods which return an object identified by digest generally do not +// validate that the returned data actually matches that digest; this is the caller’s responsibility. +type ImageSource interface { + // Reference returns the reference used to set up this source, _as specified by the user_ + // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. + Reference() ImageReference + // Close removes resources associated with an initialized ImageSource, if any. + Close() error + // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). + // It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); + // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). + // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) + // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. + HasThreadSafeGetBlob() bool + // GetSignatures returns the image's signatures. It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer + // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() + // to read the image's layers. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error) +} + +// ImageDestination is a service, possibly remote (= slow), to store components of a single image. 
+//
+// There is a specific required order for some of the calls:
+// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
+// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
+// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
+//
+// Each ImageDestination should eventually be closed by calling Close().
+type ImageDestination interface {
+	// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
+	// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+	Reference() ImageReference
+	// Close removes resources associated with an initialized ImageDestination, if any.
+	Close() error
+
+	// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+	// If an empty slice or nil is returned, then any MIME type can be tried when uploading.
+	SupportedManifestMIMETypes() []string
+	// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+	// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+	SupportsSignatures(ctx context.Context) error
+	// DesiredLayerCompression indicates the kind of compression to apply on layers
+	DesiredLayerCompression() LayerCompression
+	// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should actually be
+	// uploaded to the image destination, true otherwise.
+	AcceptsForeignLayerURLs() bool
+	// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+	MustMatchRuntimeOS() bool
+	// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+	// Does not make a difference if Reference().DockerReference() is nil.
+	IgnoresEmbeddedDockerReference() bool
+
+	// PutBlob writes contents of stream and returns data representing the result.
+	// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+	// inputInfo.Size is the expected length of stream, if known.
+	// inputInfo.MediaType describes the blob format, if known.
+	// May update cache.
+	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+	// to any other readers for download using the supplied digest.
+	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+	PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+	// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+	HasThreadSafePutBlob() bool
+	// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+	// info.Digest must not be empty.
+	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+	// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+	// May use and/or update cache.
+	TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
+	// PutManifest writes manifest to the destination.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for
+	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+	// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+	// by `manifest.Digest()`.
+	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+	// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
+	// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+	PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error
+	// PutSignatures writes a set of signatures to the destination.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+	// MUST be called after PutManifest (signatures may reference manifest contents).
+	PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
+	// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+	// WARNING: This does not have any transactional semantics:
+	// - Uploaded data MAY be visible to others before Commit() is called
+	// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+	Commit(ctx context.Context, unparsedToplevel UnparsedImage) error
+}
+
+// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
+// refuses specifically this manifest type, but may accept a different manifest type.
+type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+	Err error
+}
+
+func (e ManifestTypeRejectedError) Error() string {
+	return e.Err.Error()
+}
+
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
+// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
+// This also makes the UnparsedImage→Image conversion an explicitly visible step.
+//
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
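+// (Illustrative flow, not upstream text, using this module's image subpackage; the exact helper names are assumptions:
+//	unparsed := image.UnparsedInstance(src, nil) // src is a types.ImageSource; nil selects the primary instance
+//	// ...verify unparsed.Manifest() against unparsed.Signatures() here...
+//	img, err := image.FromUnparsedImage(ctx, sys, unparsed)
+// so verification can run before anything else is parsed.)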
+type UnparsedImage interface {
+	// Reference returns the reference used to set up this source, _as specified by the user_
+	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+	Reference() ImageReference
+	// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+	Manifest(ctx context.Context) ([]byte, string, error)
+	// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+	Signatures(ctx context.Context) ([][]byte, error)
+}
+
+// Image is the primary API for inspecting properties of images.
+// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+type Image interface {
+	// Note that Reference may return nil in the return value of UpdatedImage!
+	UnparsedImage
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+	ConfigInfo() BlobInfo
+	// ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
+	// The result is cached; it is OK to call this however often you need.
+	ConfigBlob(context.Context) ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig(context.Context) (*v1.Image, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []BlobInfo
+	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
+	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfosForCopy(context.Context) ([]BlobInfo, error)
+	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+	// It returns false if the manifest does not embed a Docker reference.
+	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+	Inspect(context.Context) (*ImageInspectInfo, error)
+	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+	// (most importantly it forces us to download the full layers even if they are already present at the destination).
+	UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
+	// UpdatedImage returns a types.Image modified according to options.
+	// Everything in options.InformationOnly should be provided; other fields should be set only if a modification is desired.
+	// This does not change the state of the original Image object.
+	UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
+	// Size returns an approximation of the amount of disk space which is consumed by the image in its current
+	// location. If the size is not known, -1 will be returned.
+	Size() (int64, error)
+}
+
+// ImageCloser is an Image with a Close() method which must be called by the user.
+// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
+// to ensure that the ImageSource is closed.
+type ImageCloser interface {
+	Image
+	// Close removes resources associated with an initialized ImageCloser.
+	Close() error
+}
+
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage().
+type ManifestUpdateOptions struct {
+	LayerInfos              []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
+	EmbeddedDockerReference reference.Named
+	ManifestMIMEType        string
+	// The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
+	InformationOnly ManifestUpdateInformation
+}
+
+// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
+// only to make writing struct literals possible.
+type ManifestUpdateInformation struct {
+	Destination  ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+	LayerInfos   []BlobInfo       // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
+	LayerDiffIDs []digest.Digest  // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
+}
+
+// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
+// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
+// for other manifest types.
+type ImageInspectInfo struct {
+	Tag           string
+	Created       *time.Time
+	DockerVersion string
+	Labels        map[string]string
+	Architecture  string
+	Os            string
+	Layers        []string
+	Env           []string
+}
+
+// DockerAuthConfig contains authorization information for connecting to a registry.
+// The value of Username and Password can be empty for accessing the registry anonymously.
+type DockerAuthConfig struct {
+	Username string
+	Password string
+}
+
+// OptionalBool is a boolean with an additional undefined value, which is meant
+// to be used in the context of user input to distinguish between a
+// user-specified value and a default value.
+type OptionalBool byte
+
+const (
+	// OptionalBoolUndefined indicates that the OptionalBool hasn't been written.
+	OptionalBoolUndefined OptionalBool = iota
+	// OptionalBoolTrue represents the boolean true.
+	OptionalBoolTrue
+	// OptionalBoolFalse represents the boolean false.
+	OptionalBoolFalse
+)
+
+// NewOptionalBool converts the input bool into either OptionalBoolTrue or
+// OptionalBoolFalse. The function is meant to spare users boilerplate code.
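+// (Illustrative usage, not upstream text:
+//	sys := &SystemContext{DockerInsecureSkipTLSVerify: NewOptionalBool(true)}
+// leaving the field unset keeps it OptionalBoolUndefined, which callers can distinguish from an explicit false.)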
+func NewOptionalBool(b bool) OptionalBool {
+	o := OptionalBoolFalse
+	if b {
+		o = OptionalBoolTrue
+	}
+	return o
+}
+
+// SystemContext allows parameterizing access to implicitly-accessed resources,
+// like configuration files in /etc and users' login state in their home directory.
+// Various components can share the same field only if their semantics is exactly
+// the same; if in doubt, add a new field.
+// It is always OK to pass nil instead of a SystemContext.
+type SystemContext struct {
+	// If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
+	// Not used for any of the more specific path overrides available in this struct.
+	// Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
+	// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths,
+	// and there is no need to worry about the environment.)
+	// NOTE: This does NOT affect paths starting with $HOME.
+	RootForImplicitAbsolutePaths string
+
+	// === Global configuration overrides ===
+	// If not "", overrides the system's default path for signature.Policy configuration.
+	SignaturePolicyPath string
+	// If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
+	RegistriesDirPath string
+	// Path to the system-wide registries configuration file
+	SystemRegistriesConfPath string
+	// If not "", overrides the default path for the authentication file, but only for new-format files
+	AuthFilePath string
+	// if not "", overrides the default path for the authentication file, but with the legacy format;
+	// the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
+	// but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
+	// in locations other than the home dir; openshift components should then set this field in those cases;
+	// this field is ignored if `AuthFilePath` is set (we favor the newer format);
+	// only reading of this data is supported;
+	LegacyFormatAuthFilePath string
+	// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
+	ArchitectureChoice string
+	// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
+	OSChoice string
+	// If not "", overrides the system's default directory containing a blob info cache.
+	BlobInfoCacheDir string
+
+	// Additional tags when creating or copying a docker-archive.
+	DockerArchiveAdditionalTags []reference.NamedTagged
+
+	// === OCI.Transport overrides ===
+	// If not "", a directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when downloading OCI image layers.
+	OCICertPath string
+	// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+	OCIInsecureSkipTLSVerify bool
+	// If not "", use a shared directory for storing blobs rather than within OCI layouts
+	OCISharedBlobDirPath string
+	// Allow uncompressed image layers for OCI image layers
+	OCIAcceptUncompressedLayers bool
+
+	// === docker.Transport overrides ===
+	// If not "", a directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when talking to a Docker Registry.
+	DockerCertPath string
+	// If not "", overrides the system's default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
+	// Ignored if DockerCertPath is non-empty.
+	DockerPerHostCertDirPath string
+	// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+	DockerInsecureSkipTLSVerify OptionalBool
+	// If nil, the library tries to parse ~/.docker/config.json to retrieve credentials
+	DockerAuthConfig *DockerAuthConfig
+	// If not "", a User-Agent header is added to each request when contacting a registry.
+	DockerRegistryUserAgent string
+	// If true, a V1 ping attempt isn't done to give users a better error. Default is false.
+	// Note that this field is used mainly to integrate containers/image into projectatomic/docker
+	// in order to not break any existing docker's integration tests.
+	DockerDisableV1Ping bool
+	// Directory to use for OSTree temporary files
+	OSTreeTmpDirPath string
+
+	// === docker/daemon.Transport overrides ===
+	// A directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when talking to a Docker daemon.
+	DockerDaemonCertPath string
+	// The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed.
+	DockerDaemonHost string
+	// Used to skip TLS verification, off by default. To take effect DockerDaemonCertPath needs to be specified as well.
+	DockerDaemonInsecureSkipTLSVerify bool
+
+	// === dir.Transport overrides ===
+	// DirForceCompress compresses the image layers if set to true
+	DirForceCompress bool
+
+	// CompressionFormat is the format to use for the compression of the blobs
+	CompressionFormat *compression.Algorithm
+	// CompressionLevel specifies what compression level is used
+	CompressionLevel *int
+}
+
+// ProgressProperties is used to pass information from the copy code to a monitor which
+// can use the real-time information to produce output or react to changes.
+type ProgressProperties struct {
+	Artifact BlobInfo
+	Offset   uint64
+}
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
new file mode 100644
index 0000000000..572be2b890
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -0,0 +1,18 @@
+package version
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes
+	VersionMajor = 5
+	// VersionMinor is for functionality added in a backwards-compatible manner
+	VersionMinor = 0
+	// VersionPatch is for backwards-compatible bug fixes
+	VersionPatch = 0
+
+	// VersionDev indicates a development branch. Releases will be the empty string.
+	VersionDev = ""
+)
+
+// Version is the specification version that the package types support.
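// With the constants above, Version evaluates to "5.0.0"; a non-empty
// VersionDev such as "-dev" would yield "5.0.0-dev". A consumer might simply
// record it at startup (a sketch, not code from this diff):
//
//	logrus.Infof("using containers/image %s", version.Version)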
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/containers/libpod/cmd/podman/cliconfig/config.go b/vendor/github.com/containers/libpod/cmd/podman/cliconfig/config.go index d5098ee51f..58d67ddc14 100644 --- a/vendor/github.com/containers/libpod/cmd/podman/cliconfig/config.go +++ b/vendor/github.com/containers/libpod/cmd/podman/cliconfig/config.go @@ -1,6 +1,8 @@ package cliconfig import ( + "net" + "github.com/spf13/cobra" ) @@ -39,6 +41,9 @@ type MainFlags struct { VarlinkAddress string ConnectionName string RemoteConfigFilePath string + Port int + IdentityFile string + IgnoreHosts bool } type AttachValues struct { @@ -156,6 +161,7 @@ type GenerateKubeValues struct { type GenerateSystemdValues struct { PodmanCommand Name bool + Files bool RestartPolicy string StopTimeout int } @@ -245,7 +251,7 @@ type LogsValues struct { Details bool Follow bool Since string - Tail uint64 + Tail int64 Timestamps bool Latest bool } @@ -258,6 +264,33 @@ type MountValues struct { Latest bool } +type NetworkCreateValues struct { + PodmanCommand + Driver string + DisableDNS bool + Gateway net.IP + Internal bool + IPamDriver string + IPRange net.IPNet + IPV6 bool + Network net.IPNet +} + +type NetworkListValues struct { + PodmanCommand + Filter []string + Quiet bool +} + +type NetworkRmValues struct { + PodmanCommand + Force bool +} + +type NetworkInspectValues struct { + PodmanCommand +} + type PauseValues struct { PodmanCommand All bool @@ -286,6 +319,7 @@ type PodCreateValues struct { LabelFile []string Labels []string Name string + Hostname string PodIDFile string Publish []string Share string @@ -397,6 +431,8 @@ type PullValues struct { Authfile string CertDir string Creds string + OverrideArch string + OverrideOS string Quiet bool SignaturePolicy string TlsVerify bool @@ -408,6 +444,7 @@ type PushValues struct { CertDir string Compress bool Creds string + Digestfile string Format string Quiet bool RemoveSignatures bool @@ -488,10 +525,15 @@ type SearchValues struct { TlsVerify bool } +type TrustValues struct { + PodmanCommand +} + type SignValues struct { PodmanCommand Directory string SignBy string + CertDir string } type StartValues struct { @@ -612,6 +654,7 @@ type SystemRenumberValues struct { type SystemMigrateValues struct { PodmanCommand + NewRuntime string } type SystemDfValues struct { diff --git a/vendor/github.com/containers/libpod/cmd/podman/cliconfig/create.go b/vendor/github.com/containers/libpod/cmd/podman/cliconfig/create.go index 5fb2eed10f..c27dfbbeef 100644 --- a/vendor/github.com/containers/libpod/cmd/podman/cliconfig/create.go +++ b/vendor/github.com/containers/libpod/cmd/podman/cliconfig/create.go @@ -12,13 +12,20 @@ type RunValues struct { PodmanCommand } +// PodmanBuildResults represents the results for Podman Build flags +// that are unique to Podman. 
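// Buildah-shared flags stay in the embedded buildahcli result structs of
// BuildValues below; only Podman-only flags land here. A hypothetical handler
// might consult it after flag parsing (sketch; c is a *BuildValues):
//
//	if c.SquashAll {
//		// squash every layer, base-image layers included
//	}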
+type PodmanBuildResults struct { + SquashAll bool +} + type BuildValues struct { PodmanCommand *buildahcli.BudResults *buildahcli.UserNSResults *buildahcli.FromAndBudResults - *buildahcli.NameSpaceResults *buildahcli.LayerResults + *buildahcli.NameSpaceResults + *PodmanBuildResults } type CpValues struct { diff --git a/vendor/github.com/containers/libpod/cmd/podman/cliconfig/defaults.go b/vendor/github.com/containers/libpod/cmd/podman/cliconfig/defaults.go index d5dae0874c..ce695d1539 100644 --- a/vendor/github.com/containers/libpod/cmd/podman/cliconfig/defaults.go +++ b/vendor/github.com/containers/libpod/cmd/podman/cliconfig/defaults.go @@ -1,10 +1,5 @@ package cliconfig -const ( - // DefaultSystemD value - DefaultSystemD bool = true -) - var ( // DefaultHealthCheckInterval default value DefaultHealthCheckInterval = "30s" diff --git a/vendor/github.com/containers/libpod/libpod/boltdb_state.go b/vendor/github.com/containers/libpod/libpod/boltdb_state.go index 176781f079..608a279c3b 100644 --- a/vendor/github.com/containers/libpod/libpod/boltdb_state.go +++ b/vendor/github.com/containers/libpod/libpod/boltdb_state.go @@ -2,9 +2,11 @@ package libpod import ( "bytes" + "os" "strings" "sync" + "github.com/containers/libpod/libpod/config" "github.com/containers/libpod/libpod/define" bolt "github.com/etcd-io/bbolt" jsoniter "github.com/json-iterator/go" @@ -159,6 +161,16 @@ func (s *BoltState) Refresh() error { return err } + allVolsBucket, err := getAllVolsBucket(tx) + if err != nil { + return err + } + + volBucket, err := getVolBucket(tx) + if err != nil { + return err + } + // Iterate through all IDs. Check if they are containers. // If they are, unmarshal their state, and then clear // PID, mountpoint, and state for all of them @@ -235,6 +247,44 @@ func (s *BoltState) Refresh() error { return nil }) + if err != nil { + return err + } + + // Now refresh volumes + err = allVolsBucket.ForEach(func(id, name []byte) error { + dbVol := volBucket.Bucket(id) + if dbVol == nil { + return errors.Wrapf(define.ErrInternal, "inconsistency in state - volume %s is in all volumes bucket but volume not found", string(id)) + } + + // Get the state + volStateBytes := dbVol.Get(stateKey) + if volStateBytes == nil { + // If the volume doesn't have a state, nothing to do + return nil + } + + oldState := new(VolumeState) + + if err := json.Unmarshal(volStateBytes, oldState); err != nil { + return errors.Wrapf(err, "error unmarshalling state for volume %s", string(id)) + } + + // Reset mount count to 0 + oldState.MountCount = 0 + + newState, err := json.Marshal(oldState) + if err != nil { + return errors.Wrapf(err, "error marshalling state for volume %s", string(id)) + } + + if err := dbVol.Put(stateKey, newState); err != nil { + return errors.Wrapf(err, "error storing new state for volume %s", string(id)) + } + + return nil + }) return err }) return err @@ -242,12 +292,12 @@ func (s *BoltState) Refresh() error { // GetDBConfig retrieves runtime configuration fields that were created when // the database was first initialized -func (s *BoltState) GetDBConfig() (*DBConfig, error) { +func (s *BoltState) GetDBConfig() (*config.DBConfig, error) { if !s.valid { return nil, define.ErrDBClosed } - cfg := new(DBConfig) + cfg := new(config.DBConfig) db, err := s.getDBCon() if err != nil { @@ -359,6 +409,60 @@ func (s *BoltState) Container(id string) (*Container, error) { return ctr, nil } +// LookupContainerID retrieves a container ID from the state by full or unique +// partial ID or name +func (s *BoltState) 
LookupContainerID(idOrName string) (string, error) { + if idOrName == "" { + return "", define.ErrEmptyID + } + + if !s.valid { + return "", define.ErrDBClosed + } + + db, err := s.getDBCon() + if err != nil { + return "", err + } + defer s.deferredCloseDBCon(db) + + var id []byte + err = db.View(func(tx *bolt.Tx) error { + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } + + namesBucket, err := getNamesBucket(tx) + if err != nil { + return err + } + + nsBucket, err := getNSBucket(tx) + if err != nil { + return err + } + + fullID, err := s.lookupContainerID(idOrName, ctrBucket, namesBucket, nsBucket) + // Check if it is in our namespace + if s.namespaceBytes != nil { + ns := nsBucket.Get(fullID) + if !bytes.Equal(ns, s.namespaceBytes) { + return errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName) + } + } + id = fullID + return err + }) + + if err != nil { + return "", err + } + + retID := string(id) + return retID, nil +} + // LookupContainer retrieves a container from the state by full or unique // partial ID or name func (s *BoltState) LookupContainer(idOrName string) (*Container, error) { @@ -396,67 +500,9 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) { return err } - // First, check if the ID given was the actual container ID - var id []byte - ctrExists := ctrBucket.Bucket([]byte(idOrName)) - if ctrExists != nil { - // A full container ID was given. - // It might not be in our namespace, but - // getContainerFromDB() will handle that case. - id = []byte(idOrName) - return s.getContainerFromDB(id, ctr, ctrBucket) - } - - // Next, check if the full name was given - isPod := false - fullID := namesBucket.Get([]byte(idOrName)) - if fullID != nil { - // The name exists and maps to an ID. - // However, we are not yet certain the ID is a - // container. - ctrExists = ctrBucket.Bucket(fullID) - if ctrExists != nil { - // A container bucket matching the full ID was - // found. - return s.getContainerFromDB(fullID, ctr, ctrBucket) - } - // Don't error if we have a name match but it's not a - // container - there's a chance we have a container with - // an ID starting with those characters. - // However, so we can return a good error, note whether - // this is a pod. - isPod = true - } - - // We were not given a full container ID or name. - // Search for partial ID matches. - exists := false - err = ctrBucket.ForEach(func(checkID, checkName []byte) error { - // If the container isn't in our namespace, we - // can't match it - if s.namespaceBytes != nil { - ns := nsBucket.Get(checkID) - if !bytes.Equal(ns, s.namespaceBytes) { - return nil - } - } - if strings.HasPrefix(string(checkID), idOrName) { - if exists { - return errors.Wrapf(define.ErrCtrExists, "more than one result for container ID %s", idOrName) - } - id = checkID - exists = true - } - - return nil - }) + id, err := s.lookupContainerID(idOrName, ctrBucket, namesBucket, nsBucket) if err != nil { return err - } else if !exists { - if isPod { - return errors.Wrapf(define.ErrNoSuchCtr, "%s is a pod, not a container", idOrName) - } - return errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %s found", idOrName) } return s.getContainerFromDB(id, ctr, ctrBucket) @@ -614,9 +660,13 @@ func (s *BoltState) UpdateContainer(ctr *Container) error { return err } - // Handle network namespace - if err := replaceNetNS(netNSPath, ctr, newState); err != nil { - return err + // Handle network namespace. 
+ if os.Geteuid() == 0 { + // Do it only when root, either on the host or as root in the + // user namespace. + if err := replaceNetNS(netNSPath, ctr, newState); err != nil { + return err + } } // New state compiled successfully, swap it into the current state @@ -812,6 +862,39 @@ func (s *BoltState) AllContainers() ([]*Container, error) { return ctrs, nil } +// GetContainerConfig returns a container config from the database by full ID +func (s *BoltState) GetContainerConfig(id string) (*ContainerConfig, error) { + if len(id) == 0 { + return nil, define.ErrEmptyID + } + + if !s.valid { + return nil, define.ErrDBClosed + } + + config := new(ContainerConfig) + + db, err := s.getDBCon() + if err != nil { + return nil, err + } + defer s.deferredCloseDBCon(db) + + err = db.View(func(tx *bolt.Tx) error { + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } + + return s.getContainerConfigFromDB([]byte(id), config, ctrBucket) + }) + if err != nil { + return nil, err + } + + return config, nil +} + // RewriteContainerConfig rewrites a container's configuration. // WARNING: This function is DANGEROUS. Do not use without reading the full // comment on this function in state.go. @@ -870,7 +953,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error { newCfgJSON, err := json.Marshal(newCfg) if err != nil { - return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", pod.ID()) + return errors.Wrapf(err, "error marshalling new configuration JSON for pod %s", pod.ID()) } db, err := s.getDBCon() @@ -900,6 +983,50 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error { return err } +// RewriteVolumeConfig rewrites a volume's configuration. +// WARNING: This function is DANGEROUS. Do not use without reading the full +// comment on this function in state.go. 
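// A sketch of the intended call pattern (oldCfg stands in for the volume's
// current *VolumeConfig, however the caller obtained it; the replacement must
// be a complete config, not a partial patch):
//
//	newCfg := *oldCfg                                  // full copy of the existing config
//	newCfg.Labels = map[string]string{"backup": "true"} // example mutation
//	if err := state.RewriteVolumeConfig(vol, &newCfg); err != nil {
//		return err
//	}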
+func (s *BoltState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error { + if !s.valid { + return define.ErrDBClosed + } + + if !volume.valid { + return define.ErrVolumeRemoved + } + + newCfgJSON, err := json.Marshal(newCfg) + if err != nil { + return errors.Wrapf(err, "error marshalling new configuration JSON for volume %q", volume.Name()) + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + err = db.Update(func(tx *bolt.Tx) error { + volBkt, err := getVolBucket(tx) + if err != nil { + return err + } + + volDB := volBkt.Bucket([]byte(volume.Name())) + if volDB == nil { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found in DB", volume.Name()) + } + + if err := volDB.Put(configKey, newCfgJSON); err != nil { + return errors.Wrapf(err, "error updating volume %q config JSON", volume.Name()) + } + + return nil + }) + return err +} + // Pod retrieves a pod given its full ID func (s *BoltState) Pod(id string) (*Pod, error) { if id == "" { @@ -1308,6 +1435,15 @@ func (s *BoltState) AddVolume(volume *Volume) error { return errors.Wrapf(err, "error marshalling volume %s config to JSON", volume.Name()) } + // Volume state is allowed to not exist + var volStateJSON []byte + if volume.state != nil { + volStateJSON, err = json.Marshal(volume.state) + if err != nil { + return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name()) + } + } + db, err := s.getDBCon() if err != nil { return err @@ -1348,6 +1484,12 @@ func (s *BoltState) AddVolume(volume *Volume) error { return errors.Wrapf(err, "error storing volume %s configuration in DB", volume.Name()) } + if volStateJSON != nil { + if err := newVol.Put(stateKey, volStateJSON); err != nil { + return errors.Wrapf(err, "error storing volume %s state in DB", volume.Name()) + } + } + if err := allVolsBkt.Put(volName, volName); err != nil { return errors.Wrapf(err, "error storing volume %s in all volumes bucket in DB", volume.Name()) } @@ -1439,6 +1581,103 @@ func (s *BoltState) RemoveVolume(volume *Volume) error { return err } +// UpdateVolume updates the volume's state from the database. +func (s *BoltState) UpdateVolume(volume *Volume) error { + if !s.valid { + return define.ErrDBClosed + } + + if !volume.valid { + return define.ErrVolumeRemoved + } + + newState := new(VolumeState) + volumeName := []byte(volume.Name()) + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + err = db.View(func(tx *bolt.Tx) error { + volBucket, err := getVolBucket(tx) + if err != nil { + return err + } + + volToUpdate := volBucket.Bucket(volumeName) + if volToUpdate == nil { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database", volume.Name()) + } + + stateBytes := volToUpdate.Get(stateKey) + if stateBytes == nil { + // Having no state is valid. + // Return nil, use the empty state. + return nil + } + + if err := json.Unmarshal(stateBytes, newState); err != nil { + return errors.Wrapf(err, "error unmarshalling volume %s state", volume.Name()) + } + + return nil + }) + if err != nil { + return err + } + + volume.state = newState + + return nil +} + +// SaveVolume saves the volume's state to the database. 
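// It is the write-side counterpart of UpdateVolume above: callers mutate
// volume.state in memory and then persist it, e.g. (sketch):
//
//	volume.state.MountCount++
//	if err := s.SaveVolume(volume); err != nil {
//		return err
//	}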
+func (s *BoltState) SaveVolume(volume *Volume) error { + if !s.valid { + return define.ErrDBClosed + } + + if !volume.valid { + return define.ErrVolumeRemoved + } + + volumeName := []byte(volume.Name()) + + var newStateJSON []byte + if volume.state != nil { + stateJSON, err := json.Marshal(volume.state) + if err != nil { + return errors.Wrapf(err, "error marshalling volume %s state to JSON", volume.Name()) + } + newStateJSON = stateJSON + } + + db, err := s.getDBCon() + if err != nil { + return err + } + defer s.deferredCloseDBCon(db) + + err = db.Update(func(tx *bolt.Tx) error { + volBucket, err := getVolBucket(tx) + if err != nil { + return err + } + + volToUpdate := volBucket.Bucket(volumeName) + if volToUpdate == nil { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database", volume.Name()) + } + + return volToUpdate.Put(stateKey, newStateJSON) + }) + return err +} + // AllVolumes returns all volumes present in the state func (s *BoltState) AllVolumes() ([]*Volume, error) { if !s.valid { @@ -1473,6 +1712,7 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) { volume := new(Volume) volume.config = new(VolumeConfig) + volume.state = new(VolumeState) if err := s.getVolumeFromDB(id, volume, volBucket); err != nil { if errors.Cause(err) != define.ErrNSMismatch { @@ -1507,6 +1747,7 @@ func (s *BoltState) Volume(name string) (*Volume, error) { volume := new(Volume) volume.config = new(VolumeConfig) + volume.state = new(VolumeState) db, err := s.getDBCon() if err != nil { @@ -1529,6 +1770,75 @@ func (s *BoltState) Volume(name string) (*Volume, error) { return volume, nil } +// LookupVolume locates a volume from a partial name. +func (s *BoltState) LookupVolume(name string) (*Volume, error) { + if name == "" { + return nil, define.ErrEmptyID + } + + if !s.valid { + return nil, define.ErrDBClosed + } + + volName := []byte(name) + + volume := new(Volume) + volume.config = new(VolumeConfig) + volume.state = new(VolumeState) + + db, err := s.getDBCon() + if err != nil { + return nil, err + } + defer s.deferredCloseDBCon(db) + + err = db.View(func(tx *bolt.Tx) error { + volBkt, err := getVolBucket(tx) + if err != nil { + return err + } + + allVolsBkt, err := getAllVolsBucket(tx) + if err != nil { + return err + } + + // Check for exact match on name + volDB := volBkt.Bucket(volName) + if volDB != nil { + return s.getVolumeFromDB(volName, volume, volBkt) + } + + // No exact match. Search all names. 
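		// For example, with volumes "data1" and "data2" present, LookupVolume("data")
		// matches both and fails with ErrVolumeExists below, while LookupVolume("data1")
		// resolves via the exact-match path above.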
+ foundMatch := false + err = allVolsBkt.ForEach(func(checkName, checkName2 []byte) error { + if strings.HasPrefix(string(checkName), name) { + if foundMatch { + return errors.Wrapf(define.ErrVolumeExists, "more than one result for volume name %q", name) + } + foundMatch = true + volName = checkName + } + return nil + }) + if err != nil { + return err + } + + if !foundMatch { + return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found", name) + } + + return s.getVolumeFromDB(volName, volume, volBkt) + }) + if err != nil { + return nil, err + } + + return volume, nil + +} + // HasVolume returns true if the given volume exists in the state, otherwise it returns false func (s *BoltState) HasVolume(name string) (bool, error) { if name == "" { diff --git a/vendor/github.com/containers/libpod/libpod/boltdb_state_internal.go b/vendor/github.com/containers/libpod/libpod/boltdb_state_internal.go index 408ef7224b..3347a36481 100644 --- a/vendor/github.com/containers/libpod/libpod/boltdb_state_internal.go +++ b/vendor/github.com/containers/libpod/libpod/boltdb_state_internal.go @@ -347,7 +347,7 @@ func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) { return bkt, nil } -func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.Bucket) error { +func (s *BoltState) getContainerConfigFromDB(id []byte, config *ContainerConfig, ctrsBkt *bolt.Bucket) error { ctrBkt := ctrsBkt.Bucket(id) if ctrBkt == nil { return errors.Wrapf(define.ErrNoSuchCtr, "container %s not found in DB", string(id)) @@ -365,10 +365,18 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt. return errors.Wrapf(define.ErrInternal, "container %s missing config key in DB", string(id)) } - if err := json.Unmarshal(configBytes, ctr.config); err != nil { + if err := json.Unmarshal(configBytes, config); err != nil { return errors.Wrapf(err, "error unmarshalling container %s config", string(id)) } + return nil +} + +func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.Bucket) error { + if err := s.getContainerConfigFromDB(id, ctr.config, ctrsBkt); err != nil { + return err + } + // Get the lock lock, err := s.runtime.lockManager.RetrieveLock(ctr.config.LockID) if err != nil { @@ -388,7 +396,11 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt. 
 		ociRuntime, ok := s.runtime.ociRuntimes[runtimeName]
 		if !ok {
-			return errors.Wrapf(define.ErrInternal, "container %s was created with OCI runtime %s, but that runtime is not available in the current configuration", ctr.ID(), ctr.config.OCIRuntime)
+			// Use a MissingRuntime implementation
+			ociRuntime, err = getMissingRuntime(runtimeName, s.runtime)
+			if err != nil {
+				return err
+			}
 		}
 		ctr.ociRuntime = ociRuntime
 	}
@@ -449,6 +461,21 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
 		return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
 	}
 
+	// Volume state is allowed to be nil for legacy compatibility
+	volStateBytes := volDB.Get(stateKey)
+	if volStateBytes != nil {
+		if err := json.Unmarshal(volStateBytes, volume.state); err != nil {
+			return errors.Wrapf(err, "error unmarshalling volume %s state from DB", string(name))
+		}
+	}
+
+	// Get the lock
+	lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving lock for volume %q", string(name))
+	}
+	volume.lock = lock
+
 	volume.runtime = s.runtime
 	volume.valid = true
@@ -847,3 +874,72 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
 
 	return nil
 }
+
+// lookupContainerID retrieves a container ID from the state by full or unique
+// partial ID or name.
+// NOTE: the retrieved container ID namespace may not match the state namespace.
+func (s *BoltState) lookupContainerID(idOrName string, ctrBucket, namesBucket, nsBucket *bolt.Bucket) ([]byte, error) {
+	// First, check if the ID given was the actual container ID
+	ctrExists := ctrBucket.Bucket([]byte(idOrName))
+	if ctrExists != nil {
+		// A full container ID was given.
+		// It might not be in our namespace, but this will be handled
+		// by the callers.
+		return []byte(idOrName), nil
+	}
+
+	// Next, check if the full name was given
+	isPod := false
+	fullID := namesBucket.Get([]byte(idOrName))
+	if fullID != nil {
+		// The name exists and maps to an ID.
+		// However, we are not yet certain the ID is a
+		// container.
+		ctrExists = ctrBucket.Bucket(fullID)
+		if ctrExists != nil {
+			// A container bucket matching the full ID was
+			// found.
+			return fullID, nil
+		}
+		// Don't error if we have a name match but it's not a
+		// container - there's a chance we have a container with
+		// an ID starting with those characters.
+		// However, so we can return a good error, note whether
+		// this is a pod.
+		isPod = true
+	}
+
+	var id []byte
+	// We were not given a full container ID or name.
+	// Search for partial ID matches.
+	exists := false
+	err := ctrBucket.ForEach(func(checkID, checkName []byte) error {
+		// If the container isn't in our namespace, we
+		// can't match it
+		if s.namespaceBytes != nil {
+			ns := nsBucket.Get(checkID)
+			if !bytes.Equal(ns, s.namespaceBytes) {
+				return nil
+			}
+		}
+		if strings.HasPrefix(string(checkID), idOrName) {
+			if exists {
+				return errors.Wrapf(define.ErrCtrExists, "more than one result for container ID %s", idOrName)
+			}
+			id = checkID
+			exists = true
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	} else if !exists {
+		if isPod {
+			return nil, errors.Wrapf(define.ErrNoSuchCtr, "%s is a pod, not a container", idOrName)
+		}
+		return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %s found", idOrName)
+	}
+	return id, nil
+}
diff --git a/vendor/github.com/containers/libpod/libpod/config/config.go b/vendor/github.com/containers/libpod/libpod/config/config.go
new file mode 100644
index 0000000000..5b4b57f3a4
--- /dev/null
+++ b/vendor/github.com/containers/libpod/libpod/config/config.go
@@ -0,0 +1,549 @@
+package config
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/BurntSushi/toml"
+	"github.com/containers/libpod/libpod/define"
+	"github.com/containers/libpod/pkg/rootless"
+	"github.com/containers/libpod/pkg/util"
+	"github.com/containers/storage"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// _defaultTransport is a prefix that we apply to an image name to check
+	// docker hub first for the image.
+	_defaultTransport = "docker://"
+
+	// _rootlessConfigPath is the path to the rootless libpod.conf in $HOME.
+	_rootlessConfigPath = ".config/containers/libpod.conf"
+
+	// _conmonMinMajorVersion is the major version required for conmon.
+	_conmonMinMajorVersion = 2
+
+	// _conmonMinMinorVersion is the minor version required for conmon.
+	_conmonMinMinorVersion = 0
+
+	// _conmonMinPatchVersion is the sub-minor version required for conmon.
+	_conmonMinPatchVersion = 1
+
+	// _conmonVersionFormatErr is used when the expected version format of
+	// conmon has changed.
+	_conmonVersionFormatErr = "conmon version changed format"
+
+	// _installPrefix is the prefix where podman will be installed.
+	// It can be overridden at build time.
+	_installPrefix = "/usr"
+
+	// _etcDir is the sysconfdir where podman should look for system config files.
+	// It can be overridden at build time.
+	_etcDir = "/etc"
+
+	// SeccompDefaultPath defines the default seccomp path.
+	SeccompDefaultPath = _installPrefix + "/share/containers/seccomp.json"
+
+	// SeccompOverridePath, if it exists, overrides the default seccomp path.
+	SeccompOverridePath = _etcDir + "/crio/seccomp.json"
+
+	// _rootConfigPath is the path to the libpod configuration file.
+	// This file is loaded to replace the builtin default config before
+	// runtime options (e.g. WithStorageConfig) are applied.
+	// If it is not present, the builtin default config is used instead.
+	// This path can be overridden when the runtime is created by using
+	// NewRuntimeFromConfig() instead of NewRuntime().
+	_rootConfigPath = _installPrefix + "/share/containers/libpod.conf"
+
+	// _rootOverrideConfigPath is the path to an override for the default libpod
+	// configuration file. If OverrideConfigPath exists, it will be used in
+	// place of the configuration file pointed to by ConfigPath.
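	// With the defaults above, _rootOverrideConfigPath resolves to
	// /etc/containers/libpod.conf and _rootConfigPath to
	// /usr/share/containers/libpod.conf.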
+	_rootOverrideConfigPath = _etcDir + "/containers/libpod.conf"
+)
+
+// SetOptions contains a subset of options in a Config. It's used to indicate if
+// a given option has either been set by the user or by a parsed libpod
+// configuration file. If not, the corresponding option might be overwritten by
+// values from the database. This behavior guarantees backwards compat with
+// older versions of libpod and Podman.
+type SetOptions struct {
+	// StorageConfigRunRootSet indicates if the RunRoot has been explicitly set
+	// by the config or by the user. It's required to guarantee backwards
+	// compatibility with older versions of libpod for which we must query the
+	// database configuration. Not included in the on-disk config.
+	StorageConfigRunRootSet bool `toml:"-"`
+
+	// StorageConfigGraphRootSet indicates if the GraphRoot has been explicitly
+	// set by the config or by the user. It's required to guarantee backwards
+	// compatibility with older versions of libpod for which we must query the
+	// database configuration. Not included in the on-disk config.
+	StorageConfigGraphRootSet bool `toml:"-"`
+
+	// StorageConfigGraphDriverNameSet indicates if the GraphDriverName has been
+	// explicitly set by the config or by the user. It's required to guarantee
+	// backwards compatibility with older versions of libpod for which we must
+	// query the database configuration. Not included in the on-disk config.
+	StorageConfigGraphDriverNameSet bool `toml:"-"`
+
+	// VolumePathSet indicates if the VolumePath has been explicitly set by the
+	// config or by the user. It's required to guarantee backwards compatibility
+	// with older versions of libpod for which we must query the database
+	// configuration. Not included in the on-disk config.
+	VolumePathSet bool `toml:"-"`
+
+	// StaticDirSet indicates if the StaticDir has been explicitly set by the
+	// config or by the user. It's required to guarantee backwards compatibility
+	// with older versions of libpod for which we must query the database
+	// configuration. Not included in the on-disk config.
+	StaticDirSet bool `toml:"-"`
+
+	// TmpDirSet indicates if the TmpDir has been explicitly set by the config
+	// or by the user. It's required to guarantee backwards compatibility with
+	// older versions of libpod for which we must query the database
+	// configuration. Not included in the on-disk config.
+	TmpDirSet bool `toml:"-"`
+}
+
+// Config contains configuration options used to set up a libpod runtime
+type Config struct {
+	// NOTE: when changing this struct, make sure to update (*Config).Merge().
+
+	// SetOptions contains a subset of config options. It's used to indicate if
+	// a given option has either been set by the user or by a parsed libpod
+	// configuration file. If not, the corresponding option might be
+	// overwritten by values from the database. This behavior guarantees
+	// backwards compat with older versions of libpod and Podman.
+	SetOptions
+
+	// StateType is the type of the backing state store. Avoid using multiple
+	// values for this with the same containers/storage configuration on the
+	// same system. Different state types do not interact, and each will see a
+	// separate set of containers, which may cause conflicts in
+	// containers/storage. As such this is not exposed via the config file.
+	StateType define.RuntimeStateStore `toml:"-"`
+
+	// StorageConfig is the configuration used by containers/storage. Not
+	// included in the on-disk config, use the dedicated containers/storage
+	// configuration file instead.
+	StorageConfig storage.StoreOptions `toml:"-"`
+
+	// VolumePath is the default location that named volumes will be created
+	// under. This convention is followed by the default volume driver, but
+	// may not be by other drivers.
+	VolumePath string `toml:"volume_path"`
+
+	// ImageDefaultTransport is the default transport method used to fetch
+	// images.
+	ImageDefaultTransport string `toml:"image_default_transport"`
+
+	// SignaturePolicyPath is the path to a signature policy to use for
+	// validating images. If left empty, the containers/image default signature
+	// policy will be used.
+	SignaturePolicyPath string `toml:"signature_policy_path,omitempty"`
+
+	// OCIRuntime is the OCI runtime to use.
+	OCIRuntime string `toml:"runtime"`
+
+	// OCIRuntimes are the set of configured OCI runtimes (default is runc).
+	OCIRuntimes map[string][]string `toml:"runtimes"`
+
+	// RuntimeSupportsJSON is the list of the OCI runtimes that support
+	// --format=json.
+	RuntimeSupportsJSON []string `toml:"runtime_supports_json"`
+
+	// RuntimeSupportsNoCgroups is a list of OCI runtimes that support
+	// running containers without CGroups.
+	RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroups"`
+
+	// RuntimePath is the path to the OCI runtime binary for launching containers.
+	// The first path pointing to a valid file will be used. This is used only
+	// when there are no OCIRuntime/OCIRuntimes defined. It is used only to be
+	// backward compatible with older versions of Podman.
+	RuntimePath []string `toml:"runtime_path"`
+
+	// ConmonPath is the path to the Conmon binary used for managing containers.
+	// The first path pointing to a valid file will be used.
+	ConmonPath []string `toml:"conmon_path"`
+
+	// ConmonEnvVars are environment variables to pass to the Conmon binary
+	// when it is launched.
+	ConmonEnvVars []string `toml:"conmon_env_vars"`
+
+	// CgroupManager is the CGroup manager to use. Valid values are "cgroupfs"
+	// and "systemd".
+	CgroupManager string `toml:"cgroup_manager"`
+
+	// InitPath is the path to the container-init binary.
+	InitPath string `toml:"init_path"`
+
+	// StaticDir is the path to a persistent directory to store container
+	// files.
+	StaticDir string `toml:"static_dir"`
+
+	// TmpDir is the path to a temporary directory to store per-boot container
+	// files. Must be stored in a tmpfs.
+	TmpDir string `toml:"tmp_dir"`
+
+	// MaxLogSize is the maximum size of container logfiles.
+	MaxLogSize int64 `toml:"max_log_size,omitempty"`
+
+	// NoPivotRoot sets whether to set no-pivot-root in the OCI runtime.
+	NoPivotRoot bool `toml:"no_pivot_root"`
+
+	// CNIConfigDir sets the directory where CNI configuration files are
+	// stored.
+	CNIConfigDir string `toml:"cni_config_dir"`
+
+	// CNIPluginDir sets a number of directories where the CNI network
+	// plugins can be located.
+	CNIPluginDir []string `toml:"cni_plugin_dir"`
+
+	// CNIDefaultNetwork is the network name of the default CNI network
+	// to attach pods to.
+	CNIDefaultNetwork string `toml:"cni_default_network,omitempty"`
+
+	// HooksDir holds paths to the directories containing hooks
+	// configuration files. When the same filename is present in
+	// multiple directories, the file in the directory listed last in
+	// this slice takes precedence.
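	// For example (illustrative paths), with
	//   HooksDir = ["/usr/share/containers/oci/hooks.d", "/etc/containers/oci/hooks.d"]
	// a hook file of the same name under /etc overrides the one under /usr/share.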
+	HooksDir []string `toml:"hooks_dir"`
+
+	// DefaultMountsFile is the path to the default mounts file for testing
+	// purposes only.
+	DefaultMountsFile string `toml:"-"`
+
+	// Namespace is the libpod namespace to use. Namespaces are used to create
+	// scopes to separate containers and pods in the state. When namespace is
+	// set, libpod will only view containers and pods in the same namespace. All
+	// containers and pods created will default to the namespace set here. A
+	// namespace of "", the empty string, is equivalent to no namespace, and all
+	// containers and pods will be visible. The default namespace is "".
+	Namespace string `toml:"namespace,omitempty"`
+
+	// InfraImage is the image a pod infra container will use to manage
+	// namespaces.
+	InfraImage string `toml:"infra_image"`
+
+	// InfraCommand is the command run to start up a pod infra container.
+	InfraCommand string `toml:"infra_command"`
+
+	// EnablePortReservation determines whether libpod will reserve ports on the
+	// host when they are forwarded to containers. When enabled, forwarded ports
+	// are held open by conmon as long as the
+	// container is running, ensuring that they cannot be reused by other
+	// programs on the host. However, this can cause significant memory usage if
+	// a container has many ports forwarded to it. Disabling this can save
+	// memory.
+	EnablePortReservation bool `toml:"enable_port_reservation"`
+
+	// EnableLabeling indicates whether libpod will support container labeling.
+	EnableLabeling bool `toml:"label"`
+
+	// NetworkCmdPath is the path to the slirp4netns binary.
+	NetworkCmdPath string `toml:"network_cmd_path"`
+
+	// NumLocks is the number of locks to make available for containers and
+	// pods.
+	NumLocks uint32 `toml:"num_locks,omitempty"`
+
+	// LockType is the type of locking to use.
+	LockType string `toml:"lock_type,omitempty"`
+
+	// EventsLogger determines where events should be logged.
+	EventsLogger string `toml:"events_logger"`
+
+	// EventsLogFilePath is where the events log is stored.
+	EventsLogFilePath string `toml:"events_logfile_path"`
+
+	// DetachKeys is the sequence of keys used to detach a container.
+	DetachKeys string `toml:"detach_keys"`
+
+	// SDNotify tells Libpod to allow containers to notify the host systemd of
+	// readiness using the SD_NOTIFY mechanism.
+	SDNotify bool
+}
+
+// DBConfig is a set of Libpod runtime configuration settings that are saved in
+// a State when it is first created, and can subsequently be retrieved.
+type DBConfig struct {
+	LibpodRoot  string
+	LibpodTmp   string
+	StorageRoot string
+	StorageTmp  string
+	GraphDriver string
+	VolumePath  string
+}
+
+// readConfigFromFile reads the specified config file at `path` and attempts to
+// unmarshal its content into a Config.
+func readConfigFromFile(path string) (*Config, error) {
+	var config Config
+
+	configBytes, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("Reading configuration file %q", path)
+	err = toml.Unmarshal(configBytes, &config)
+
+	// For the sake of backwards compat we need to check if the config fields
+	// with *Set suffix are set in the config. Note that the storage-related
+	// fields are NOT set in the config here but in the storage.conf OR directly
+	// by the user.
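	// A minimal libpod.conf exercising the three fields checked below might
	// contain (illustrative values matching the in-memory defaults):
	//
	//	volume_path = "/var/lib/containers/storage/volumes"
	//	static_dir  = "/var/lib/containers/storage/libpod"
	//	tmp_dir     = "/var/run/libpod"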
+	if config.VolumePath != "" {
+		config.VolumePathSet = true
+	}
+	if config.StaticDir != "" {
+		config.StaticDirSet = true
+	}
+	if config.TmpDir != "" {
+		config.TmpDirSet = true
+	}
+
+	return &config, err
+}
+
+// Write encodes the config as TOML and writes it to the specified path.
+func (c *Config) Write(path string) error {
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
+	if err != nil {
+		return errors.Wrapf(err, "error opening config file %q", path)
+	}
+
+	buffer := new(bytes.Buffer)
+	if err := toml.NewEncoder(buffer).Encode(c); err != nil {
+		return errors.Wrapf(err, "error encoding config")
+	}
+
+	if _, err := f.WriteString(buffer.String()); err != nil {
+		return errors.Wrapf(err, "error writing config %q", path)
+	}
+	return err
+}
+
+// FindConmon iterates over (*Config).ConmonPath and returns the path to the
+// first (version-)matching conmon binary. If none is found, we try a path
+// lookup of "conmon".
+func (c *Config) FindConmon() (string, error) {
+	foundOutdatedConmon := false
+	for _, path := range c.ConmonPath {
+		stat, err := os.Stat(path)
+		if err != nil {
+			continue
+		}
+		if stat.IsDir() {
+			continue
+		}
+		if err := probeConmon(path); err != nil {
+			logrus.Warnf("Conmon at %s invalid: %v", path, err)
+			foundOutdatedConmon = true
+			continue
+		}
+		logrus.Debugf("Using conmon: %q", path)
+		return path, nil
+	}
+
+	// Search the $PATH as last fallback
+	if path, err := exec.LookPath("conmon"); err == nil {
+		if err := probeConmon(path); err != nil {
+			logrus.Warnf("Conmon at %s is invalid: %v", path, err)
+			foundOutdatedConmon = true
+		} else {
+			logrus.Debugf("Using conmon from $PATH: %q", path)
+			return path, nil
+		}
+	}
+
+	if foundOutdatedConmon {
+		return "", errors.Wrapf(define.ErrConmonOutdated,
+			"please update to v%d.%d.%d or later",
+			_conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion)
+	}
+
+	return "", errors.Wrapf(define.ErrInvalidArg,
+		"could not find a working conmon binary (configured options: %v)",
+		c.ConmonPath)
+}
+
+// probeConmon calls conmon --version and verifies it is a new enough version for
+// the runtime expectations podman currently has.
+func probeConmon(conmonBinary string) error {
+	cmd := exec.Command(conmonBinary, "--version")
+	var out bytes.Buffer
+	cmd.Stdout = &out
+	err := cmd.Run()
+	if err != nil {
+		return err
+	}
+	r := regexp.MustCompile(`^conmon version (?P<Major>\d+).(?P<Minor>\d+).(?P<Patch>\d+)`)
+
+	matches := r.FindStringSubmatch(out.String())
+	if len(matches) != 4 {
+		return errors.Wrap(err, _conmonVersionFormatErr)
+	}
+	major, err := strconv.Atoi(matches[1])
+	if err != nil {
+		return errors.Wrap(err, _conmonVersionFormatErr)
+	}
+	if major < _conmonMinMajorVersion {
+		return define.ErrConmonOutdated
+	}
+	if major > _conmonMinMajorVersion {
+		return nil
+	}
+
+	minor, err := strconv.Atoi(matches[2])
+	if err != nil {
+		return errors.Wrap(err, _conmonVersionFormatErr)
+	}
+	if minor < _conmonMinMinorVersion {
+		return define.ErrConmonOutdated
+	}
+	if minor > _conmonMinMinorVersion {
+		return nil
+	}
+
+	patch, err := strconv.Atoi(matches[3])
+	if err != nil {
+		return errors.Wrap(err, _conmonVersionFormatErr)
+	}
+	if patch < _conmonMinPatchVersion {
+		return define.ErrConmonOutdated
+	}
+	if patch > _conmonMinPatchVersion {
+		return nil
+	}
+
+	return nil
+}
+
+// NewConfig creates a new Config. It starts with an empty config and, if
+// specified, merges the config at `userConfigPath` path.
+// Depending on whether we're running as root or rootless, we then merge the
+// system configuration, followed by the default config (hard-coded defaults in
+// memory).
+//
+// Note that the OCI runtime is hard-set to `crun` if we're running on a system
+// with cgroupsv2. Other OCI runtimes do not yet support cgroupsv2. This
+// might change in the future.
+func NewConfig(userConfigPath string) (*Config, error) {
+	config := &Config{} // start with an empty config
+
+	// First, try to read the user-specified config
+	if userConfigPath != "" {
+		var err error
+		config, err = readConfigFromFile(userConfigPath)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error reading user config %q", userConfigPath)
+		}
+	}
+
+	// Now, check if the user can access system configs and merge them if needed.
+	if configs, err := systemConfigs(); err != nil {
+		return nil, errors.Wrapf(err, "error finding config on system")
+	} else {
+		for _, path := range configs {
+			systemConfig, err := readConfigFromFile(path)
+			if err != nil {
+				return nil, errors.Wrapf(err, "error reading system config %q", path)
+			}
+			// Merge it into the config. Any unset field in config will be
+			// overwritten by the systemConfig.
+			if err := config.mergeConfig(systemConfig); err != nil {
+				return nil, errors.Wrapf(err, "error merging system config")
+			}
+			logrus.Debugf("Merged system config %q: %v", path, config)
+		}
+	}
+
+	// Finally, create a default config from memory and forcefully merge it into
+	// the config. This way we try to make sure that all fields are properly set
+	// and that user AND system configs can each be set partially.
+	if defaultConfig, err := defaultConfigFromMemory(); err != nil {
+		return nil, errors.Wrapf(err, "error generating default config from memory")
+	} else {
+		if err := config.mergeConfig(defaultConfig); err != nil {
+			return nil, errors.Wrapf(err, "error merging default config from memory")
+		}
+	}
+
+	// Relative paths can cause nasty bugs, because core paths we use could
+	// shift between runs (or even parts of the program - the OCI runtime
+	// uses a different working directory than we do, for example).
+	if !filepath.IsAbs(config.StaticDir) {
+		return nil, errors.Wrapf(define.ErrInvalidArg, "static directory must be an absolute path - instead got %q", config.StaticDir)
+	}
+	if !filepath.IsAbs(config.TmpDir) {
+		return nil, errors.Wrapf(define.ErrInvalidArg, "temporary directory must be an absolute path - instead got %q", config.TmpDir)
+	}
+	if !filepath.IsAbs(config.VolumePath) {
+		return nil, errors.Wrapf(define.ErrInvalidArg, "volume path must be an absolute path - instead got %q", config.VolumePath)
+	}
+
+	// Check if we need to switch to cgroupfs on rootless.
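	// (Rootless users without a reachable systemd user session are switched
	// from the systemd cgroup manager to cgroupfs here; see
	// checkCgroupsAndAdjustConfig below.)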
+	config.checkCgroupsAndAdjustConfig()
+
+	return config, nil
+}
+
+func rootlessConfigPath() (string, error) {
+	home, err := util.HomeDir()
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Join(home, _rootlessConfigPath), nil
+}
+
+func systemConfigs() ([]string, error) {
+	if rootless.IsRootless() {
+		path, err := rootlessConfigPath()
+		if err != nil {
+			return nil, err
+		}
+		if _, err := os.Stat(path); err == nil {
+			return []string{path}, nil
+		}
+		return nil, err
+	}
+
+	configs := []string{}
+	if _, err := os.Stat(_rootOverrideConfigPath); err == nil {
+		configs = append(configs, _rootOverrideConfigPath)
+	}
+	if _, err := os.Stat(_rootConfigPath); err == nil {
+		configs = append(configs, _rootConfigPath)
+	}
+	return configs, nil
+}
+
+// checkCgroupsAndAdjustConfig checks if we're running rootless with the systemd
+// cgroup manager. If the user session isn't available, we switch the cgroup
+// manager to cgroupfs. Note that this only applies to rootless.
+func (c *Config) checkCgroupsAndAdjustConfig() {
+	if !rootless.IsRootless() || c.CgroupManager != define.SystemdCgroupsManager {
+		return
+	}
+
+	session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
+	hasSession := session != ""
+	if hasSession && strings.HasPrefix(session, "unix:path=") {
+		_, err := os.Stat(strings.TrimPrefix(session, "unix:path="))
+		hasSession = err == nil
+	}
+
+	if !hasSession {
+		logrus.Warningf("The cgroups manager is set to systemd but there is no systemd user session available")
+		logrus.Warningf("To use systemd, you may need to log in using a user session")
+		logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", rootless.GetRootlessUID())
+		logrus.Warningf("Falling back to --cgroup-manager=cgroupfs")
+		c.CgroupManager = define.CgroupfsCgroupsManager
+	}
+}
diff --git a/vendor/github.com/containers/libpod/libpod/config/default.go b/vendor/github.com/containers/libpod/libpod/config/default.go
new file mode 100644
index 0000000000..17574c0592
--- /dev/null
+++ b/vendor/github.com/containers/libpod/libpod/config/default.go
@@ -0,0 +1,137 @@
+package config
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/containers/libpod/libpod/define"
+	"github.com/containers/libpod/libpod/events"
+	"github.com/containers/libpod/pkg/rootless"
+	"github.com/containers/libpod/pkg/util"
+	"github.com/containers/storage"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// _defaultGraphRoot points to the default path of the graph root.
+	_defaultGraphRoot = "/var/lib/containers/storage"
+	// _defaultRootlessSignaturePolicyPath points to the default path of the
+	// rootless policy.json file.
+	_defaultRootlessSignaturePolicyPath = ".config/containers/policy.json"
+)
+
+// defaultConfigFromMemory returns a default libpod configuration. Note that the
+// config is different for root and rootless. It also parses the storage.conf.
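// For instance, rootless invocations get a per-user TmpDir under the runtime
// directory (see defaultTmpDir below) and, when the file exists, a per-user
// signature policy at ~/.config/containers/policy.json, while root uses
// /var/run/libpod and the system-wide policy.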
+func defaultConfigFromMemory() (*Config, error) { + c := new(Config) + if tmp, err := defaultTmpDir(); err != nil { + return nil, err + } else { + c.TmpDir = tmp + } + c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log") + + storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) + if err != nil { + return nil, err + } + if storeOpts.GraphRoot == "" { + logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot) + storeOpts.GraphRoot = _defaultGraphRoot + } + c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod") + c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") + c.StorageConfig = storeOpts + + c.ImageDefaultTransport = _defaultTransport + c.StateType = define.BoltDBStateStore + c.OCIRuntime = "runc" + c.OCIRuntimes = map[string][]string{ + "runc": { + "/usr/bin/runc", + "/usr/sbin/runc", + "/usr/local/bin/runc", + "/usr/local/sbin/runc", + "/sbin/runc", + "/bin/runc", + "/usr/lib/cri-o-runc/sbin/runc", + "/run/current-system/sw/bin/runc", + }, + // TODO - should we add "crun" defaults here as well? + } + c.ConmonPath = []string{ + "/usr/libexec/podman/conmon", + "/usr/local/libexec/podman/conmon", + "/usr/local/lib/podman/conmon", + "/usr/bin/conmon", + "/usr/sbin/conmon", + "/usr/local/bin/conmon", + "/usr/local/sbin/conmon", + "/run/current-system/sw/bin/conmon", + } + c.ConmonEnvVars = []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + } + c.RuntimeSupportsJSON = []string{ + "crun", + "runc", + } + c.RuntimeSupportsNoCgroups = []string{"crun"} + c.InitPath = define.DefaultInitPath + c.CgroupManager = define.SystemdCgroupsManager + c.MaxLogSize = -1 + c.NoPivotRoot = false + c.CNIConfigDir = _etcDir + "/cni/net.d/" + c.CNIPluginDir = []string{ + "/usr/libexec/cni", + "/usr/lib/cni", + "/usr/local/lib/cni", + "/opt/cni/bin", + } + c.CNIDefaultNetwork = "podman" + c.InfraCommand = define.DefaultInfraCommand + c.InfraImage = define.DefaultInfraImage + c.EnablePortReservation = true + c.EnableLabeling = true + c.NumLocks = 2048 + c.EventsLogger = events.DefaultEventerType.String() + c.DetachKeys = define.DefaultDetachKeys + // TODO - ideally we should expose a `type LockType string` along with + // constants. 
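	// A sketch of what such constants could look like ("file" is named here
	// purely for illustration; only "shm" is set by this default config):
	//
	//	type LockType string
	//
	//	const (
	//		LockTypeSHM  LockType = "shm"
	//		LockTypeFile LockType = "file"
	//	)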
+ c.LockType = "shm" + + if rootless.IsRootless() { + home, err := util.HomeDir() + if err != nil { + return nil, err + } + sigPath := filepath.Join(home, _defaultRootlessSignaturePolicyPath) + if _, err := os.Stat(sigPath); err == nil { + c.SignaturePolicyPath = sigPath + } + } + return c, nil +} + +func defaultTmpDir() (string, error) { + if !rootless.IsRootless() { + return "/var/run/libpod", nil + } + + runtimeDir, err := util.GetRuntimeDir() + if err != nil { + return "", err + } + libpodRuntimeDir := filepath.Join(runtimeDir, "libpod") + + if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil { + if !os.IsExist(err) { + return "", errors.Wrapf(err, "cannot mkdir %s", libpodRuntimeDir) + } else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil { + // The directory already exist, just set the sticky bit + return "", errors.Wrapf(err, "could not set sticky bit on %s", libpodRuntimeDir) + } + } + return filepath.Join(libpodRuntimeDir, "tmp"), nil +} diff --git a/vendor/github.com/containers/libpod/libpod/config/merge.go b/vendor/github.com/containers/libpod/libpod/config/merge.go new file mode 100644 index 0000000000..798a63da7d --- /dev/null +++ b/vendor/github.com/containers/libpod/libpod/config/merge.go @@ -0,0 +1,183 @@ +package config + +import ( + "path/filepath" + + "github.com/containers/libpod/libpod/define" + "github.com/sirupsen/logrus" +) + +// Merge merges the other config into the current one. Note that a field of the +// other config is only merged when it's not already set in the current one. +// +// Note that the StateType and the StorageConfig will NOT be changed. +func (c *Config) mergeConfig(other *Config) error { + // strings + c.CgroupManager = mergeStrings(c.CgroupManager, other.CgroupManager) + c.CNIConfigDir = mergeStrings(c.CNIConfigDir, other.CNIConfigDir) + c.CNIDefaultNetwork = mergeStrings(c.CNIDefaultNetwork, other.CNIDefaultNetwork) + c.DefaultMountsFile = mergeStrings(c.DefaultMountsFile, other.DefaultMountsFile) + c.DetachKeys = mergeStrings(c.DetachKeys, other.DetachKeys) + c.EventsLogFilePath = mergeStrings(c.EventsLogFilePath, other.EventsLogFilePath) + c.EventsLogger = mergeStrings(c.EventsLogger, other.EventsLogger) + c.ImageDefaultTransport = mergeStrings(c.ImageDefaultTransport, other.ImageDefaultTransport) + c.InfraCommand = mergeStrings(c.InfraCommand, other.InfraCommand) + c.InfraImage = mergeStrings(c.InfraImage, other.InfraImage) + c.InitPath = mergeStrings(c.InitPath, other.InitPath) + c.LockType = mergeStrings(c.LockType, other.LockType) + c.Namespace = mergeStrings(c.Namespace, other.Namespace) + c.NetworkCmdPath = mergeStrings(c.NetworkCmdPath, other.NetworkCmdPath) + c.OCIRuntime = mergeStrings(c.OCIRuntime, other.OCIRuntime) + c.SignaturePolicyPath = mergeStrings(c.SignaturePolicyPath, other.SignaturePolicyPath) + c.StaticDir = mergeStrings(c.StaticDir, other.StaticDir) + c.TmpDir = mergeStrings(c.TmpDir, other.TmpDir) + c.VolumePath = mergeStrings(c.VolumePath, other.VolumePath) + + // string map of slices + c.OCIRuntimes = mergeStringMaps(c.OCIRuntimes, other.OCIRuntimes) + + // string slices + c.CNIPluginDir = mergeStringSlices(c.CNIPluginDir, other.CNIPluginDir) + c.ConmonEnvVars = mergeStringSlices(c.ConmonEnvVars, other.ConmonEnvVars) + c.ConmonPath = mergeStringSlices(c.ConmonPath, other.ConmonPath) + c.HooksDir = mergeStringSlices(c.HooksDir, other.HooksDir) + c.RuntimePath = mergeStringSlices(c.RuntimePath, other.RuntimePath) + c.RuntimeSupportsJSON = mergeStringSlices(c.RuntimeSupportsJSON, 
other.RuntimeSupportsJSON) + c.RuntimeSupportsNoCgroups = mergeStringSlices(c.RuntimeSupportsNoCgroups, other.RuntimeSupportsNoCgroups) + + // int64s + c.MaxLogSize = mergeInt64s(c.MaxLogSize, other.MaxLogSize) + + // uint32s + c.NumLocks = mergeUint32s(c.NumLocks, other.NumLocks) + + // bools + c.EnableLabeling = mergeBools(c.EnableLabeling, other.EnableLabeling) + c.EnablePortReservation = mergeBools(c.EnablePortReservation, other.EnablePortReservation) + c.NoPivotRoot = mergeBools(c.NoPivotRoot, other.NoPivotRoot) + c.SDNotify = mergeBools(c.SDNotify, other.SDNotify) + + // state type + if c.StateType == define.InvalidStateStore { + c.StateType = other.StateType + } + + // store options - need to check all fields since some configs might only + // set it partially + c.StorageConfig.RunRoot = mergeStrings(c.StorageConfig.RunRoot, other.StorageConfig.RunRoot) + c.StorageConfig.GraphRoot = mergeStrings(c.StorageConfig.GraphRoot, other.StorageConfig.GraphRoot) + c.StorageConfig.GraphDriverName = mergeStrings(c.StorageConfig.GraphDriverName, other.StorageConfig.GraphDriverName) + c.StorageConfig.GraphDriverOptions = mergeStringSlices(c.StorageConfig.GraphDriverOptions, other.StorageConfig.GraphDriverOptions) + if c.StorageConfig.UIDMap == nil { + c.StorageConfig.UIDMap = other.StorageConfig.UIDMap + } + if c.StorageConfig.GIDMap == nil { + c.StorageConfig.GIDMap = other.StorageConfig.GIDMap + } + + // backwards compat *Set fields + c.StorageConfigRunRootSet = mergeBools(c.StorageConfigRunRootSet, other.StorageConfigRunRootSet) + c.StorageConfigGraphRootSet = mergeBools(c.StorageConfigGraphRootSet, other.StorageConfigGraphRootSet) + c.StorageConfigGraphDriverNameSet = mergeBools(c.StorageConfigGraphDriverNameSet, other.StorageConfigGraphDriverNameSet) + c.VolumePathSet = mergeBools(c.VolumePathSet, other.VolumePathSet) + c.StaticDirSet = mergeBools(c.StaticDirSet, other.StaticDirSet) + c.TmpDirSet = mergeBools(c.TmpDirSet, other.TmpDirSet) + + return nil +} + +// MergeDBConfig merges the configuration from the database. 
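// For example, if the user never set StaticDir (StaticDirSet == false) and the
// database records a LibpodRoot of "/var/lib/containers/storage/libpod", that
// recorded path replaces whatever the in-memory default was, keeping the
// runtime pointed at its existing state.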
+func (c *Config) MergeDBConfig(dbConfig *DBConfig) error { + + if !c.StorageConfigRunRootSet && dbConfig.StorageTmp != "" { + if c.StorageConfig.RunRoot != dbConfig.StorageTmp && + c.StorageConfig.RunRoot != "" { + logrus.Debugf("Overriding run root %q with %q from database", + c.StorageConfig.RunRoot, dbConfig.StorageTmp) + } + c.StorageConfig.RunRoot = dbConfig.StorageTmp + } + + if !c.StorageConfigGraphRootSet && dbConfig.StorageRoot != "" { + if c.StorageConfig.GraphRoot != dbConfig.StorageRoot && + c.StorageConfig.GraphRoot != "" { + logrus.Debugf("Overriding graph root %q with %q from database", + c.StorageConfig.GraphRoot, dbConfig.StorageRoot) + } + c.StorageConfig.GraphRoot = dbConfig.StorageRoot + } + + if !c.StorageConfigGraphDriverNameSet && dbConfig.GraphDriver != "" { + if c.StorageConfig.GraphDriverName != dbConfig.GraphDriver && + c.StorageConfig.GraphDriverName != "" { + logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve", + c.StorageConfig.GraphDriverName, dbConfig.GraphDriver) + } + c.StorageConfig.GraphDriverName = dbConfig.GraphDriver + } + + if !c.StaticDirSet && dbConfig.LibpodRoot != "" { + if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" { + logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot) + } + c.StaticDir = dbConfig.LibpodRoot + } + + if !c.TmpDirSet && dbConfig.LibpodTmp != "" { + if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" { + logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp) + } + c.TmpDir = dbConfig.LibpodTmp + c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log") + } + + if !c.VolumePathSet && dbConfig.VolumePath != "" { + if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" { + logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath) + } + c.VolumePath = dbConfig.VolumePath + } + return nil +} + +func mergeStrings(a, b string) string { + if a == "" { + return b + } + return a +} + +func mergeStringSlices(a, b []string) []string { + if len(a) == 0 && b != nil { + return b + } + return a +} + +func mergeStringMaps(a, b map[string][]string) map[string][]string { + if len(a) == 0 && b != nil { + return b + } + return a +} + +func mergeInt64s(a, b int64) int64 { + if a == 0 { + return b + } + return a +} + +func mergeUint32s(a, b uint32) uint32 { + if a == 0 { + return b + } + return a +} + +func mergeBools(a, b bool) bool { + if !a { + return b + } + return a +} diff --git a/vendor/github.com/containers/libpod/libpod/container.go b/vendor/github.com/containers/libpod/libpod/container.go index 9c01d2adf5..8e24391b99 100644 --- a/vendor/github.com/containers/libpod/libpod/container.go +++ b/vendor/github.com/containers/libpod/libpod/container.go @@ -10,7 +10,7 @@ import ( "github.com/containernetworking/cni/pkg/types" cnitypes "github.com/containernetworking/cni/pkg/types/current" - "github.com/containers/image/manifest" + "github.com/containers/image/v5/manifest" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/lock" "github.com/containers/libpod/pkg/namespaces" @@ -129,7 +129,7 @@ type Container struct { valid bool lock lock.Locker runtime *Runtime - ociRuntime *OCIRuntime + ociRuntime OCIRuntime rootlessSlirpSyncR *os.File rootlessSlirpSyncW *os.File @@ -356,6 +356,9 @@ type ContainerConfig struct { StopTimeout uint `json:"stopTimeout,omitempty"` // Time container was 
created CreatedTime time.Time `json:"createdTime"` + // NoCgroups indicates that the container will not create CGroups. It is + // incompatible with CgroupParent. + NoCgroups bool `json:"noCgroups,omitempty"` // Cgroup parent of the container CgroupParent string `json:"cgroupParent"` // LogPath log location @@ -1056,9 +1059,9 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in // CGroupPath returns a cgroups "path" for a given container. func (c *Container) CGroupPath() (string, error) { switch c.runtime.config.CgroupManager { - case CgroupfsCgroupsManager: + case define.CgroupfsCgroupsManager: return filepath.Join(c.config.CgroupParent, fmt.Sprintf("libpod-%s", c.ID())), nil - case SystemdCgroupsManager: + case define.SystemdCgroupsManager: if rootless.IsRootless() { uid := rootless.GetRootlessUID() return filepath.Join(c.config.CgroupParent, fmt.Sprintf("user-%d.slice/user@%d.service/user.slice", uid, uid), createUnitName("libpod", c.ID())), nil @@ -1182,3 +1185,12 @@ func (c *Container) HasHealthCheck() bool { func (c *Container) HealthCheckConfig() *manifest.Schema2HealthConfig { return c.config.HealthCheckConfig } + +// AutoRemove indicates whether the container will be removed after it is executed +func (c *Container) AutoRemove() bool { + spec := c.config.Spec + if spec.Annotations == nil { + return false + } + return c.Spec().Annotations[InspectAnnotationAutoremove] == InspectResponseTrue +} diff --git a/vendor/github.com/containers/libpod/libpod/container_api.go b/vendor/github.com/containers/libpod/libpod/container_api.go index abcfcb2715..a6f5b54d53 100644 --- a/vendor/github.com/containers/libpod/libpod/container_api.go +++ b/vendor/github.com/containers/libpod/libpod/container_api.go @@ -1,6 +1,7 @@ package libpod import ( + "bufio" "context" "io" "io/ioutil" @@ -14,7 +15,6 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/remotecommand" ) @@ -33,9 +33,7 @@ func (c *Container) Init(ctx context.Context) (err error) { } } - if !(c.state.State == define.ContainerStateConfigured || - c.state.State == define.ContainerStateStopped || - c.state.State == define.ContainerStateExited) { + if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateStopped, define.ContainerStateExited) { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID()) } @@ -177,18 +175,15 @@ func (c *Container) StopWithTimeout(timeout uint) error { } } - if c.state.State == define.ContainerStateConfigured || - c.state.State == define.ContainerStateUnknown || - c.state.State == define.ContainerStatePaused { - return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s is in state %s", c.ID(), c.state.State.String()) + if c.ensureState(define.ContainerStateStopped, define.ContainerStateExited) { + return define.ErrCtrStopped } - if c.state.State == define.ContainerStateStopped || - c.state.State == define.ContainerStateExited { - return define.ErrCtrStopped + if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) { + return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created or running containers. 
%s is in state %s", c.ID(), c.state.State.String()) } - return c.stop(timeout) + return c.stop(timeout, false) } // Kill sends a signal to a container @@ -202,27 +197,30 @@ func (c *Container) Kill(signal uint) error { } } + // TODO: Is killing a paused container OK? if c.state.State != define.ContainerStateRunning { return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String()) } - defer c.newContainerEvent(events.Kill) - if err := c.ociRuntime.killContainer(c, signal); err != nil { + // Hardcode all = false, we only use all when removing. + if err := c.ociRuntime.KillContainer(c, signal, false); err != nil { return err } c.state.StoppedByUser = true + c.newContainerEvent(events.Kill) + return c.save() } // Exec starts a new process inside the container -// Returns an exit code and an error. If Exec was not able to exec in the container before a failure, an exit code of 126 is returned. -// If another generic error happens, an exit code of 125 is returned. +// Returns an exit code and an error. If Exec was not able to exec in the container before a failure, an exit code of define.ExecErrorCodeCannotInvoke is returned. +// If another generic error happens, an exit code of define.ExecErrorCodeGeneric is returned. // Sometimes, the $RUNTIME exec call errors, and if that is the case, the exit code is the exit code of the call. // Otherwise, the exit code will be the exit code of the executed call inside of the container. // TODO investigate allowing exec without attaching -func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir string, streams *AttachStreams, preserveFDs int, resize chan remotecommand.TerminalSize, detachKeys string) (int, error) { +func (c *Container) Exec(tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *AttachStreams, preserveFDs uint, resize chan remotecommand.TerminalSize, detachKeys string) (int, error) { var capList []string if !c.batched { c.lock.Lock() @@ -233,10 +231,7 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir } } - conState := c.state.State - - // TODO can probably relax this once we track exec sessions - if conState != define.ContainerStateRunning { + if c.state.State != define.ContainerStateRunning { return define.ExecErrorCodeCannotInvoke, errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running") } @@ -274,7 +269,24 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir } }() - pid, attachChan, err := c.ociRuntime.execContainer(c, cmd, capList, env, tty, workDir, user, sessionID, streams, preserveFDs, resize, detachKeys) + // if the user is empty, we should inherit the user that the container is currently running with + if user == "" { + user = c.config.User + } + + opts := new(ExecOptions) + opts.Cmd = cmd + opts.CapAdd = capList + opts.Env = env + opts.Terminal = tty + opts.Cwd = workDir + opts.User = user + opts.Streams = streams + opts.PreserveFDs = preserveFDs + opts.Resize = resize + opts.DetachKeys = detachKeys + + pid, attachChan, err := c.ociRuntime.ExecContainer(c, sessionID, opts) if err != nil { ec := define.ExecErrorCodeGeneric // Conmon will pass a non-zero exit code from the runtime as a pid here. 
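The hunk above replaces Exec's positional env slice with a map and funnels the runtime call through an ExecOptions struct. A minimal caller sketch against the new signature, assuming the vendored libpod import path and an already-running container handle (runWhoami is a hypothetical helper, not part of this diff):

    package libpodexample

    import (
        "bufio"
        "os"

        "github.com/containers/libpod/libpod"
    )

    // runWhoami execs `whoami` in a running container, exercising the new
    // map-based env and uint preserveFDs parameters.
    func runWhoami(ctr *libpod.Container) (int, error) {
        streams := new(libpod.AttachStreams)
        streams.OutputStream = os.Stdout
        streams.ErrorStream = os.Stderr
        // InputStream is now a *bufio.Reader rather than a plain io.Reader.
        streams.InputStream = bufio.NewReader(os.Stdin)
        streams.AttachOutput = true
        streams.AttachError = true

        env := map[string]string{"TERM": "xterm"}
        // tty=false, privileged=false, inherit user and workdir,
        // preserveFDs=0, no resize channel, default detach keys.
        return ctr.Exec(false, false, env, []string{"whoami"}, "", "", streams, 0, nil, "")
    }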
@@ -350,7 +362,7 @@ type AttachStreams struct { // ErrorStream will be attached to container's STDERR ErrorStream io.WriteCloser // InputStream will be attached to container's STDIN - InputStream io.Reader + InputStream *bufio.Reader // AttachOutput is whether to attach to STDOUT // If false, stdout will not be attached AttachOutput bool @@ -373,11 +385,10 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re c.lock.Unlock() } - if c.state.State != define.ContainerStateCreated && - c.state.State != define.ContainerStateRunning && - c.state.State != define.ContainerStateExited { + if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) { return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers") } + defer c.newContainerEvent(events.Attach) return c.attach(streams, keys, resize, false, nil) } @@ -414,7 +425,7 @@ func (c *Container) Unmount(force bool) error { return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID()) } if mounted == 1 { - if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused { + if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID()) } if len(c.state.ExecSessions) != 0 { @@ -519,24 +530,28 @@ func (c *Container) WaitWithInterval(waitTimeout time.Duration) (int32, error) { if !c.valid { return -1, define.ErrCtrRemoved } - err := wait.PollImmediateInfinite(waitTimeout, - func() (bool, error) { - logrus.Debugf("Checking container %s status...", c.ID()) - stopped, err := c.isStopped() - if err != nil { - return false, err - } - if !stopped { - return false, nil - } - return true, nil - }, - ) + + exitFile, err := c.exitFilePath() if err != nil { - return 0, err + return -1, err + } + chWait := make(chan error, 1) + + defer close(chWait) + + for { + // ignore errors here, it is only used to avoid waiting + // too long. 
+ _, _ = WaitForFile(exitFile, chWait, waitTimeout) + + stopped, err := c.isStopped() + if err != nil { + return -1, err + } + if stopped { + return c.state.ExitCode, nil + } } - exitCode := c.state.ExitCode - return exitCode, nil } // Cleanup unmounts all mount points in container and cleans up container storage @@ -552,7 +567,7 @@ func (c *Container) Cleanup(ctx context.Context) error { } // Check if state is good - if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused { + if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID()) } @@ -630,11 +645,9 @@ func (c *Container) Sync() error { // If runtime knows about the container, update its status in runtime // And then save back to disk - if (c.state.State != define.ContainerStateUnknown) && - (c.state.State != define.ContainerStateConfigured) && - (c.state.State != define.ContainerStateExited) { + if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopped) { oldState := c.state.State - if err := c.ociRuntime.updateContainerStatus(c, true); err != nil { + if err := c.ociRuntime.UpdateContainerStatus(c); err != nil { return err } // Only save back to DB if state changed @@ -644,6 +657,7 @@ func (c *Container) Sync() error { } } } + defer c.newContainerEvent(events.Sync) return nil } @@ -682,7 +696,7 @@ func (c *Container) Refresh(ctx context.Context) error { // Next, if the container is running, stop it if c.state.State == define.ContainerStateRunning { - if err := c.stop(c.config.StopTimeout); err != nil { + if err := c.stop(c.config.StopTimeout, false); err != nil { return err } } @@ -691,8 +705,10 @@ func (c *Container) Refresh(ctx context.Context) error { if len(c.state.ExecSessions) > 0 { logrus.Infof("Killing %d exec sessions in container %s. 
They will not be restored after refresh.", len(c.state.ExecSessions), c.ID()) - if err := c.ociRuntime.execStopContainer(c, c.config.StopTimeout); err != nil { - return err + } + for _, session := range c.state.ExecSessions { + if err := c.ociRuntime.ExecStopContainer(c, session.ID, c.StopTimeout()); err != nil { + return errors.Wrapf(err, "error stopping exec session %s of container %s", session.ID, c.ID()) } } diff --git a/vendor/github.com/containers/libpod/libpod/container_commit.go b/vendor/github.com/containers/libpod/libpod/container_commit.go index 8dfeee9b84..42f298a812 100644 --- a/vendor/github.com/containers/libpod/libpod/container_commit.go +++ b/vendor/github.com/containers/libpod/libpod/container_commit.go @@ -8,7 +8,7 @@ import ( "github.com/containers/buildah" "github.com/containers/buildah/util" - is "github.com/containers/image/storage" + is "github.com/containers/image/v5/storage" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" @@ -50,11 +50,11 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai } if c.state.State == define.ContainerStateRunning && options.Pause { - if err := c.ociRuntime.pauseContainer(c); err != nil { + if err := c.pause(); err != nil { return nil, errors.Wrapf(err, "error pausing container %q", c.ID()) } defer func() { - if err := c.ociRuntime.unpauseContainer(c); err != nil { + if err := c.unpause(); err != nil { logrus.Errorf("error unpausing container %q: %v", c.ID(), err) } }() diff --git a/vendor/github.com/containers/libpod/libpod/container_graph.go b/vendor/github.com/containers/libpod/libpod/container_graph.go index 5aa51bc2fd..f6988e1ac9 100644 --- a/vendor/github.com/containers/libpod/libpod/container_graph.go +++ b/vendor/github.com/containers/libpod/libpod/container_graph.go @@ -16,14 +16,30 @@ type containerNode struct { dependedOn []*containerNode } -type containerGraph struct { +// ContainerGraph is a dependency graph based on a set of containers. +type ContainerGraph struct { nodes map[string]*containerNode noDepNodes []*containerNode notDependedOnNodes map[string]*containerNode } -func buildContainerGraph(ctrs []*Container) (*containerGraph, error) { - graph := new(containerGraph) +// DependencyMap returns the dependency graph as map with the key being a +// container and the value being the containers the key depends on. +func (cg *ContainerGraph) DependencyMap() (dependencies map[*Container][]*Container) { + dependencies = make(map[*Container][]*Container) + for _, node := range cg.nodes { + dependsOn := make([]*Container, len(node.dependsOn)) + for i, d := range node.dependsOn { + dependsOn[i] = d.container + } + dependencies[node.container] = dependsOn + } + return dependencies +} + +// BuildContainerGraph builds a dependency graph based on the container slice. 
+func BuildContainerGraph(ctrs []*Container) (*ContainerGraph, error) { + graph := new(ContainerGraph) graph.nodes = make(map[string]*containerNode) graph.notDependedOnNodes = make(map[string]*containerNode) @@ -78,7 +94,7 @@ func buildContainerGraph(ctrs []*Container) (*containerGraph, error) { // Detect cycles in a container graph using Tarjan's strongly connected // components algorithm // Return true if a cycle is found, false otherwise -func detectCycles(graph *containerGraph) (bool, error) { +func detectCycles(graph *ContainerGraph) (bool, error) { type nodeInfo struct { index int lowLink int diff --git a/vendor/github.com/containers/libpod/libpod/container_inspect.go b/vendor/github.com/containers/libpod/libpod/container_inspect.go index 1b6dd829c2..66aca23ed0 100644 --- a/vendor/github.com/containers/libpod/libpod/container_inspect.go +++ b/vendor/github.com/containers/libpod/libpod/container_inspect.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/containers/image/manifest" + "github.com/containers/image/v5/manifest" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/driver" "github.com/containers/libpod/pkg/util" @@ -96,7 +96,7 @@ type InspectContainerData struct { Path string `json:"Path"` Args []string `json:"Args"` State *InspectContainerState `json:"State"` - ImageID string `json:"Image"` + Image string `json:"Image"` ImageName string `json:"ImageName"` Rootfs string `json:"Rootfs"` Pod string `json:"Pod"` @@ -268,6 +268,11 @@ type InspectContainerHostConfig struct { // populated. // TODO. Cgroup string `json:"Cgroup"` + // Cgroups contains the container's CGroup mode. + // Allowed values are "default" (container is creating CGroups) and + // "disabled" (container is not creating CGroups). + // This is Libpod-specific and not included in `docker inspect`. + Cgroups string `json:"Cgroups"` // Links is unused, and provided purely for Docker compatibility. Links []string `json:"Links"` // OOMScoreAdj is an adjustment that will be made to the container's OOM @@ -713,7 +718,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) StartedAt: runtimeInfo.StartedTime, FinishedAt: runtimeInfo.FinishedTime, }, - ImageID: config.RootfsImageID, + Image: config.RootfsImageID, ImageName: config.RootfsImageName, ExitCommand: config.ExitCommand, Namespace: config.Namespace, @@ -958,6 +963,11 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named restartPolicy.Name = c.config.RestartPolicy restartPolicy.MaximumRetryCount = c.config.RestartRetries hostConfig.RestartPolicy = restartPolicy + if c.config.NoCgroups { + hostConfig.Cgroups = "disabled" + } else { + hostConfig.Cgroups = "default" + } hostConfig.Dns = make([]string, 0, len(c.config.DNSServer)) for _, dns := range c.config.DNSServer { @@ -1312,9 +1322,9 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named // Need to check if it's the default, and not print if so. 
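BuildContainerGraph and DependencyMap, exported in the container_graph.go hunk above, let callers outside libpod inspect start-order dependencies. A short sketch (printDeps is a hypothetical helper; the container slice would come from the runtime):

    package libpodexample

    import (
        "fmt"

        "github.com/containers/libpod/libpod"
    )

    // printDeps builds the dependency graph for a set of containers and
    // prints each container's direct dependencies.
    func printDeps(ctrs []*libpod.Container) error {
        graph, err := libpod.BuildContainerGraph(ctrs)
        if err != nil {
            return err
        }
        for ctr, deps := range graph.DependencyMap() {
            for _, dep := range deps {
                fmt.Printf("%s depends on %s\n", ctr.ID(), dep.ID())
            }
        }
        return nil
    }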
defaultCgroupParent := "" switch c.runtime.config.CgroupManager { - case CgroupfsCgroupsManager: + case define.CgroupfsCgroupsManager: defaultCgroupParent = CgroupfsDefaultCgroupParent - case SystemdCgroupsManager: + case define.SystemdCgroupsManager: defaultCgroupParent = SystemdDefaultCgroupParent } if c.config.CgroupParent != defaultCgroupParent { diff --git a/vendor/github.com/containers/libpod/libpod/container_internal.go b/vendor/github.com/containers/libpod/libpod/container_internal.go index 313f679639..028d7601db 100644 --- a/vendor/github.com/containers/libpod/libpod/container_internal.go +++ b/vendor/github.com/containers/libpod/libpod/container_internal.go @@ -14,6 +14,7 @@ import ( "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" + "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/ctime" "github.com/containers/libpod/pkg/hooks" "github.com/containers/libpod/pkg/hooks/exec" @@ -21,6 +22,7 @@ import ( "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/mount" + "github.com/cyphar/filepath-securejoin" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" @@ -129,13 +131,13 @@ func (c *Container) CheckpointPath() string { } // AttachSocketPath retrieves the path of the container's attach socket -func (c *Container) AttachSocketPath() string { - return filepath.Join(c.ociRuntime.socketsDir, c.ID(), "attach") +func (c *Container) AttachSocketPath() (string, error) { + return c.ociRuntime.AttachSocketPath(c) } // exitFilePath gets the path to the container's exit file -func (c *Container) exitFilePath() string { - return filepath.Join(c.ociRuntime.exitsDir, c.ID()) +func (c *Container) exitFilePath() (string, error) { + return c.ociRuntime.ExitFilePath(c) } // create a bundle path and associated files for an exec session @@ -162,7 +164,11 @@ func (c *Container) createExecBundle(sessionID string) (err error) { // cleanup an exec session after its done func (c *Container) cleanupExecBundle(sessionID string) error { - return os.RemoveAll(c.execBundlePath(sessionID)) + if err := os.RemoveAll(c.execBundlePath(sessionID)); err != nil && !os.IsNotExist(err) { + return err + } + + return c.ociRuntime.ExecContainerCleanup(c, sessionID) } // the path to a containers exec session bundle @@ -181,8 +187,8 @@ func (c *Container) execLogPath(sessionID string) string { } // the socket conmon creates for an exec session -func (c *Container) execAttachSocketPath(sessionID string) string { - return filepath.Join(c.ociRuntime.socketsDir, sessionID, "attach") +func (c *Container) execAttachSocketPath(sessionID string) (string, error) { + return c.ociRuntime.ExecAttachSocketPath(c, sessionID) } // execExitFileDir gets the path to the container's exit file @@ -192,7 +198,7 @@ func (c *Container) execExitFileDir(sessionID string) string { // execOCILog returns the file path for the exec sessions oci log func (c *Container) execOCILog(sessionID string) string { - if !c.ociRuntime.supportsJSON { + if !c.ociRuntime.SupportsJSONErrors() { return "" } return filepath.Join(c.execBundlePath(sessionID), "oci-log") @@ -223,12 +229,15 @@ func (c *Container) readExecExitCode(sessionID string) (int, error) { // Wait for the container's exit file to appear. // When it does, update our state based on it. 
func (c *Container) waitForExitFileAndSync() error { - exitFile := c.exitFilePath() + exitFile, err := c.exitFilePath() + if err != nil { + return err + } chWait := make(chan error) defer close(chWait) - _, err := WaitForFile(exitFile, chWait, time.Second*5) + _, err = WaitForFile(exitFile, chWait, time.Second*5) if err != nil { // Exit file did not appear // Reset our state @@ -243,7 +252,7 @@ return err } - if err := c.ociRuntime.updateContainerStatus(c, false); err != nil { + if err := c.checkExitFile(); err != nil { return err } @@ -319,7 +328,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er // Is the container running again? // If so, we don't have to do anything - if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused { + if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { return false, nil } else if c.state.State == define.ContainerStateUnknown { return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt!") @@ -350,8 +359,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er if err := c.reinit(ctx, true); err != nil { return false, err } - } else if c.state.State == define.ContainerStateConfigured || - c.state.State == define.ContainerStateExited { + } else if c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) { // Initialize the container if err := c.init(ctx, true); err != nil { return false, err @@ -363,6 +371,18 @@ return true, nil } +// Ensure that the container is in a specific state or states. +// Returns true if the container is in one of the given states, +// or false otherwise.
+func (c *Container) ensureState(states ...define.ContainerStatus) bool { + for _, state := range states { + if state == c.state.State { + return true + } + } + return false +} + // Sync this container with on-disk state and runtime status // Should only be called with container lock held // This function should suffice to ensure a container's state is accurate and @@ -373,14 +393,13 @@ func (c *Container) syncContainer() error { } // If runtime knows about the container, update its status in runtime // And then save back to disk - if (c.state.State != define.ContainerStateUnknown) && - (c.state.State != define.ContainerStateConfigured) && - (c.state.State != define.ContainerStateExited) { + if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStatePaused) { oldState := c.state.State - // TODO: optionally replace this with a stat for the exit file - if err := c.ociRuntime.updateContainerStatus(c, false); err != nil { + + if err := c.checkExitFile(); err != nil { return err } + // Only save back to DB if state changed if c.state.State != oldState { // Check for a restart policy match @@ -506,7 +525,7 @@ func (c *Container) setupStorage(ctx context.Context) error { // Tear down a container's storage prior to removal func (c *Container) teardownStorage() error { - if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused { + if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID()) } @@ -524,7 +543,7 @@ func (c *Container) teardownStorage() error { // error - we wanted it gone, it is already gone. // Potentially another tool using containers/storage already // removed it? - if err == storage.ErrNotAContainer || err == storage.ErrContainerUnknown { + if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown { logrus.Warnf("Storage for container %s already removed", c.ID()) return nil } @@ -612,7 +631,11 @@ func (c *Container) refresh() error { return err } - return nil + if rootless.IsRootless() { + return nil + } + + return c.refreshCNI() } // Remove conmon attach socket and terminal resize FIFO @@ -635,15 +658,12 @@ func (c *Container) removeConmonFiles() error { } // Remove the exit file so we don't leak memory in tmpfs - exitFile := filepath.Join(c.ociRuntime.exitsDir, c.ID()) - if _, err := os.Stat(exitFile); err != nil { - if !os.IsNotExist(err) { - return errors.Wrapf(err, "error running stat on container %s exit file", c.ID()) - } - } else { - if err := os.Remove(exitFile); err != nil { - return errors.Wrapf(err, "error removing container %s exit file", c.ID()) - } + exitFile, err := c.exitFilePath() + if err != nil { + return err + } + if err := os.Remove(exitFile); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "error removing container %s exit file", c.ID()) } return nil @@ -710,10 +730,7 @@ func (c *Container) save() error { // Otherwise, this function will return with error if there are dependencies of this container that aren't running. 
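The variadic ensureState helper above collapses the chained equality checks that previously guarded every state transition. Its logic restated as a self-contained sketch (using libpod's define.ContainerStatus enum):

    // ensureState reports whether current is one of the given states;
    // callers replace `s == A || s == B || s == C` with ensureState(s, A, B, C).
    func ensureState(current define.ContainerStatus, states ...define.ContainerStatus) bool {
        for _, state := range states {
            if state == current {
                return true
            }
        }
        return false
    }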
func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) { // Container must be created or stopped to be started - if !(c.state.State == define.ContainerStateConfigured || - c.state.State == define.ContainerStateCreated || - c.state.State == define.ContainerStateStopped || - c.state.State == define.ContainerStateExited) { + if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID()) } @@ -744,8 +761,7 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err if err := c.reinit(ctx, false); err != nil { return err } - } else if c.state.State == define.ContainerStateConfigured || - c.state.State == define.ContainerStateExited { + } else if c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) { // Or initialize it if necessary if err := c.init(ctx, false); err != nil { return err @@ -788,7 +804,7 @@ func (c *Container) startDependencies(ctx context.Context) error { } // Build a dependency graph of containers - graph, err := buildContainerGraph(depCtrs) + graph, err := BuildContainerGraph(depCtrs) if err != nil { return errors.Wrapf(err, "error generating dependency graph for container %s", c.ID()) } @@ -912,6 +928,12 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { span.SetTag("struct", "container") defer span.Finish() + // Unconditionally remove conmon temporary files. + // We've been running into far too many issues where they block startup. + if err := c.removeConmonFiles(); err != nil { + return err + } + // Generate the OCI newSpec newSpec, err := c.generateSpec(ctx) if err != nil { @@ -924,7 +946,14 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { } // With the spec complete, do an OCI create - if err := c.ociRuntime.createContainer(c, nil); err != nil { + if err := c.ociRuntime.CreateContainer(c, nil); err != nil { + // Fedora 31 is carrying a patch to display improved error + // messages to better handle the V2 transition. This is NOT + // upstream in any OCI runtime. + // TODO: Remove once runc supports cgroupsv2 + if strings.Contains(err.Error(), "this version of runc doesn't work on cgroups v2") { + logrus.Errorf("oci runtime %q does not support CGroups V2: use system migrate to mitigate", c.ociRuntime.Name()) + } return err } @@ -963,7 +992,7 @@ func (c *Container) cleanupRuntime(ctx context.Context) error { // If the container is not ContainerStateStopped or // ContainerStateCreated, do nothing. 
- if c.state.State != define.ContainerStateStopped && c.state.State != define.ContainerStateCreated { + if !c.ensureState(define.ContainerStateStopped, define.ContainerStateCreated) { return nil } @@ -1054,8 +1083,7 @@ func (c *Container) initAndStart(ctx context.Context) (err error) { if err := c.reinit(ctx, false); err != nil { return err } - } else if c.state.State == define.ContainerStateConfigured || - c.state.State == define.ContainerStateExited { + } else if c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) { if err := c.init(ctx, false); err != nil { return err } @@ -1071,7 +1099,7 @@ func (c *Container) start() error { logrus.Debugf("Starting container %s with command %v", c.ID(), c.config.Spec.Process.Args) } - if err := c.ociRuntime.startContainer(c); err != nil { + if err := c.ociRuntime.StartContainer(c); err != nil { return err } logrus.Debugf("Started container %s", c.ID()) @@ -1093,10 +1121,28 @@ } // Internal, non-locking function to stop container -func (c *Container) stop(timeout uint) error { +func (c *Container) stop(timeout uint, all bool) error { logrus.Debugf("Stopping ctr %s (timeout %d)", c.ID(), timeout) - if err := c.ociRuntime.stopContainer(c, timeout); err != nil { + // We can't use --all if CGroups aren't present. + // Rootless containers with CGroups v1 and NoCgroups are both cases + // where this can happen. + if all { + if c.config.NoCgroups { + all = false + } else if rootless.IsRootless() { + // Only do this check if we need to + unified, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return err + } + if !unified { + all = false + } + } + } + + if err := c.ociRuntime.StopContainer(c, timeout, all); err != nil { return err } @@ -1119,7 +1165,21 @@ // Internal, non-locking function to pause a container func (c *Container) pause() error { - if err := c.ociRuntime.pauseContainer(c); err != nil { + if c.config.NoCgroups { + return errors.Wrapf(define.ErrNoCgroups, "cannot pause without using CGroups") + } + + if rootless.IsRootless() { + cgroupv2, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return errors.Wrap(err, "failed to determine cgroup version") + } + if !cgroupv2 { + return errors.Wrap(define.ErrNoCgroups, "cannot pause rootless containers with cgroups V1") + } + } + + if err := c.ociRuntime.PauseContainer(c); err != nil { return err } @@ -1132,7 +1192,11 @@ // Internal, non-locking function to unpause a container func (c *Container) unpause() error { - if err := c.ociRuntime.unpauseContainer(c); err != nil { + if c.config.NoCgroups { + return errors.Wrapf(define.ErrNoCgroups, "cannot unpause without using CGroups") + } + + if err := c.ociRuntime.UnpauseContainer(c); err != nil { return err } @@ -1145,7 +1209,7 @@ // Internal, non-locking function to restart a container func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err error) { - if c.state.State == define.ContainerStateUnknown || c.state.State == define.ContainerStatePaused { + if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStateExited) { return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state") } @@ -1153,7 +1217,7 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e
if c.state.State == define.ContainerStateRunning { conmonPID := c.state.ConmonPID - if err := c.stop(timeout); err != nil { + if err := c.stop(timeout, false); err != nil { return err } // Old versions of conmon have a bug where they create the exit file before @@ -1205,7 +1269,7 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e // TODO: Add ability to override mount label so we can use this for Mount() too // TODO: Can we use this for export? Copying SHM into the export might not be // good -func (c *Container) mountStorage() (string, error) { +func (c *Container) mountStorage() (_ string, Err error) { var err error // Container already mounted, nothing to do if c.state.Mounted { @@ -1225,20 +1289,93 @@ if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil { return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir) } + defer func() { + if Err != nil { + if err := c.unmountSHM(c.config.ShmDir); err != nil { + logrus.Errorf("Error unmounting SHM for container %s after mount error: %v", c.ID(), err) + } + } + }() } - // TODO: generalize this mount code so it will mount every mount in ctr.config.Mounts + // We need to mount the container before volumes - to ensure the copyup + // works properly. mountPoint := c.config.Rootfs if mountPoint == "" { mountPoint, err = c.mount() if err != nil { return "", err } + defer func() { + if Err != nil { + if err := c.unmount(false); err != nil { + logrus.Errorf("Error unmounting container %s after mount error: %v", c.ID(), err) + } + } + }() + } + + // Request a mount of all named volumes + for _, v := range c.config.NamedVolumes { + vol, err := c.mountNamedVolume(v, mountPoint) + if err != nil { + return "", err + } + defer func() { + if Err == nil { + return + } + vol.lock.Lock() + if err := vol.unmount(false); err != nil { + logrus.Errorf("Error unmounting volume %s after error mounting container %s: %v", vol.Name(), c.ID(), err) + } + vol.lock.Unlock() + }() } return mountPoint, nil } +// Mount a single named volume into the container. +// If necessary, copy up image contents into the volume. +// Does not verify that the named volume given is actually present in container +// config. +// Returns the volume that was mounted. +func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) (*Volume, error) { + vol, err := c.runtime.state.Volume(v.Name) + if err != nil { + return nil, errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID()) + } + + vol.lock.Lock() + defer vol.lock.Unlock() + if vol.needsMount() { + if err := vol.mount(); err != nil { + return nil, errors.Wrapf(err, "error mounting volume %s for container %s", vol.Name(), c.ID()) + } + } + // The volume may need a copy-up. Check the state.
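mountStorage above switches to a named return value (Err) so each successful step can register a deferred rollback that fires only if a later step fails; volumes additionally re-take their lock inside the deferred function. The idiom reduced to a sketch, with hypothetical mountA/mountB/unmountA helpers:

    // mountAll mounts A then B. If B fails, the deferred closure observes
    // the named return Err and unwinds A; on success it does nothing.
    func mountAll() (Err error) {
        if err := mountA(); err != nil {
            return err
        }
        defer func() {
            if Err != nil {
                unmountA()
            }
        }()
        return mountB()
    }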
+ if err := vol.update(); err != nil { + return nil, err + } + if vol.state.NeedsCopyUp { + logrus.Debugf("Copying up contents from container %s to volume %s", c.ID(), vol.Name()) + srcDir, err := securejoin.SecureJoin(mountpoint, v.Dest) + if err != nil { + return nil, errors.Wrapf(err, "error calculating destination path to copy up container %s volume %s", c.ID(), vol.Name()) + } + if err := c.copyWithTarFromImage(srcDir, vol.MountPoint()); err != nil && !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "error copying content from container %s into volume %s", c.ID(), vol.Name()) + } + + vol.state.NeedsCopyUp = false + if err := vol.save(); err != nil { + return nil, err + } + } + return vol, nil +} + // cleanupStorage unmounts and cleans up the container's root filesystem func (c *Container) cleanupStorage() error { if !c.state.Mounted { @@ -1247,14 +1384,19 @@ return nil } + var cleanupErr error + for _, containerMount := range c.config.Mounts { if err := c.unmountSHM(containerMount); err != nil { - return err + if cleanupErr != nil { + logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr) + } + cleanupErr = err } } if c.config.Rootfs != "" { - return nil + return cleanupErr } if err := c.unmount(false); err != nil { @@ -1262,21 +1404,54 @@ // error // We still want to be able to kick the container out of the // state - if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown { + if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown || errors.Cause(err) == storage.ErrLayerNotMounted { logrus.Errorf("Storage for container %s has been removed", c.ID()) - return nil + } else { + if cleanupErr != nil { + logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), cleanupErr) + } + cleanupErr = err } + } - return err + // Request an unmount of all named volumes + for _, v := range c.config.NamedVolumes { + vol, err := c.runtime.state.Volume(v.Name) + if err != nil { + if cleanupErr != nil { + logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr) + } + cleanupErr = errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID()) + + // We need to try and unmount every volume, so continue + // if they fail. + continue + } + + if vol.needsMount() { + vol.lock.Lock() + if err := vol.unmount(false); err != nil { + if cleanupErr != nil { + logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr) + } + cleanupErr = errors.Wrapf(err, "error unmounting volume %s for container %s", vol.Name(), c.ID()) + } + vol.lock.Unlock() + } } c.state.Mountpoint = "" c.state.Mounted = false if c.valid { - return c.save() + if err := c.save(); err != nil { + if cleanupErr != nil { + logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr) + } + cleanupErr = err + } } - return nil + return cleanupErr } // Unmount a container and free its resources @@ -1329,7 +1504,7 @@ func (c *Container) delete(ctx context.Context) (err error) { span.SetTag("struct", "container") defer span.Finish() - if err := c.ociRuntime.deleteContainer(c); err != nil { + if err := c.ociRuntime.DeleteContainer(c); err != nil { return errors.Wrapf(err, "error removing container %s from runtime", c.ID()) } @@ -1542,15 +1717,11 @@ func (c *Container) unmount(force bool) error { } // this should be from chrootarchive.
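The copy-up path above joins the user-supplied volume destination to the container mountpoint with filepath-securejoin rather than filepath.Join, so a symlink baked into the image cannot point the copy outside the root filesystem. Minimal sketch of the call (paths illustrative):

    package main

    import (
        "fmt"

        securejoin "github.com/cyphar/filepath-securejoin"
    )

    func main() {
        // Any symlinks in "data" are resolved relative to the first
        // argument, so the result always stays under the mountpoint.
        p, err := securejoin.SecureJoin("/var/lib/containers/merged", "data")
        if err != nil {
            panic(err)
        }
        fmt.Println(p) // e.g. /var/lib/containers/merged/data
    }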
-func (c *Container) copyWithTarFromImage(src, dest string) error { - mountpoint, err := c.mount() - if err != nil { - return err - } +// Container MUST be mounted before calling. +func (c *Container) copyWithTarFromImage(source, dest string) error { a := archive.NewDefaultArchiver() - source := filepath.Join(mountpoint, src) - if err = c.copyOwnerAndPerms(source, dest); err != nil { + if err := c.copyOwnerAndPerms(source, dest); err != nil { return err } return a.CopyWithTar(source, dest) @@ -1566,9 +1737,8 @@ func (c *Container) checkReadyForRemoval() error { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID()) } - if c.state.State == define.ContainerStateRunning || - c.state.State == define.ContainerStatePaused { - return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String()) + if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String()) } if len(c.state.ExecSessions) != 0 { @@ -1645,3 +1815,35 @@ func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume } return namedUserVolumes, userMounts } + +// Check for an exit file, and handle one if present +func (c *Container) checkExitFile() error { + // If the container's not running, nothing to do. + if !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { + return nil + } + + exitFile, err := c.exitFilePath() + if err != nil { + return err + } + + // Check for the exit file + info, err := os.Stat(exitFile) + if err != nil { + if os.IsNotExist(err) { + // Container is still running, no error + return nil + } + + return errors.Wrapf(err, "error running stat on container %s exit file", c.ID()) + } + + // Alright, it exists. Transition to Stopped state. + c.state.State = define.ContainerStateStopped + c.state.PID = 0 + c.state.ConmonPID = 0 + + // Read the exit file to get our stopped time and exit code. 
+ return c.handleExitFile(exitFile, info) +} diff --git a/vendor/github.com/containers/libpod/libpod/container_internal_linux.go b/vendor/github.com/containers/libpod/libpod/container_internal_linux.go index 5aa4ee9a9e..471648bc80 100644 --- a/vendor/github.com/containers/libpod/libpod/container_internal_linux.go +++ b/vendor/github.com/containers/libpod/libpod/container_internal_linux.go @@ -21,7 +21,7 @@ import ( "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/buildah/pkg/secrets" "github.com/containers/libpod/libpod/define" - crioAnnotations "github.com/containers/libpod/pkg/annotations" + "github.com/containers/libpod/pkg/annotations" "github.com/containers/libpod/pkg/apparmor" "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/criu" @@ -49,19 +49,19 @@ func (c *Container) mountSHM(shmOptions string) error { } func (c *Container) unmountSHM(mount string) error { - if err := unix.Unmount(mount, unix.MNT_DETACH); err != nil { - if err != syscall.EINVAL { - logrus.Warnf("container %s failed to unmount %s : %v", c.ID(), mount, err) - } else { - logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err) + if err := unix.Unmount(mount, 0); err != nil { + if err != syscall.EINVAL && err != syscall.ENOENT { + return errors.Wrapf(err, "error unmounting container %s SHM mount %s", c.ID(), mount) } + // If it's just an EINVAL or ENOENT, debug logs only + logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err) } return nil } // prepare mounts the container and sets up other required resources like net // namespaces -func (c *Container) prepare() (err error) { +func (c *Container) prepare() (Err error) { var ( wg sync.WaitGroup netNS ns.NetNS @@ -78,15 +78,21 @@ func (c *Container) prepare() (err error) { // Set up network namespace if not already set up if c.config.CreateNetNS && c.state.NetNS == nil && !c.config.PostConfigureNetNS { netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c) + if createNetNSErr != nil { + return + } tmpStateLock.Lock() defer tmpStateLock.Unlock() // Assign NetNS attributes to container - if createNetNSErr == nil { - c.state.NetNS = netNS - c.state.NetworkStatus = networkStatus - } + c.state.NetNS = netNS + c.state.NetworkStatus = networkStatus + } + + // handle rootless network namespace setup + if c.state.NetNS != nil && c.config.NetMode == "slirp4netns" && !c.config.PostConfigureNetNS { + createNetNSErr = c.runtime.setupRootlessNetNS(c) } }() // Mount storage if not mounted @@ -108,31 +114,44 @@ func (c *Container) prepare() (err error) { logrus.Debugf("Created root filesystem for container %s at %s", c.ID(), c.state.Mountpoint) }() - defer func() { - if err != nil { - if err2 := c.cleanupNetwork(); err2 != nil { - logrus.Errorf("Error cleaning up container %s network: %v", c.ID(), err2) - } - if err2 := c.cleanupStorage(); err2 != nil { - logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), err2) - } - } - }() - wg.Wait() + var createErr error if createNetNSErr != nil { - if mountStorageErr != nil { - logrus.Error(createNetNSErr) - return mountStorageErr - } - return createNetNSErr + createErr = createNetNSErr } if mountStorageErr != nil { - return mountStorageErr + if createErr != nil { + logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr) + } + createErr = mountStorageErr + } + + // Only trigger storage cleanup if mountStorage was successful. + // Otherwise, we may mess up mount counters. 
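prepare() above runs network-namespace creation and storage mounting concurrently, then folds the two failures into a single error, logging whichever one it has to drop. The skeleton of that pattern, with stand-in setupNet/mountStorage functions:

    package libpodexample

    import (
        "log"
        "sync"
    )

    func setupNet() error     { return nil } // stand-in for createNetNS
    func mountStorage() error { return nil } // stand-in for mountStorage

    // prepare runs both setup steps in parallel and keeps one error,
    // logging rather than silently discarding the other.
    func prepare() error {
        var wg sync.WaitGroup
        var netErr, mountErr error
        wg.Add(2)
        go func() { defer wg.Done(); netErr = setupNet() }()
        go func() { defer wg.Done(); mountErr = mountStorage() }()
        wg.Wait()

        createErr := netErr
        if mountErr != nil {
            if createErr != nil {
                log.Printf("dropping earlier error: %v", createErr)
            }
            createErr = mountErr
        }
        return createErr
    }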
+ if createNetNSErr != nil && mountStorageErr == nil { + if err := c.cleanupStorage(); err != nil { + // createErr is guaranteed non-nil, so print + // unconditionally + logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr) + createErr = errors.Wrapf(err, "error unmounting storage for container %s after network create failure", c.ID()) + } + } + + // It's OK to unconditionally trigger network cleanup. If the network + // isn't ready it will do nothing. + if createErr != nil { + if err := c.cleanupNetwork(); err != nil { + logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr) + createErr = errors.Wrapf(err, "error cleaning up container %s network after setup failure", c.ID()) + } + } + + if createErr != nil { + return createErr } - // Save the container + // Save changes to container state return c.save() } @@ -168,6 +187,30 @@ func (c *Container) cleanupNetwork() error { return nil } +func (c *Container) getUserOverrides() *lookup.Overrides { + var hasPasswdFile, hasGroupFile bool + overrides := lookup.Overrides{} + for _, m := range c.config.Spec.Mounts { + if m.Destination == "/etc/passwd" { + overrides.ContainerEtcPasswdPath = m.Source + hasPasswdFile = true + } + if m.Destination == "/etc/group" { + overrides.ContainerEtcGroupPath = m.Source + hasGroupFile = true + } + if m.Destination == "/etc" { + if !hasPasswdFile { + overrides.ContainerEtcPasswdPath = filepath.Join(m.Source, "passwd") + } + if !hasGroupFile { + overrides.ContainerEtcGroupPath = filepath.Join(m.Source, "group") + } + } + } + return &overrides +} + // Generate spec for a container // Accepts a map of the container's dependencies func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { @@ -175,7 +218,8 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { span.SetTag("type", "container") defer span.Finish() - execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, nil) + overrides := c.getUserOverrides() + execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, overrides) if err != nil { return nil, err } @@ -266,6 +310,17 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { } } + hasHomeSet := false + for _, s := range c.config.Spec.Process.Env { + if strings.HasPrefix(s, "HOME=") { + hasHomeSet = true + break + } + } + if !hasHomeSet { + c.config.Spec.Process.Env = append(c.config.Spec.Process.Env, fmt.Sprintf("HOME=%s", execUser.Home)) + } + if c.config.User != "" { // User and Group must go together g.SetProcessUID(uint32(execUser.Uid)) @@ -336,9 +391,13 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { } g.SetRootPath(c.state.Mountpoint) - g.AddAnnotation(crioAnnotations.Created, c.config.CreatedTime.Format(time.RFC3339Nano)) + g.AddAnnotation(annotations.Created, c.config.CreatedTime.Format(time.RFC3339Nano)) g.AddAnnotation("org.opencontainers.image.stopSignal", fmt.Sprintf("%d", c.config.StopSignal)) + if _, exists := g.Config.Annotations[annotations.ContainerManager]; !exists { + g.AddAnnotation(annotations.ContainerManager, annotations.ContainerManagerLibpod) + } + for _, i := range c.config.Spec.Linux.Namespaces { if i.Type == spec.UTSNamespace { hostname := c.Hostname() @@ -360,27 +419,11 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { g.AddProcessEnv("container", "libpod") } - unified, err := cgroups.IsCgroup2UnifiedMode() + cgroupPath, err := c.getOCICgroupPath() if err != nil { return nil, err } - if 
rootless.IsRootless() && !unified { - g.SetLinuxCgroupsPath("") - } else if c.runtime.config.CgroupManager == SystemdCgroupsManager { - // When runc is set to use Systemd as a cgroup manager, it - // expects cgroups to be passed as follows: - // slice:prefix:name - systemdCgroups := fmt.Sprintf("%s:libpod:%s", path.Base(c.config.CgroupParent), c.ID()) - logrus.Debugf("Setting CGroups for container %s to %s", c.ID(), systemdCgroups) - g.SetLinuxCgroupsPath(systemdCgroups) - } else { - cgroupPath, err := c.CGroupPath() - if err != nil { - return nil, err - } - logrus.Debugf("Setting CGroup path for container %s to %s", c.ID(), cgroupPath) - g.SetLinuxCgroupsPath(cgroupPath) - } + g.SetLinuxCgroupsPath(cgroupPath) // Mounts need to be sorted so paths will not cover other paths mounts := sortMounts(g.Mounts()) @@ -474,12 +517,29 @@ func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) erro if unified { g.RemoveMount("/sys/fs/cgroup") - sourcePath := filepath.Join("/sys/fs/cgroup") - systemdMnt := spec.Mount{ - Destination: "/sys/fs/cgroup", - Type: "bind", - Source: sourcePath, - Options: []string{"bind", "private", "rw"}, + hasCgroupNs := false + for _, ns := range c.config.Spec.Linux.Namespaces { + if ns.Type == spec.CgroupNamespace { + hasCgroupNs = true + break + } + } + + var systemdMnt spec.Mount + if hasCgroupNs { + systemdMnt = spec.Mount{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"private", "rw"}, + } + } else { + systemdMnt = spec.Mount{ + Destination: "/sys/fs/cgroup", + Type: "bind", + Source: "/sys/fs/cgroup", + Options: []string{"bind", "private", "rw"}, + } } g.AddMount(systemdMnt) } else { @@ -490,6 +550,7 @@ func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) erro Options: []string{"bind", "nodev", "noexec", "nosuid"}, } g.AddMount(systemdMnt) + g.AddLinuxMaskedPaths("/sys/fs/cgroup/systemd/release_agent") } return nil @@ -583,7 +644,7 @@ func (c *Container) checkpointRestoreSupported() (err error) { if !criu.CheckForCriu() { return errors.Errorf("Checkpoint/Restore requires at least CRIU %d", criu.MinCriuVersion) } - if !c.ociRuntime.featureCheckCheckpointing() { + if !c.ociRuntime.SupportsCheckpoint() { return errors.Errorf("Configured runtime does not support checkpoint/restore") } return nil @@ -619,7 +680,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO return err } - if err := c.ociRuntime.checkpointContainer(c, options); err != nil { + if err := c.ociRuntime.CheckpointContainer(c, options); err != nil { return err } @@ -847,7 +908,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti } } - if err := c.ociRuntime.createContainer(c, &options); err != nil { + if err := c.ociRuntime.CreateContainer(c, &options); err != nil { return err } @@ -1027,7 +1088,7 @@ func (c *Container) makeBindMounts() error { } // Add Secret Mounts - secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless()) + secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless(), false) for _, mount := range secretMounts { if _, ok := c.state.BindMounts[mount.Destination]; !ok { c.state.BindMounts[mount.Destination] = mount.Source @@ -1039,6 +1100,11 @@ func (c *Container) makeBindMounts() error { // 
generateResolvConf generates a container's resolv.conf func (c *Container) generateResolvConf() (string, error) { + var ( + nameservers []string + cniNameServers []string + ) + resolvConf := "/etc/resolv.conf" for _, namespace := range c.config.Spec.Linux.Namespaces { if namespace.Type == spec.NetworkNamespace { @@ -1074,18 +1140,31 @@ return "", errors.Wrapf(err, "error parsing host resolv.conf") } - // Make a new resolv.conf - nameservers := resolvconf.GetNameservers(resolv.Content) - // slirp4netns has a built in DNS server. - if c.config.NetMode.IsSlirp4netns() { - nameservers = append([]string{"10.0.2.3"}, nameservers...) + // Check if CNI gave back any DNS servers for us to add in + cniResponse := c.state.NetworkStatus + for _, i := range cniResponse { + if i.DNS.Nameservers != nil { + cniNameServers = append(cniNameServers, i.DNS.Nameservers...) + logrus.Debugf("adding nameserver(s) from cni response of '%q'", i.DNS.Nameservers) + } } + + // If the user provided dns, it trumps all; then dns masq; then resolv.conf if len(c.config.DNSServer) > 0 { // We store DNS servers as net.IP, so need to convert to string - nameservers = []string{} for _, server := range c.config.DNSServer { nameservers = append(nameservers, server.String()) } + } else if len(cniNameServers) > 0 { + nameservers = append(nameservers, cniNameServers...) + } else { + // Make a new resolv.conf + nameservers = resolvconf.GetNameservers(resolv.Content) + // slirp4netns has a built in DNS server. + if c.config.NetMode.IsSlirp4netns() { + nameservers = append([]string{"10.0.2.3"}, nameservers...) + } + } search := resolvconf.GetSearchDomains(resolv.Content) @@ -1231,3 +1310,37 @@ func (c *Container) copyOwnerAndPerms(source, dest string) error { } return nil } + +// Teardown CNI config on refresh +func (c *Container) refreshCNI() error { + // Let's try and delete any lingering network config...
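generateResolvConf above settles on a strict precedence for nameservers: user-supplied --dns servers win outright, then CNI-returned servers, then the host's resolv.conf (with slirp4netns's built-in 10.0.2.3 prepended). Condensed into a sketch:

    // pickNameservers mirrors the precedence in the hunk above.
    func pickNameservers(user, cni, host []string, slirp4netns bool) []string {
        switch {
        case len(user) > 0:
            return user // user-configured DNS trumps all
        case len(cni) > 0:
            return cni // then DNS servers handed back by CNI
        default:
            if slirp4netns {
                // slirp4netns ships a built-in DNS server
                return append([]string{"10.0.2.3"}, host...)
            }
            return host // finally, the host's own resolv.conf
        }
    }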
+ podNetwork := c.runtime.getPodNetwork(c.ID(), c.config.Name, "", c.config.Networks, c.config.PortMappings, c.config.StaticIP) + return c.runtime.netPlugin.TearDownPod(podNetwork) +} + +// Get cgroup path in a format suitable for the OCI spec +func (c *Container) getOCICgroupPath() (string, error) { + unified, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return "", err + } + if (rootless.IsRootless() && !unified) || c.config.NoCgroups { + return "", nil + } else if c.runtime.config.CgroupManager == define.SystemdCgroupsManager { + // When runc is set to use Systemd as a cgroup manager, it + // expects cgroups to be passed as follows: + // slice:prefix:name + systemdCgroups := fmt.Sprintf("%s:libpod:%s", path.Base(c.config.CgroupParent), c.ID()) + logrus.Debugf("Setting CGroups for container %s to %s", c.ID(), systemdCgroups) + return systemdCgroups, nil + } else if c.runtime.config.CgroupManager == define.CgroupfsCgroupsManager { + cgroupPath, err := c.CGroupPath() + if err != nil { + return "", err + } + logrus.Debugf("Setting CGroup path for container %s to %s", c.ID(), cgroupPath) + return cgroupPath, nil + } else { + return "", errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager %s requested", c.runtime.config.CgroupManager) + } +} diff --git a/vendor/github.com/containers/libpod/libpod/container_internal_unsupported.go b/vendor/github.com/containers/libpod/libpod/container_internal_unsupported.go index 6fa19a778c..4abaa63621 100644 --- a/vendor/github.com/containers/libpod/libpod/container_internal_unsupported.go +++ b/vendor/github.com/containers/libpod/libpod/container_internal_unsupported.go @@ -40,3 +40,11 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti func (c *Container) copyOwnerAndPerms(source, dest string) error { return nil } + +func (c *Container) refreshCNI() error { + return define.ErrNotImplemented +} + +func (c *Container) getOCICgroupPath() (string, error) { + return "", define.ErrNotImplemented +} diff --git a/vendor/github.com/containers/libpod/libpod/container_log_linux.go b/vendor/github.com/containers/libpod/libpod/container_log_linux.go index 8a87a8796e..c4acc3d4f6 100644 --- a/vendor/github.com/containers/libpod/libpod/container_log_linux.go +++ b/vendor/github.com/containers/libpod/libpod/container_log_linux.go @@ -6,6 +6,7 @@ package libpod import ( "fmt" "io" + "math" "strings" "time" @@ -30,7 +31,11 @@ const ( func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *logs.LogLine) error { var config journal.JournalReaderConfig - config.NumFromTail = options.Tail + if options.Tail < 0 { + config.NumFromTail = math.MaxUint64 + } else { + config.NumFromTail = uint64(options.Tail) + } config.Formatter = journalFormatter defaultTime := time.Time{} if options.Since != defaultTime { @@ -54,7 +59,7 @@ func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *l if r == nil { return errors.Errorf("journal reader creation failed") } - if options.Tail == 0 { + if options.Tail == math.MaxInt64 { r.Rewind() } diff --git a/vendor/github.com/containers/libpod/libpod/container_top_linux.go b/vendor/github.com/containers/libpod/libpod/container_top_linux.go index ce471838dc..5f4f281309 100644 --- a/vendor/github.com/containers/libpod/libpod/container_top_linux.go +++ b/vendor/github.com/containers/libpod/libpod/container_top_linux.go @@ -15,6 +15,10 @@ import ( // Top gathers statistics about the running processes in a container. 
It returns a []string for output func (c *Container) Top(descriptors []string) ([]string, error) { + if c.config.NoCgroups { + return nil, errors.Wrapf(define.ErrNoCgroups, "cannot run top on container %s as it did not create a cgroup", c.ID()) + } + conStat, err := c.State() if err != nil { return nil, errors.Wrapf(err, "unable to look up state for %s", c.ID()) diff --git a/vendor/github.com/containers/libpod/libpod/define/config.go b/vendor/github.com/containers/libpod/libpod/define/config.go index d8d6ccf55e..8bd59be753 100644 --- a/vendor/github.com/containers/libpod/libpod/define/config.go +++ b/vendor/github.com/containers/libpod/libpod/define/config.go @@ -7,14 +7,30 @@ var ( DefaultInfraImage = "k8s.gcr.io/pause:3.1" // DefaultInfraCommand to be run in an infra container DefaultInfraCommand = "/pause" + // DefaultSHMLockPath is the default path for SHM locks + DefaultSHMLockPath = "/libpod_lock" + // DefaultRootlessSHMLockPath is the default path for rootless SHM locks + DefaultRootlessSHMLockPath = "/libpod_rootless_lock" + // DefaultDetachKeys is the default keys sequence for detaching a + // container + DefaultDetachKeys = "ctrl-p,ctrl-q" ) -// CtrRemoveTimeout is the default number of seconds to wait after stopping a container -// before sending the kill signal -const CtrRemoveTimeout = 10 +const ( + // CtrRemoveTimeout is the default number of seconds to wait after stopping a container + // before sending the kill signal + CtrRemoveTimeout = 10 + // DefaultTransport is a prefix that we apply to an image name + // to check docker hub first for the image + DefaultTransport = "docker://" +) // InfoData holds the info type, i.e store, host etc and the data for each type type InfoData struct { Type string Data map[string]interface{} } + +// VolumeDriverLocal is the "local" volume driver. It is managed by libpod +// itself. +const VolumeDriverLocal = "local" diff --git a/vendor/github.com/containers/libpod/libpod/define/errors.go b/vendor/github.com/containers/libpod/libpod/define/errors.go index 9d532263c6..5230628665 100644 --- a/vendor/github.com/containers/libpod/libpod/define/errors.go +++ b/vendor/github.com/containers/libpod/libpod/define/errors.go @@ -61,6 +61,14 @@ var ( // the user. ErrDetach = utils.ErrDetach + // ErrNoCgroups indicates that the container does not have its own + // CGroup. + ErrNoCgroups = errors.New("this container does not have a cgroup") + + // ErrRootless indicates that the given command cannot be run without + // root.
+ ErrRootless = errors.New("operation requires root privileges") + // ErrRuntimeStopped indicates that the runtime has already been shut // down and no further operations can be performed on it ErrRuntimeStopped = errors.New("runtime has already been stopped") @@ -108,6 +116,10 @@ var ( // that was not found ErrOCIRuntimeNotFound = errors.New("OCI runtime command not found error") + // ErrOCIRuntimeUnavailable indicates that the OCI runtime associated to a container + // could not be found in the configuration + ErrOCIRuntimeUnavailable = errors.New("OCI runtime not available in the current configuration") + // ErrConmonOutdated indicates the version of conmon found (whether via the configuration or $PATH) // is out of date for the current podman version ErrConmonOutdated = errors.New("outdated conmon version") diff --git a/vendor/github.com/containers/libpod/libpod/define/exec_codes.go b/vendor/github.com/containers/libpod/libpod/define/exec_codes.go index 7184f1e59f..f94616b332 100644 --- a/vendor/github.com/containers/libpod/libpod/define/exec_codes.go +++ b/vendor/github.com/containers/libpod/libpod/define/exec_codes.go @@ -1,7 +1,10 @@ package define import ( + "strings" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) const ( @@ -28,3 +31,20 @@ func TranslateExecErrorToExitCode(originalEC int, err error) int { } return originalEC } + +// ExitCode reads the error message when failing to executing container process +// and then returns 0 if no error, ExecErrorCodeNotFound if command does not exist, or ExecErrorCodeCannotInvoke for +// all other errors +func ExitCode(err error) int { + if err == nil { + return 0 + } + e := strings.ToLower(err.Error()) + logrus.Debugf("ExitCode msg: %q", e) + if strings.Contains(e, "not found") || + strings.Contains(e, "no such file") { + return ExecErrorCodeNotFound + } + + return ExecErrorCodeCannotInvoke +} diff --git a/vendor/github.com/containers/libpod/libpod/define/runtime.go b/vendor/github.com/containers/libpod/libpod/define/runtime.go new file mode 100644 index 0000000000..4d8c6cb4db --- /dev/null +++ b/vendor/github.com/containers/libpod/libpod/define/runtime.go @@ -0,0 +1,37 @@ +package define + +import "time" + +// RuntimeStateStore is a constant indicating which state store implementation +// should be used by libpod +type RuntimeStateStore int + +const ( + // InvalidStateStore is an invalid state store + InvalidStateStore RuntimeStateStore = iota + // InMemoryStateStore is an in-memory state that will not persist data + // on containers and pods between libpod instances or after system + // reboot + InMemoryStateStore RuntimeStateStore = iota + // SQLiteStateStore is a state backed by a SQLite database + // It is presently disabled + SQLiteStateStore RuntimeStateStore = iota + // BoltDBStateStore is a state backed by a BoltDB database + BoltDBStateStore RuntimeStateStore = iota + // CgroupfsCgroupsManager represents cgroupfs native cgroup manager + CgroupfsCgroupsManager = "cgroupfs" + // SystemdCgroupsManager represents systemd native cgroup manager + SystemdCgroupsManager = "systemd" + // ContainerCreateTimeout is the timeout before we decide we've failed + // to create a container. + // TODO: Make this generic - all OCI runtime operations should use the + // same timeout, this one. + // TODO: Consider dropping from 240 to 60 seconds. I don't think waiting + // 4 minutes versus 1 minute makes a real difference. 
+ ContainerCreateTimeout = 240 * time.Second
+ // DefaultShmSize is the default shm size
+ DefaultShmSize = 64 * 1024 * 1024
+ // NsRunDir is the default directory in which running network namespaces
+ // are stored
+ NsRunDir = "/var/run/netns"
+)
diff --git a/vendor/github.com/containers/libpod/libpod/healthcheck.go b/vendor/github.com/containers/libpod/libpod/healthcheck.go
index 0338828e48..e9c9507134 100644
--- a/vendor/github.com/containers/libpod/libpod/healthcheck.go
+++ b/vendor/github.com/containers/libpod/libpod/healthcheck.go
@@ -133,7 +133,9 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
 streams := new(AttachStreams)
 streams.OutputStream = hcw
 streams.ErrorStream = hcw
- streams.InputStream = os.Stdin
+
+ streams.InputStream = bufio.NewReader(os.Stdin)
+
 streams.AttachOutput = true
 streams.AttachError = true
 streams.AttachInput = true
@@ -141,7 +143,7 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
 logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID())
 timeStart := time.Now()
 hcResult := HealthCheckSuccess
- _, hcErr := c.Exec(false, false, []string{}, newCommand, "", "", streams, 0, nil, "")
+ _, hcErr := c.Exec(false, false, map[string]string{}, newCommand, "", "", streams, 0, nil, "")
 if hcErr != nil {
 errCause := errors.Cause(hcErr)
 hcResult = HealthCheckFailure
diff --git a/vendor/github.com/containers/libpod/libpod/image/docker_registry_options.go b/vendor/github.com/containers/libpod/libpod/image/docker_registry_options.go
index 60bb3c33f4..62a4af4653 100644
--- a/vendor/github.com/containers/libpod/libpod/image/docker_registry_options.go
+++ b/vendor/github.com/containers/libpod/libpod/image/docker_registry_options.go
@@ -3,8 +3,8 @@ package image
 import (
 "fmt"

- "github.com/containers/image/docker/reference"
- "github.com/containers/image/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
 podmanVersion "github.com/containers/libpod/version"
 )

@@ -26,6 +26,10 @@ type DockerRegistryOptions struct {
 // certificates and allows connecting to registries without encryption
 // - or forces it on even if registries.conf has the registry configured as insecure.
 DockerInsecureSkipTLSVerify types.OptionalBool
+ // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
+ OSChoice string
+ // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
+ ArchitectureChoice string
 }

 // GetSystemContext constructs a new system context from a parent context, the values in the DockerRegistryOptions, and other parameters.
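The OSChoice and ArchitectureChoice fields added above flow into containers/image's types.SystemContext, which is what lets a pull select a specific platform variant from a multi-arch manifest list instead of defaulting to runtime.GOOS/GOARCH. A minimal standalone sketch (the platform values here are illustrative, not taken from this diff):

package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

func main() {
	// Any copy or pull performed with this context will prefer the
	// linux/arm64 entry of a manifest list, regardless of the host.
	sc := &types.SystemContext{
		OSChoice:           "linux",
		ArchitectureChoice: "arm64",
	}
	fmt.Println(sc.OSChoice, sc.ArchitectureChoice)
}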
@@ -35,12 +39,16 @@ func (o DockerRegistryOptions) GetSystemContext(parent *types.SystemContext, add DockerCertPath: o.DockerCertPath, DockerInsecureSkipTLSVerify: o.DockerInsecureSkipTLSVerify, DockerArchiveAdditionalTags: additionalDockerArchiveTags, + OSChoice: o.OSChoice, + ArchitectureChoice: o.ArchitectureChoice, } if parent != nil { sc.SignaturePolicyPath = parent.SignaturePolicyPath sc.AuthFilePath = parent.AuthFilePath sc.DirForceCompress = parent.DirForceCompress sc.DockerRegistryUserAgent = parent.DockerRegistryUserAgent + sc.OSChoice = parent.OSChoice + sc.ArchitectureChoice = parent.ArchitectureChoice } return sc } diff --git a/vendor/github.com/containers/libpod/libpod/image/image.go b/vendor/github.com/containers/libpod/libpod/image/image.go index 068491f282..c912ac2ca4 100644 --- a/vendor/github.com/containers/libpod/libpod/image/image.go +++ b/vendor/github.com/containers/libpod/libpod/image/image.go @@ -3,33 +3,35 @@ package image import ( "context" "encoding/json" + stderrors "errors" "fmt" "io" + "io/ioutil" "os" "path/filepath" + "sort" "strings" "syscall" "time" - types2 "github.com/containernetworking/cni/pkg/types" - cp "github.com/containers/image/copy" - "github.com/containers/image/directory" - dockerarchive "github.com/containers/image/docker/archive" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - ociarchive "github.com/containers/image/oci/archive" - is "github.com/containers/image/storage" - "github.com/containers/image/tarball" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/directory" + dockerarchive "github.com/containers/image/v5/docker/archive" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + ociarchive "github.com/containers/image/v5/oci/archive" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/tarball" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" "github.com/containers/libpod/libpod/driver" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/inspect" "github.com/containers/libpod/pkg/registries" "github.com/containers/libpod/pkg/util" "github.com/containers/storage" - "github.com/containers/storage/pkg/reexec" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -43,8 +45,8 @@ import ( type Image struct { // Adding these two structs for now but will cull when we near // completion of this library. 
- imgRef types.Image - storeRef types.ImageReference + imgRef types.Image + imgSrcRef types.ImageSource inspect.ImageData inspect.ImageResult inspectInfo *types.ImageInspectInfo @@ -73,7 +75,10 @@ type InfoImage struct { } // ErrRepoTagNotFound is the error returned when the image id given doesn't match a rep tag in store -var ErrRepoTagNotFound = errors.New("unable to match user input to any specific repotag") +var ErrRepoTagNotFound = stderrors.New("unable to match user input to any specific repotag") + +// ErrImageIsBareList is the error returned when the image is just a list or index +var ErrImageIsBareList = stderrors.New("image contains a manifest list or image index, but no runnable image") // NewImageRuntimeFromStore creates an ImageRuntime based on a provided store func NewImageRuntimeFromStore(store storage.Store) *Runtime { @@ -85,9 +90,6 @@ func NewImageRuntimeFromStore(store storage.Store) *Runtime { // NewImageRuntimeFromOptions creates an Image Runtime including the store given // store options func NewImageRuntimeFromOptions(options storage.StoreOptions) (*Runtime, error) { - if reexec.Init() { - return nil, errors.Errorf("unable to reexec") - } store, err := setStore(options) if err != nil { return nil, err @@ -135,7 +137,7 @@ func (ir *Runtime) NewFromLocal(name string) (*Image, error) { // New creates a new image object where the image could be local // or remote -func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, forcePull bool, label *string) (*Image, error) { +func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile string, writer io.Writer, dockeroptions *DockerRegistryOptions, signingoptions SigningOptions, label *string, pullType util.PullType) (*Image, error) { span, _ := opentracing.StartSpanFromContext(ctx, "newImage") span.SetTag("type", "runtime") defer span.Finish() @@ -145,11 +147,13 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile InputName: name, imageruntime: ir, } - if !forcePull { + if pullType != util.PullImageAlways { localImage, err := newImage.getLocalImage() if err == nil { newImage.image = localImage return &newImage, nil + } else if pullType == util.PullImageNever { + return nil, err } } @@ -296,9 +300,23 @@ func (i *Image) Digest() digest.Digest { return i.image.Digest } +// Digests returns the image's digests +func (i *Image) Digests() []digest.Digest { + return i.image.Digests +} + +// GetManifest returns the image's manifest as a byte array +// and manifest type as a string. +func (i *Image) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + imgSrcRef, err := i.toImageSourceRef(ctx) + if err != nil { + return nil, "", err + } + return imgSrcRef.GetManifest(ctx, instanceDigest) +} + // Manifest returns the image's manifest as a byte array -// and manifest type as a string. The manifest type is -// MediaTypeImageManifest from ociv1. +// and manifest type as a string. 
func (i *Image) Manifest(ctx context.Context) ([]byte, string, error) { imgRef, err := i.toImageRef(ctx) if err != nil { @@ -307,29 +325,54 @@ func (i *Image) Manifest(ctx context.Context) ([]byte, string, error) { return imgRef.Manifest(ctx) } -// Names returns a string array of names associated with the image +// Names returns a string array of names associated with the image, which may be a mixture of tags and digests func (i *Image) Names() []string { return i.image.Names } -// RepoDigests returns a string array of repodigests associated with the image -func (i *Image) RepoDigests() ([]string, error) { - var repoDigests []string - imageDigest := i.Digest() - +// RepoTags returns a string array of repotags associated with the image +func (i *Image) RepoTags() ([]string, error) { + var repoTags []string for _, name := range i.Names() { named, err := reference.ParseNormalizedNamed(name) if err != nil { return nil, err } - - canonical, err := reference.WithDigest(reference.TrimNamed(named), imageDigest) - if err != nil { - return nil, err + if tagged, isTagged := named.(reference.NamedTagged); isTagged { + repoTags = append(repoTags, tagged.String()) } + } + return repoTags, nil +} + +// RepoDigests returns a string array of repodigests associated with the image +func (i *Image) RepoDigests() ([]string, error) { + var repoDigests []string + added := make(map[string]struct{}) + + for _, name := range i.Names() { + for _, imageDigest := range append(i.Digests(), i.Digest()) { + if imageDigest == "" { + continue + } + + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, err + } + + canonical, err := reference.WithDigest(reference.TrimNamed(named), imageDigest) + if err != nil { + return nil, err + } - repoDigests = append(repoDigests, canonical.String()) + if _, alreadyInList := added[canonical.String()]; !alreadyInList { + repoDigests = append(repoDigests, canonical.String()) + added[canonical.String()] = struct{}{} + } + } } + sort.Strings(repoDigests) return repoDigests, nil } @@ -381,31 +424,6 @@ func (i *Image) Remove(ctx context.Context, force bool) error { return nil } -// Decompose an Image -func (i *Image) Decompose() error { - return types2.NotImplementedError -} - -// TODO: Rework this method to not require an assembly of the fq name with transport -/* -// GetManifest tries to GET an images manifest, returns nil on success and err on failure -func (i *Image) GetManifest() error { - pullRef, err := alltransports.ParseImageName(i.assembleFqNameTransport()) - if err != nil { - return errors.Errorf("unable to parse '%s'", i.Names()[0]) - } - imageSource, err := pullRef.NewImageSource(nil) - if err != nil { - return errors.Wrapf(err, "unable to create new image source") - } - _, _, err = imageSource.GetManifest(nil) - if err == nil { - return nil - } - return err -} -*/ - // getImage retrieves an image matching the given name or hash from system // storage // If no matching image can be found, an error is returned @@ -553,7 +571,7 @@ func (i *Image) UntagImage(tag string) error { // PushImageToHeuristicDestination pushes the given image to "destination", which is heuristically parsed. // Use PushImageToReference if the destination is known precisely. 
-func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { +func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { if destination == "" { return errors.Wrapf(syscall.EINVAL, "destination image name must be specified") } @@ -571,11 +589,11 @@ func (i *Image) PushImageToHeuristicDestination(ctx context.Context, destination return err } } - return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, additionalDockerArchiveTags) + return i.PushImageToReference(ctx, dest, manifestMIMEType, authFile, digestFile, signaturePolicyPath, writer, forceCompress, signingOptions, dockerRegistryOptions, additionalDockerArchiveTags) } // PushImageToReference pushes the given image to a location described by the given path -func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { +func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageReference, manifestMIMEType, authFile, digestFile, signaturePolicyPath string, writer io.Writer, forceCompress bool, signingOptions SigningOptions, dockerRegistryOptions *DockerRegistryOptions, additionalDockerArchiveTags []reference.NamedTagged) error { sc := GetSystemContext(signaturePolicyPath, authFile, forceCompress) sc.BlobInfoCacheDir = filepath.Join(i.imageruntime.store.GraphRoot(), "cache") @@ -597,86 +615,120 @@ func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageRefere copyOptions := getCopyOptions(sc, writer, nil, dockerRegistryOptions, signingOptions, manifestMIMEType, additionalDockerArchiveTags) copyOptions.DestinationCtx.SystemRegistriesConfPath = registries.SystemRegistriesConfPath() // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place. 
// Copy the image to the remote destination - _, err = cp.Image(ctx, policyContext, dest, src, copyOptions) + manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions) if err != nil { return errors.Wrapf(err, "Error copying image to the remote destination") } + digest, err := manifest.Digest(manifestBytes) + if err != nil { + return errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest)) + } + + logrus.Debugf("Successfully pushed %s with digest %s", transports.ImageName(dest), digest.String()) + + if digestFile != "" { + if err = ioutil.WriteFile(digestFile, []byte(digest.String()), 0644); err != nil { + return errors.Wrapf(err, "failed to write digest to file %q", digestFile) + } + } i.newImageEvent(events.Push) return nil } // MatchesID returns a bool based on if the input id // matches the image's id +// TODO: This isn't used anywhere, so remove it func (i *Image) MatchesID(id string) bool { return strings.HasPrefix(i.ID(), id) } -// toStorageReference returns a *storageReference from an Image -func (i *Image) toStorageReference() (types.ImageReference, error) { - var lookupName string - if i.storeRef == nil { - if i.image != nil { - lookupName = i.ID() - } else { - lookupName = i.InputName - } - storeRef, err := is.Transport.ParseStoreReference(i.imageruntime.store, lookupName) - if err != nil { - return nil, err - } - i.storeRef = storeRef - } - return i.storeRef, nil -} - // ToImageRef returns an image reference type from an image // TODO: Hopefully we can remove this exported function for mheon func (i *Image) ToImageRef(ctx context.Context) (types.Image, error) { return i.toImageRef(ctx) } -// toImageRef returns an Image Reference type from an image -func (i *Image) toImageRef(ctx context.Context) (types.Image, error) { +// toImageSourceRef returns an ImageSource Reference type from an image +func (i *Image) toImageSourceRef(ctx context.Context) (types.ImageSource, error) { if i == nil { - return nil, errors.Errorf("cannot convert nil image to image reference") + return nil, errors.Errorf("cannot convert nil image to image source reference") } - if i.imgRef == nil { + if i.imgSrcRef == nil { ref, err := is.Transport.ParseStoreReference(i.imageruntime.store, "@"+i.ID()) if err != nil { return nil, errors.Wrapf(err, "error parsing reference to image %q", i.ID()) } - imgRef, err := ref.NewImage(ctx, nil) + imgSrcRef, err := ref.NewImageSource(ctx, nil) if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", i.ID()) + return nil, errors.Wrapf(err, "error reading image %q as image source", i.ID()) } - i.imgRef = imgRef + i.imgSrcRef = imgSrcRef } - return i.imgRef, nil -} - -// sizer knows its size. 
-type sizer interface { - Size() (int64, error) + return i.imgSrcRef, nil } //Size returns the size of the image func (i *Image) Size(ctx context.Context) (*uint64, error) { - storeRef, err := is.Transport.ParseStoreReference(i.imageruntime.store, i.ID()) - if err != nil { - return nil, err + if i.image == nil { + localImage, err := i.getLocalImage() + if err != nil { + return nil, err + } + i.image = localImage } - systemContext := &types.SystemContext{} - img, err := storeRef.NewImageSource(ctx, systemContext) + if sum, err := i.imageruntime.store.ImageSize(i.ID()); err == nil && sum >= 0 { + usum := uint64(sum) + return &usum, nil + } + return nil, errors.Errorf("unable to determine size") +} + +// toImageRef returns an Image Reference type from an image +func (i *Image) toImageRef(ctx context.Context) (types.Image, error) { + if i == nil { + return nil, errors.Errorf("cannot convert nil image to image reference") + } + imgSrcRef, err := i.toImageSourceRef(ctx) if err != nil { return nil, err } - if s, ok := img.(sizer); ok { - if sum, err := s.Size(); err == nil { - usum := uint64(sum) - return &usum, nil + if i.imgRef == nil { + systemContext := &types.SystemContext{} + unparsedDefaultInstance := image.UnparsedInstance(imgSrcRef, nil) + imgRef, err := image.FromUnparsedImage(ctx, systemContext, unparsedDefaultInstance) + if err != nil { + // check for a "tried-to-treat-a-bare-list-like-a-runnable-image" problem, else + // return info about the not-a-bare-list runnable image part of this storage.Image + if manifestBytes, manifestType, err2 := imgSrcRef.GetManifest(ctx, nil); err2 == nil { + if manifest.MIMETypeIsMultiImage(manifestType) { + if list, err3 := manifest.ListFromBlob(manifestBytes, manifestType); err3 == nil { + switch manifestType { + case ociv1.MediaTypeImageIndex: + err = errors.Wrapf(ErrImageIsBareList, "%q is an image index", i.InputName) + case manifest.DockerV2ListMediaType: + err = errors.Wrapf(ErrImageIsBareList, "%q is a manifest list", i.InputName) + default: + err = errors.Wrapf(ErrImageIsBareList, "%q", i.InputName) + } + for _, instanceDigest := range list.Instances() { + instance := instanceDigest + unparsedInstance := image.UnparsedInstance(imgSrcRef, &instance) + if imgRef2, err4 := image.FromUnparsedImage(ctx, systemContext, unparsedInstance); err4 == nil { + imgRef = imgRef2 + err = nil + break + } + } + } + } + } + if err != nil { + return nil, errors.Wrapf(err, "error reading image %q as image", i.ID()) + } } + i.imgRef = imgRef } - return nil, errors.Errorf("unable to determine size") + return i.imgRef, nil } // DriverData gets the driver data from the store on a layer @@ -703,6 +755,9 @@ type History struct { func (i *Image) History(ctx context.Context) ([]*History, error) { img, err := i.toImageRef(ctx) if err != nil { + if errors.Cause(err) == ErrImageIsBareList { + return nil, nil + } return nil, err } oci, err := img.OCIConfig(ctx) @@ -848,7 +903,10 @@ func (i *Image) GetLabel(ctx context.Context, label string) (string, error) { func (i *Image) Annotations(ctx context.Context) (map[string]string, error) { imageManifest, manifestType, err := i.Manifest(ctx) if err != nil { - return nil, err + imageManifest, manifestType, err = i.GetManifest(ctx, nil) + if err != nil { + return nil, err + } } annotations := make(map[string]string) switch manifestType { @@ -863,24 +921,19 @@ func (i *Image) Annotations(ctx context.Context) (map[string]string, error) { return annotations, nil } -// ociv1Image converts and image to an imgref and then an -// ociv1 image 
type +// ociv1Image converts an image to an imgref and then returns its config blob +// converted to an ociv1 image type func (i *Image) ociv1Image(ctx context.Context) (*ociv1.Image, error) { imgRef, err := i.toImageRef(ctx) if err != nil { return nil, err } - return imgRef.OCIConfig(ctx) } func (i *Image) imageInspectInfo(ctx context.Context) (*types.ImageInspectInfo, error) { if i.inspectInfo == nil { - sr, err := i.toStorageReference() - if err != nil { - return nil, err - } - ic, err := sr.NewImage(ctx, &types.SystemContext{}) + ic, err := i.toImageRef(ctx) if err != nil { return nil, err } @@ -901,18 +954,23 @@ func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) { ociv1Img, err := i.ociv1Image(ctx) if err != nil { - return nil, err + ociv1Img = &ociv1.Image{} } info, err := i.imageInspectInfo(ctx) if err != nil { - return nil, err + info = &types.ImageInspectInfo{} } annotations, err := i.Annotations(ctx) if err != nil { return nil, err } - size, err := i.Size(ctx) + size := int64(-1) + if usize, err := i.Size(ctx); err == nil { + size = int64(*usize) + } + + repoTags, err := i.RepoTags() if err != nil { return nil, err } @@ -927,7 +985,7 @@ func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) { return nil, err } - _, manifestType, err := i.Manifest(ctx) + _, manifestType, err := i.GetManifest(ctx, nil) if err != nil { return nil, errors.Wrapf(err, "unable to determine manifest type") } @@ -938,7 +996,7 @@ func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) { data := &inspect.ImageData{ ID: i.ID(), - RepoTags: i.Names(), + RepoTags: repoTags, RepoDigests: repoDigests, Comment: comment, Created: ociv1Img.Created, @@ -947,8 +1005,8 @@ func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) { Os: ociv1Img.OS, Config: &ociv1Img.Config, Version: info.DockerVersion, - Size: int64(*size), - VirtualSize: int64(*size), + Size: size, + VirtualSize: size, Annotations: annotations, Digest: i.Digest(), Labels: info.Labels, @@ -1077,6 +1135,9 @@ func splitString(input string) string { func (i *Image) IsParent(ctx context.Context) (bool, error) { children, err := i.getChildren(ctx, 1) if err != nil { + if errors.Cause(err) == ErrImageIsBareList { + return false, nil + } return false, err } return len(children) > 0, nil @@ -1160,6 +1221,9 @@ func (i *Image) GetParent(ctx context.Context) (*Image, error) { // fetch the configuration for the child image child, err := i.ociv1Image(ctx) if err != nil { + if errors.Cause(err) == ErrImageIsBareList { + return nil, nil + } return nil, err } for _, img := range images { @@ -1200,12 +1264,24 @@ func (i *Image) GetParent(ctx context.Context) (*Image, error) { // GetChildren returns a list of the imageIDs that depend on the image func (i *Image) GetChildren(ctx context.Context) ([]string, error) { - return i.getChildren(ctx, 0) + children, err := i.getChildren(ctx, 0) + if err != nil { + if errors.Cause(err) == ErrImageIsBareList { + return nil, nil + } + return nil, err + } + return children, nil } // getChildren returns a list of at most "max" imageIDs that depend on the image func (i *Image) getChildren(ctx context.Context, max int) ([]string, error) { var children []string + + if _, err := i.toImageRef(ctx); err != nil { + return nil, nil + } + images, err := i.imageruntime.GetImages() if err != nil { return nil, err @@ -1296,6 +1372,9 @@ func (i *Image) Comment(ctx context.Context, manifestType string) (string, error } ociv1Img, err := i.ociv1Image(ctx) if err != nil { + if 
errors.Cause(err) == ErrImageIsBareList { + return "", nil + } return "", err } if len(ociv1Img.History) > 0 { @@ -1356,7 +1435,7 @@ func (i *Image) Save(ctx context.Context, source, format, output string, moreTag return err } } - if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", writer, compress, SigningOptions{}, &DockerRegistryOptions{}, additionaltags); err != nil { + if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", "", writer, compress, SigningOptions{}, &DockerRegistryOptions{}, additionaltags); err != nil { return errors.Wrapf(err, "unable to save %q", source) } i.newImageEvent(events.Save) diff --git a/vendor/github.com/containers/libpod/libpod/image/parts.go b/vendor/github.com/containers/libpod/libpod/image/parts.go index dfdf0b08a7..d4677f9353 100644 --- a/vendor/github.com/containers/libpod/libpod/image/parts.go +++ b/vendor/github.com/containers/libpod/libpod/image/parts.go @@ -3,7 +3,7 @@ package image import ( "strings" - "github.com/containers/image/docker/reference" + "github.com/containers/image/v5/docker/reference" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/libpod/libpod/image/prune.go b/vendor/github.com/containers/libpod/libpod/image/prune.go index 6ef5d321fd..006cbdf220 100644 --- a/vendor/github.com/containers/libpod/libpod/image/prune.go +++ b/vendor/github.com/containers/libpod/libpod/image/prune.go @@ -4,7 +4,9 @@ import ( "context" "github.com/containers/libpod/libpod/events" + "github.com/containers/storage" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // GetPruneImages returns a slice of images that have no names/unused @@ -44,6 +46,10 @@ func (ir *Runtime) PruneImages(ctx context.Context, all bool) ([]string, error) } for _, p := range pruneImages { if err := p.Remove(ctx, true); err != nil { + if errors.Cause(err) == storage.ErrImageUsedByContainer { + logrus.Warnf("Failed to prune image %s as it is in use: %v", p.ID(), err) + continue + } return nil, errors.Wrap(err, "failed to prune image") } defer p.newImageEvent(events.Prune) diff --git a/vendor/github.com/containers/libpod/libpod/image/pull.go b/vendor/github.com/containers/libpod/libpod/image/pull.go index 78cfe36261..7f5dc33b9a 100644 --- a/vendor/github.com/containers/libpod/libpod/image/pull.go +++ b/vendor/github.com/containers/libpod/libpod/image/pull.go @@ -7,16 +7,17 @@ import ( "path/filepath" "strings" - cp "github.com/containers/image/copy" - "github.com/containers/image/directory" - "github.com/containers/image/docker" - dockerarchive "github.com/containers/image/docker/archive" - "github.com/containers/image/docker/tarfile" - ociarchive "github.com/containers/image/oci/archive" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/directory" + "github.com/containers/image/v5/docker" + dockerarchive "github.com/containers/image/v5/docker/archive" + "github.com/containers/image/v5/docker/tarfile" + ociarchive "github.com/containers/image/v5/oci/archive" + oci "github.com/containers/image/v5/oci/layout" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/registries" "github.com/hashicorp/go-multierror" 
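The PruneImages change above depends on pkg/errors' Cause to match a sentinel error through layers of Wrapf context. A self-contained sketch of that pattern; errImageInUse here is a stand-in for storage.ErrImageUsedByContainer, not the real sentinel:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Stand-in for storage.ErrImageUsedByContainer.
var errImageInUse = errors.New("image is in use by a container")

// remove fails with the sentinel wrapped in extra context,
// the way storage errors surface through libpod.
func remove(id string) error {
	return errors.Wrapf(errImageInUse, "failed to remove image %s", id)
}

func main() {
	for _, id := range []string{"abc123"} {
		if err := remove(id); err != nil {
			// Cause unwraps the Wrapf chain back to the sentinel,
			// so an in-use image is skipped rather than aborting.
			if errors.Cause(err) == errImageInUse {
				fmt.Printf("skipping %s: %v\n", id, err)
				continue
			}
			panic(err) // any other failure aborts the prune
		}
	}
}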
@@ -37,6 +38,9 @@ var ( DirTransport = directory.Transport.Name() // DockerTransport is the transport for docker registries DockerTransport = docker.Transport.Name() + // OCIDirTransport is the transport for pushing and pulling + // images to and from a directory containing an OCI image + OCIDirTransport = oci.Transport.Name() // AtomicTransport is the transport for atomic registries AtomicTransport = "atomic" // DefaultTransport is a prefix that we apply to an image name @@ -189,12 +193,12 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types. return ir.getSinglePullRefPairGoal(srcRef, dest) case DirTransport: - path := srcRef.StringWithinTransport() - image := path - if image[:1] == "/" { - // Set localhost as the registry so docker.io isn't prepended, and the path becomes the repository - image = DefaultLocalRegistry + image - } + image := toLocalImageName(srcRef.StringWithinTransport()) + return ir.getSinglePullRefPairGoal(srcRef, image) + + case OCIDirTransport: + split := strings.SplitN(srcRef.StringWithinTransport(), ":", 2) + image := toLocalImageName(split[0]) return ir.getSinglePullRefPairGoal(srcRef, image) default: @@ -202,6 +206,15 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types. } } +// toLocalImageName converts an image name into a 'localhost/' prefixed one +func toLocalImageName(imageName string) string { + return fmt.Sprintf( + "%s/%s", + DefaultLocalRegistry, + strings.TrimLeft(imageName, "/"), + ) +} + // pullImageFromHeuristicSource pulls an image based on inputName, which is heuristically parsed and may involve configured registries. // Use pullImageFromReference if the source is known precisely. func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, label *string) ([]string, error) { @@ -210,6 +223,10 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s var goal *pullGoal sc := GetSystemContext(signaturePolicyPath, authfile, false) + if dockerOptions != nil { + sc.OSChoice = dockerOptions.OSChoice + sc.ArchitectureChoice = dockerOptions.ArchitectureChoice + } sc.BlobInfoCacheDir = filepath.Join(ir.store.GraphRoot(), "cache") srcRef, err := alltransports.ParseImageName(inputName) if err != nil { @@ -233,6 +250,10 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag defer span.Finish() sc := GetSystemContext(signaturePolicyPath, authfile, false) + if dockerOptions != nil { + sc.OSChoice = dockerOptions.OSChoice + sc.ArchitectureChoice = dockerOptions.ArchitectureChoice + } goal, err := ir.pullGoalFromImageReference(ctx, srcRef, transports.ImageName(srcRef), sc) if err != nil { return nil, errors.Wrapf(err, "error determining pull goal for image %q", transports.ImageName(srcRef)) diff --git a/vendor/github.com/containers/libpod/libpod/image/search.go b/vendor/github.com/containers/libpod/libpod/image/search.go index 82ef4f75aa..fd29dac457 100644 --- a/vendor/github.com/containers/libpod/libpod/image/search.go +++ b/vendor/github.com/containers/libpod/libpod/image/search.go @@ -6,8 +6,8 @@ import ( "strings" "sync" - "github.com/containers/image/docker" - "github.com/containers/image/types" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/types" sysreg "github.com/containers/libpod/pkg/registries" "github.com/pkg/errors" "github.com/sirupsen/logrus" diff --git 
a/vendor/github.com/containers/libpod/libpod/image/utils.go b/vendor/github.com/containers/libpod/libpod/image/utils.go index 544796a4b9..b7ea63c663 100644 --- a/vendor/github.com/containers/libpod/libpod/image/utils.go +++ b/vendor/github.com/containers/libpod/libpod/image/utils.go @@ -7,10 +7,10 @@ import ( "regexp" "strings" - cp "github.com/containers/image/copy" - "github.com/containers/image/docker/reference" - "github.com/containers/image/signature" - "github.com/containers/image/types" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/pkg/errors" ) @@ -87,18 +87,18 @@ func hasTransport(image string) bool { } // ReposToMap parses the specified repotags and returns a map with repositories -// as keys and the corresponding arrays of tags as values. -func ReposToMap(repotags []string) (map[string][]string, error) { - // map format is repo -> tag +// as keys and the corresponding arrays of tags or digests-as-strings as values. +func ReposToMap(names []string) (map[string][]string, error) { + // map format is repo -> []tag-or-digest repos := make(map[string][]string) - for _, repo := range repotags { + for _, name := range names { var repository, tag string - if len(repo) > 0 { - named, err := reference.ParseNormalizedNamed(repo) - repository = named.Name() + if len(name) > 0 { + named, err := reference.ParseNormalizedNamed(name) if err != nil { return nil, err } + repository = named.Name() if ref, ok := named.(reference.NamedTagged); ok { tag = ref.Tag() } else if ref, ok := named.(reference.Canonical); ok { diff --git a/vendor/github.com/containers/libpod/libpod/in_memory_state.go b/vendor/github.com/containers/libpod/libpod/in_memory_state.go index 7c4abd25d6..2144671a5e 100644 --- a/vendor/github.com/containers/libpod/libpod/in_memory_state.go +++ b/vendor/github.com/containers/libpod/libpod/in_memory_state.go @@ -3,6 +3,7 @@ package libpod import ( "strings" + "github.com/containers/libpod/libpod/config" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/registrar" "github.com/containers/storage/pkg/truncindex" @@ -80,8 +81,8 @@ func (s *InMemoryState) Refresh() error { // GetDBConfig is not implemented for in-memory state. // As we do not store a config, return an empty one. -func (s *InMemoryState) GetDBConfig() (*DBConfig, error) { - return &DBConfig{}, nil +func (s *InMemoryState) GetDBConfig() (*config.DBConfig, error) { + return &config.DBConfig{}, nil } // ValidateDBConfig is not implemented for the in-memory state. 
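The in-memory-state lookups that follow (lookupID, and LookupVolume further down) share one idea: a partial name resolves only if it is a prefix of exactly one key; two or more matches make it ambiguous. A simplified standalone sketch of that rule, with placeholder volume names (the real code first tries an exact match and, for containers, goes through a registrar and truncindex):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// lookupByPrefix returns the value whose key the prefix uniquely selects.
func lookupByPrefix(items map[string]string, prefix string) (string, error) {
	var found string
	for name := range items {
		if strings.HasPrefix(name, prefix) {
			if found != "" {
				return "", errors.New("more than one result for " + prefix)
			}
			found = name
		}
	}
	if found == "" {
		return "", errors.New("no match for " + prefix)
	}
	return items[found], nil
}

func main() {
	vols := map[string]string{"myvol1": "a", "myvol2": "b", "data": "c"}
	v, err := lookupByPrefix(vols, "da")
	fmt.Println(v, err) // "c" <nil>
	_, err = lookupByPrefix(vols, "myvol")
	fmt.Println(err) // ambiguous: matches myvol1 and myvol2
}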
@@ -115,15 +116,16 @@ func (s *InMemoryState) Container(id string) (*Container, error) { return ctr, nil } -// LookupContainer retrieves a container by full ID, unique partial ID, or name -func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) { +// lookupID retrieves a container or pod ID by full ID, unique partial ID, or +// name +func (s *InMemoryState) lookupID(idOrName string) (string, error) { var ( nameIndex *registrar.Registrar idIndex *truncindex.TruncIndex ) if idOrName == "" { - return nil, define.ErrEmptyID + return "", define.ErrEmptyID } if s.namespace != "" { @@ -131,7 +133,7 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) { if !ok { // We have no containers in the namespace // Return false - return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName) + return "", define.ErrNoSuchCtr } nameIndex = nsIndex.nameIndex idIndex = nsIndex.idIndex @@ -147,13 +149,53 @@ func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) { fullID, err = idIndex.Get(idOrName) if err != nil { if err == truncindex.ErrNotExist { - return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName) + return "", define.ErrNoSuchCtr } - return nil, errors.Wrapf(err, "error performing truncindex lookup for ID %s", idOrName) + return "", errors.Wrapf(err, "error performing truncindex lookup for ID %s", idOrName) } } else { - return nil, errors.Wrapf(err, "error performing registry lookup for ID %s", idOrName) + return "", errors.Wrapf(err, "error performing registry lookup for ID %s", idOrName) + } + } + + return fullID, nil +} + +// LookupContainerID retrieves a container ID by full ID, unique partial ID, or +// name +func (s *InMemoryState) LookupContainerID(idOrName string) (string, error) { + fullID, err := s.lookupID(idOrName) + + switch err { + case nil: + _, ok := s.containers[fullID] + if !ok { + // It's a pod, not a container + return "", errors.Wrapf(define.ErrNoSuchCtr, "name or ID %s is a pod, not a container", idOrName) } + + case define.ErrNoSuchCtr: + return "", errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName) + + default: + return "", err + } + + return fullID, nil +} + +// LookupContainer retrieves a container by full ID, unique partial ID, or name +func (s *InMemoryState) LookupContainer(idOrName string) (*Container, error) { + fullID, err := s.lookupID(idOrName) + + switch err { + case nil: + + case define.ErrNoSuchCtr: + return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName) + + default: + return nil, err } ctr, ok := s.containers[fullID] @@ -385,6 +427,16 @@ func (s *InMemoryState) AllContainers() ([]*Container, error) { return ctrs, nil } +// GetContainerConfig returns a container config from the database by full ID +func (s *InMemoryState) GetContainerConfig(id string) (*ContainerConfig, error) { + ctr, err := s.LookupContainer(id) + if err != nil { + return nil, err + } + + return ctr.Config(), nil +} + // RewriteContainerConfig rewrites a container's configuration. // This function is DANGEROUS, even with an in-memory state. // Please read the full comment on it in state.go before using it. @@ -425,6 +477,26 @@ func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error { return nil } +// RewriteVolumeConfig rewrites a volume's configuration. +// This function is DANGEROUS, even with in-memory state. 
+// Please read the full comment in state.go before using it. +func (s *InMemoryState) RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error { + if !volume.valid { + return define.ErrVolumeRemoved + } + + // If the volume does not exist, return error + stateVol, ok := s.volumes[volume.Name()] + if !ok { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name()) + } + + stateVol.config = newCfg + + return nil +} + // Volume retrieves a volume from its full name func (s *InMemoryState) Volume(name string) (*Volume, error) { if name == "" { @@ -439,6 +511,41 @@ func (s *InMemoryState) Volume(name string) (*Volume, error) { return vol, nil } +// LookupVolume finds a volume from an unambiguous partial ID. +func (s *InMemoryState) LookupVolume(name string) (*Volume, error) { + if name == "" { + return nil, define.ErrEmptyID + } + + vol, ok := s.volumes[name] + if ok { + return vol, nil + } + + // Alright, we've failed to find by full name. Now comes the expensive + // part. + // Loop through all volumes and look for matches. + var ( + foundMatch bool + candidate *Volume + ) + for volName, vol := range s.volumes { + if strings.HasPrefix(volName, name) { + if foundMatch { + return nil, errors.Wrapf(define.ErrVolumeExists, "more than one result for volume name %q", name) + } + candidate = vol + foundMatch = true + } + } + + if !foundMatch { + return nil, errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %q found", name) + } + + return candidate, nil +} + // HasVolume checks if a volume with the given name is present in the state func (s *InMemoryState) HasVolume(name string) (bool, error) { if name == "" { @@ -487,6 +594,36 @@ func (s *InMemoryState) RemoveVolume(volume *Volume) error { return nil } +// UpdateVolume updates a volume from the database. +// For the in-memory state, this is a no-op. +func (s *InMemoryState) UpdateVolume(volume *Volume) error { + if !volume.valid { + return define.ErrVolumeRemoved + } + + if _, ok := s.volumes[volume.Name()]; !ok { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name()) + } + + return nil +} + +// SaveVolume saves a volume's state to the database. +// For the in-memory state, this is a no-op. 
+func (s *InMemoryState) SaveVolume(volume *Volume) error { + if !volume.valid { + return define.ErrVolumeRemoved + } + + if _, ok := s.volumes[volume.Name()]; !ok { + volume.valid = false + return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %q not found in state", volume.Name()) + } + + return nil +} + // VolumeInUse checks if the given volume is being used by at least one container func (s *InMemoryState) VolumeInUse(volume *Volume) ([]string, error) { if !volume.valid { @@ -538,49 +675,22 @@ func (s *InMemoryState) Pod(id string) (*Pod, error) { // LookupPod retrieves a pod from the state from a full or unique partial ID or // a full name func (s *InMemoryState) LookupPod(idOrName string) (*Pod, error) { - var ( - nameIndex *registrar.Registrar - idIndex *truncindex.TruncIndex - ) + fullID, err := s.lookupID(idOrName) - if idOrName == "" { - return nil, define.ErrEmptyID - } + switch err { + case nil: - if s.namespace != "" { - nsIndex, ok := s.namespaceIndexes[s.namespace] - if !ok { - // We have no containers in the namespace - // Return false - return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container found with name or ID %s", idOrName) - } - nameIndex = nsIndex.nameIndex - idIndex = nsIndex.idIndex - } else { - nameIndex = s.nameIndex - idIndex = s.idIndex - } + case define.ErrNoSuchCtr, define.ErrNoSuchPod: + return nil, errors.Wrapf(define.ErrNoSuchPod, "no pod found with name or ID %s", idOrName) - fullID, err := nameIndex.Get(idOrName) - if err != nil { - if err == registrar.ErrNameNotReserved { - // What was passed is not a name, assume it's an ID - fullID, err = idIndex.Get(idOrName) - if err != nil { - if err == truncindex.ErrNotExist { - return nil, errors.Wrapf(define.ErrNoSuchPod, "no pod found with name or ID %s", idOrName) - } - return nil, errors.Wrapf(err, "error performing truncindex lookup for ID %s", idOrName) - } - } else { - return nil, errors.Wrapf(err, "error performing registry lookup for ID %s", idOrName) - } + default: + return nil, err } pod, ok := s.pods[fullID] if !ok { // It's a container not a pod - return nil, errors.Wrapf(define.ErrNoSuchPod, "id or name %s is a container not a pod", idOrName) + return nil, errors.Wrapf(define.ErrNoSuchPod, "id or name %s is a container, not a pod", idOrName) } return pod, nil diff --git a/vendor/github.com/containers/libpod/libpod/info.go b/vendor/github.com/containers/libpod/libpod/info.go index 9ab6f7e52b..e5c075d975 100644 --- a/vendor/github.com/containers/libpod/libpod/info.go +++ b/vendor/github.com/containers/libpod/libpod/info.go @@ -6,17 +6,19 @@ import ( "fmt" "io/ioutil" "os" + "os/exec" "runtime" "strconv" "strings" "time" "github.com/containers/buildah" + "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" - "github.com/containers/libpod/utils" "github.com/containers/storage" "github.com/containers/storage/pkg/system" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // top-level "host" info @@ -27,6 +29,15 @@ func (r *Runtime) hostInfo() (map[string]interface{}, error) { info["arch"] = runtime.GOARCH info["cpus"] = runtime.NumCPU() info["rootless"] = rootless.IsRootless() + unified, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return nil, errors.Wrapf(err, "error reading cgroups mode") + } + cgroupVersion := "v1" + if unified { + cgroupVersion = "v2" + } + info["CgroupVersion"] = cgroupVersion mi, err := system.ReadMemInfo() if err != nil { return nil, errors.Wrapf(err, "error reading memory info") @@ -36,24 +47,37 @@ func (r *Runtime) 
hostInfo() (map[string]interface{}, error) {
 info["MemFree"] = mi.MemFree
 info["SwapTotal"] = mi.SwapTotal
 info["SwapFree"] = mi.SwapFree
- conmonVersion, _ := r.GetConmonVersion()
- ociruntimeVersion, _ := r.GetOCIRuntimeVersion()
 hostDistributionInfo := r.GetHostDistributionInfo()
- info["Conmon"] = map[string]interface{}{
- "path": r.conmonPath,
- "package": r.defaultOCIRuntime.conmonPackage(),
- "version": conmonVersion,
- }
- info["OCIRuntime"] = map[string]interface{}{
- "path": r.defaultOCIRuntime.path,
- "package": r.defaultOCIRuntime.pathPackage(),
- "version": ociruntimeVersion,
+ if rootless.IsRootless() {
+ if path, err := exec.LookPath("slirp4netns"); err == nil {
+ logrus.Warnf("Failed to retrieve program version for %s: %v", path, err)
+ version, err := programVersion(path)
+ if err != nil {
+ logrus.Warnf("Failed to retrieve program version for %s: %v", path, err)
+ }
+ program := map[string]interface{}{}
+ program["Executable"] = path
+ program["Version"] = version
+ program["Package"] = packageVersion(path)
+ info["slirp4netns"] = program
+ }
+ uidmappings, err := rootless.ReadMappingsProc("/proc/self/uid_map")
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading uid mappings")
+ }
+ gidmappings, err := rootless.ReadMappingsProc("/proc/self/gid_map")
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading gid mappings")
+ }
+ idmappings := make(map[string]interface{})
+ idmappings["uidmap"] = uidmappings
+ idmappings["gidmap"] = gidmappings
+ info["IDMappings"] = idmappings
 }
 info["Distribution"] = map[string]interface{}{
 "distribution": hostDistributionInfo["Distribution"],
 "version": hostDistributionInfo["Version"],
 }
-
 info["BuildahVersion"] = buildah.Version
 kv, err := readKernelVersion()
 if err != nil {
@@ -61,6 +85,15 @@ func (r *Runtime) hostInfo() (map[string]interface{}, error) {
 }
 info["kernel"] = kv

+ runtimeInfo, err := r.defaultOCIRuntime.RuntimeInfo()
+ if err != nil {
+ logrus.Errorf("Error getting info on OCI runtime %s: %v", r.defaultOCIRuntime.Name(), err)
+ } else {
+ for k, v := range runtimeInfo {
+ info[k] = v
+ }
+ }
+
 up, err := readUptime()
 if err != nil {
 return nil, errors.Wrapf(err, "error reading up time")
@@ -103,6 +136,7 @@ func (r *Runtime) hostInfo() (map[string]interface{}, error) {
 }
 info["hostname"] = host
 info["eventlogger"] = r.eventer.String()
+
 return info, nil
 }

@@ -113,7 +147,24 @@ func (r *Runtime) storeInfo() (map[string]interface{}, error) {
 info["GraphRoot"] = r.store.GraphRoot()
 info["RunRoot"] = r.store.RunRoot()
 info["GraphDriverName"] = r.store.GraphDriverName()
- info["GraphOptions"] = r.store.GraphOptions()
+ graphOptions := map[string]interface{}{}
+ for _, o := range r.store.GraphOptions() {
+ split := strings.SplitN(o, "=", 2)
+ if strings.HasSuffix(split[0], "mount_program") {
+ version, err := programVersion(split[1])
+ if err != nil {
+ logrus.Warnf("Failed to retrieve program version for %s: %v", split[1], err)
+ }
+ program := map[string]interface{}{}
+ program["Executable"] = split[1]
+ program["Version"] = version
+ program["Package"] = packageVersion(split[1])
+ graphOptions[split[0]] = program
+ } else {
+ graphOptions[split[0]] = split[1]
+ }
+ }
+ info["GraphOptions"] = graphOptions
 info["VolumePath"] = r.config.VolumePath

 configFile, err := storage.DefaultConfigFile(rootless.IsRootless())
@@ -173,29 +224,6 @@ func readUptime() (string, error) {
 return string(f[0]), nil
 }

-// GetConmonVersion returns a string representation of the conmon version
-func (r *Runtime) GetConmonVersion()
(string, error) { - output, err := utils.ExecCmd(r.conmonPath, "--version") - if err != nil { - return "", err - } - return strings.TrimSuffix(strings.Replace(output, "\n", ", ", 1), "\n"), nil -} - -// GetOCIRuntimePath returns the path to the OCI Runtime Path the runtime is using -func (r *Runtime) GetOCIRuntimePath() string { - return r.defaultOCIRuntime.path -} - -// GetOCIRuntimeVersion returns a string representation of the oci runtimes version -func (r *Runtime) GetOCIRuntimeVersion() (string, error) { - output, err := utils.ExecCmd(r.GetOCIRuntimePath(), "--version") - if err != nil { - return "", err - } - return strings.TrimSuffix(output, "\n"), nil -} - // GetHostDistributionInfo returns a map containing the host's distribution and version func (r *Runtime) GetHostDistributionInfo() map[string]string { dist := make(map[string]string) diff --git a/vendor/github.com/containers/libpod/libpod/logs/log.go b/vendor/github.com/containers/libpod/libpod/logs/log.go index 0b17035673..0330df06a1 100644 --- a/vendor/github.com/containers/libpod/libpod/logs/log.go +++ b/vendor/github.com/containers/libpod/libpod/logs/log.go @@ -31,7 +31,7 @@ type LogOptions struct { Details bool Follow bool Since time.Time - Tail uint64 + Tail int64 Timestamps bool Multi bool WaitGroup *sync.WaitGroup @@ -54,8 +54,10 @@ func GetLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error logTail []*LogLine ) // whence 0=origin, 2=end - if options.Tail > 0 { + if options.Tail >= 0 { whence = 2 + } + if options.Tail > 0 { logTail, err = getTailLog(path, int(options.Tail)) if err != nil { return nil, nil, err diff --git a/vendor/github.com/containers/libpod/libpod/networking_linux.go b/vendor/github.com/containers/libpod/libpod/networking_linux.go index bef3f7739f..daa0619a2e 100644 --- a/vendor/github.com/containers/libpod/libpod/networking_linux.go +++ b/vendor/github.com/containers/libpod/libpod/networking_linux.go @@ -5,6 +5,7 @@ package libpod import ( "crypto/rand" "fmt" + "io/ioutil" "net" "os" "os/exec" @@ -17,7 +18,6 @@ import ( cnitypes "github.com/containernetworking/cni/pkg/types/current" "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/libpod/pkg/errorhandling" - "github.com/containers/libpod/pkg/firewall" "github.com/containers/libpod/pkg/netns" "github.com/containers/libpod/pkg/rootless" "github.com/cri-o/ocicni/pkg/ocicni" @@ -86,26 +86,11 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re networkStatus = append(networkStatus, resultCurrent) } - // Add firewall rules to ensure the container has network access. - // Will not be necessary once CNI firewall plugin merges upstream. 
- // https://github.com/containernetworking/plugins/pull/75 - for _, netStatus := range networkStatus { - firewallConf := &firewall.FirewallNetConf{ - PrevResult: netStatus, - } - if err := r.firewallBackend.Add(firewallConf); err != nil { - return nil, errors.Wrapf(err, "error adding firewall rules for container %s", ctr.ID()) - } - } - return networkStatus, nil } // Create and configure a new network namespace for a container func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q []*cnitypes.Result, err error) { - if rootless.IsRootless() { - return nil, nil, errors.New("cannot configure a new network namespace in rootless mode, only --network=slirp4netns is supported") - } ctrNS, err := netns.NewNS() if err != nil { return nil, nil, errors.Wrapf(err, "error creating network namespace for container %s", ctr.ID()) @@ -123,7 +108,10 @@ func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q []*cnitypes.Result, logrus.Debugf("Made network namespace at %s for container %s", ctrNS.Path(), ctr.ID()) - networkStatus, err := r.configureNetNS(ctr, ctrNS) + networkStatus := []*cnitypes.Result{} + if !rootless.IsRootless() { + networkStatus, err = r.configureNetNS(ctr, ctrNS) + } return ctrNS, networkStatus, err } @@ -140,20 +128,17 @@ type slirp4netnsCmd struct { Args slirp4netnsCmdArg `json:"arguments"` } -func checkSlirpFlags(path string) (bool, bool, error) { +func checkSlirpFlags(path string) (bool, bool, bool, error) { cmd := exec.Command(path, "--help") out, err := cmd.CombinedOutput() if err != nil { - return false, false, err + return false, false, false, errors.Wrapf(err, "slirp4netns %q", out) } - return strings.Contains(string(out), "--disable-host-loopback"), strings.Contains(string(out), "--mtu"), nil + return strings.Contains(string(out), "--disable-host-loopback"), strings.Contains(string(out), "--mtu"), strings.Contains(string(out), "--enable-sandbox"), nil } // Configure the network namespace for a rootless container func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) { - defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncR) - defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncW) - path := r.config.NetworkCmdPath if path == "" { @@ -173,15 +158,16 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) { defer errorhandling.CloseQuiet(syncW) havePortMapping := len(ctr.Config().PortMappings) > 0 - apiSocket := filepath.Join(ctr.ociRuntime.tmpDir, fmt.Sprintf("%s.net", ctr.config.ID)) + apiSocket := filepath.Join(ctr.runtime.config.TmpDir, fmt.Sprintf("%s.net", ctr.config.ID)) + logPath := filepath.Join(ctr.runtime.config.TmpDir, fmt.Sprintf("slirp4netns-%s.log", ctr.config.ID)) cmdArgs := []string{} if havePortMapping { - cmdArgs = append(cmdArgs, "--api-socket", apiSocket, fmt.Sprintf("%d", ctr.state.PID)) + cmdArgs = append(cmdArgs, "--api-socket", apiSocket) } - dhp, mtu, err := checkSlirpFlags(path) + dhp, mtu, sandbox, err := checkSlirpFlags(path) if err != nil { - return errors.Wrapf(err, "error checking slirp4netns binary %s", path) + return errors.Wrapf(err, "error checking slirp4netns binary %s: %q", path, err) } if dhp { cmdArgs = append(cmdArgs, "--disable-host-loopback") @@ -189,15 +175,55 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) { if mtu { cmdArgs = append(cmdArgs, "--mtu", "65520") } - cmdArgs = append(cmdArgs, "-c", "-e", "3", "-r", "4", fmt.Sprintf("%d", ctr.state.PID), "tap0") + if sandbox { + cmdArgs = append(cmdArgs, "--enable-sandbox") + } - cmd := exec.Command(path, cmdArgs...) 
+ // the slirp4netns arguments being passed are described as follows:
+ // from the slirp4netns documentation: https://github.com/rootless-containers/slirp4netns
+ // -c, --configure Brings up the tap interface
+ // -e, --exit-fd=FD specify the FD for terminating slirp4netns
+ // -r, --ready-fd=FD specify the FD to write to when the initialization steps are finished
+ cmdArgs = append(cmdArgs, "-c", "-e", "3", "-r", "4")
+ if !ctr.config.PostConfigureNetNS {
+ ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
+ if err != nil {
+ return errors.Wrapf(err, "failed to create rootless network sync pipe")
+ }
+ cmdArgs = append(cmdArgs, "--netns-type=path", ctr.state.NetNS.Path(), "tap0")
+ } else {
+ defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncR)
+ defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncW)
+ cmdArgs = append(cmdArgs, fmt.Sprintf("%d", ctr.state.PID), "tap0")
+ }
+ cmd := exec.Command(path, cmdArgs...)
+ logrus.Debugf("slirp4netns command: %s", strings.Join(cmd.Args, " "))
 cmd.SysProcAttr = &syscall.SysProcAttr{
 Setpgid: true,
 }
+
+ // workaround for https://github.com/rootless-containers/slirp4netns/pull/153
+ if sandbox {
+ cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWNS
+ cmd.SysProcAttr.Unshareflags = syscall.CLONE_NEWNS
+ }
+
+ // Leak one end of the pipe in slirp4netns, the other will be sent to conmon
 cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncR, syncW)
+ logFile, err := os.Create(logPath)
+ if err != nil {
+ return errors.Wrapf(err, "failed to open slirp4netns log file %s", logPath)
+ }
+ defer logFile.Close()
+ // Unlink the file immediately so we won't need to worry about cleaning it up later.
+ // It is still accessible through the open fd logFile.
+ if err := os.Remove(logPath); err != nil {
+ return errors.Wrapf(err, "delete file %s", logPath)
+ }
+ cmd.Stdout = logFile
+ cmd.Stderr = logFile
 if err := cmd.Start(); err != nil {
 return errors.Wrapf(err, "failed to start slirp4netns process")
 }
@@ -226,7 +252,15 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
 continue
 }
 if status.Exited() {
- return errors.New("slirp4netns failed")
+ // Seek to the beginning of the file and read all its content
+ if _, err := logFile.Seek(0, 0); err != nil {
+ logrus.Errorf("could not seek log file: %q", err)
+ }
+ logContent, err := ioutil.ReadAll(logFile)
+ if err != nil {
+ return errors.Wrapf(err, "slirp4netns failed")
+ }
+ return errors.Errorf("slirp4netns failed: %q", logContent)
 }
 if status.Signaled() {
 return errors.New("slirp4netns killed by signal")
@@ -390,42 +424,30 @@ func (r *Runtime) closeNetNS(ctr *Container) error {
 }

 // Tear down a network namespace, undoing all state associated with it.
-// The CNI firewall rules will be removed, the namespace will be unmounted,
- // https://github.com/containernetworking/plugins/pull/75 - for _, netStatus := range ctr.state.NetworkStatus { - firewallConf := &firewall.FirewallNetConf{ - PrevResult: netStatus, - } - if err := r.firewallBackend.Del(firewallConf); err != nil { - return errors.Wrapf(err, "error removing firewall rules for container %s", ctr.ID()) - } - } - logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID()) - var requestedIP net.IP - if ctr.requestedIP != nil { - requestedIP = ctr.requestedIP - // cancel request for a specific IP in case the container is reused later - ctr.requestedIP = nil - } else { - requestedIP = ctr.config.StaticIP - } + // rootless containers do not use the CNI plugin + if !rootless.IsRootless() { + var requestedIP net.IP + if ctr.requestedIP != nil { + requestedIP = ctr.requestedIP + // cancel request for a specific IP in case the container is reused later + ctr.requestedIP = nil + } else { + requestedIP = ctr.config.StaticIP + } - podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP) + podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP) - // The network may have already been torn down, so don't fail here, just log - if err := r.netPlugin.TearDownPod(podNetwork); err != nil { - return errors.Wrapf(err, "error tearing down CNI namespace configuration for container %s", ctr.ID()) + if err := r.netPlugin.TearDownPod(podNetwork); err != nil { + return errors.Wrapf(err, "error tearing down CNI namespace configuration for container %s", ctr.ID()) + } } // First unmount the namespace @@ -462,6 +484,12 @@ func getContainerNetNS(ctr *Container) (string, error) { func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) { var netStats *netlink.LinkStatistics + // rootless v2 cannot seem to resolve its network connection to + // collect statistics. 
For now, we allow stats to at least run + // by returning nil + if rootless.IsRootless() { + return netStats, nil + } netNSPath, netPathErr := getContainerNetNS(ctr) if netPathErr != nil { return nil, netPathErr diff --git a/vendor/github.com/containers/libpod/libpod/oci.go b/vendor/github.com/containers/libpod/libpod/oci.go index 2eb004b84e..9e761788ec 100644 --- a/vendor/github.com/containers/libpod/libpod/oci.go +++ b/vendor/github.com/containers/libpod/libpod/oci.go @@ -1,437 +1,132 @@ package libpod import ( - "bytes" - "fmt" - "io/ioutil" - "net" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/pkg/util" - "github.com/cri-o/ocicni/pkg/ocicni" - spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - // TODO import these functions into libpod and remove the import - // Trying to keep libpod from depending on CRI-O code - "github.com/containers/libpod/utils" -) - -// OCI code is undergoing heavy rewrite - -const ( - // CgroupfsCgroupsManager represents cgroupfs native cgroup manager - CgroupfsCgroupsManager = "cgroupfs" - // SystemdCgroupsManager represents systemd native cgroup manager - SystemdCgroupsManager = "systemd" - - // ContainerCreateTimeout represents the value of container creating timeout - ContainerCreateTimeout = 240 * time.Second - - // Timeout before declaring that runtime has failed to kill a given - // container - killContainerTimeout = 5 * time.Second - // DefaultShmSize is the default shm size - DefaultShmSize = 64 * 1024 * 1024 - // NsRunDir is the default directory in which running network namespaces - // are stored - NsRunDir = "/var/run/netns" + "k8s.io/client-go/tools/remotecommand" ) -// OCIRuntime represents an OCI-compatible runtime that libpod can call into -// to perform container operations -type OCIRuntime struct { - name string - path string - conmonPath string - conmonEnv []string - cgroupManager string - tmpDir string - exitsDir string - socketsDir string - logSizeMax int64 - noPivot bool - reservePorts bool - supportsJSON bool -} - -// ociError is used to parse the OCI runtime JSON log. It is not part of the -// OCI runtime specifications, it follows what runc does -type ociError struct { - Level string `json:"level,omitempty"` - Time string `json:"time,omitempty"` - Msg string `json:"msg,omitempty"` -} - -// Make a new OCI runtime with provided options. -// The first path that points to a valid executable will be used. -func newOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *RuntimeConfig, supportsJSON bool) (*OCIRuntime, error) { - if name == "" { - return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name") - } - - runtime := new(OCIRuntime) - runtime.name = name - runtime.conmonPath = conmonPath - - runtime.conmonEnv = runtimeCfg.ConmonEnvVars - runtime.cgroupManager = runtimeCfg.CgroupManager - runtime.tmpDir = runtimeCfg.TmpDir - runtime.logSizeMax = runtimeCfg.MaxLogSize - runtime.noPivot = runtimeCfg.NoPivotRoot - runtime.reservePorts = runtimeCfg.EnablePortReservation - - // TODO: probe OCI runtime for feature and enable automatically if - // available. 
- runtime.supportsJSON = supportsJSON - - foundPath := false - for _, path := range paths { - stat, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - continue - } - return nil, errors.Wrapf(err, "cannot stat %s", path) - } - if !stat.Mode().IsRegular() { - continue - } - foundPath = true - runtime.path = path - logrus.Debugf("using runtime %q", path) - break - } - - // Search the $PATH as last fallback - if !foundPath { - if foundRuntime, err := exec.LookPath(name); err == nil { - foundPath = true - runtime.path = foundRuntime - logrus.Debugf("using runtime %q from $PATH: %q", name, foundRuntime) - } - } - - if !foundPath { - return nil, errors.Wrapf(define.ErrInvalidArg, "no valid executable found for OCI runtime %s", name) - } - - runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits") - runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket") - - if runtime.cgroupManager != CgroupfsCgroupsManager && runtime.cgroupManager != SystemdCgroupsManager { - return nil, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager) - } - - // Create the exit files and attach sockets directories - if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil { - // The directory is allowed to exist - if !os.IsExist(err) { - return nil, errors.Wrapf(err, "error creating OCI runtime exit files directory %s", - runtime.exitsDir) - } - } - if err := os.MkdirAll(runtime.socketsDir, 0750); err != nil { - // The directory is allowed to exist - if !os.IsExist(err) { - return nil, errors.Wrapf(err, "error creating OCI runtime attach sockets directory %s", - runtime.socketsDir) - } - } - - return runtime, nil -} - -// Create systemd unit name for cgroup scopes -func createUnitName(prefix string, name string) string { - return fmt.Sprintf("%s-%s.scope", prefix, name) -} - -func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) { - var files []*os.File - notifySCTP := false - for _, i := range ports { - switch i.Protocol { - case "udp": - addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort)) - if err != nil { - return nil, errors.Wrapf(err, "cannot resolve the UDP address") - } - - server, err := net.ListenUDP("udp", addr) - if err != nil { - return nil, errors.Wrapf(err, "cannot listen on the UDP port") - } - f, err := server.File() - if err != nil { - return nil, errors.Wrapf(err, "cannot get file for UDP socket") - } - files = append(files, f) - - case "tcp": - addr, err := net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort)) - if err != nil { - return nil, errors.Wrapf(err, "cannot resolve the TCP address") - } - - server, err := net.ListenTCP("tcp4", addr) - if err != nil { - return nil, errors.Wrapf(err, "cannot listen on the TCP port") - } - f, err := server.File() - if err != nil { - return nil, errors.Wrapf(err, "cannot get file for TCP socket") - } - files = append(files, f) - case "sctp": - if !notifySCTP { - notifySCTP = true - logrus.Warnf("port reservation for SCTP is not supported") - } - default: - return nil, fmt.Errorf("unknown protocol %s", i.Protocol) - - } - } - return files, nil -} - -// updateContainerStatus retrieves the current status of the container from the -// runtime. It updates the container's state but does not save it. -// If useRunc is false, we will not directly hit runc to see the container's -// status, but will instead only check for the existence of the conmon exit file -// and update state to stopped if it exists. 
-func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) error { - exitFile := ctr.exitFilePath() - - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - - // If not using the OCI runtime, we don't need to do most of this. - if !useRuntime { - // If the container's not running, nothing to do. - if ctr.state.State != define.ContainerStateRunning && ctr.state.State != define.ContainerStatePaused { - return nil - } - - // Check for the exit file conmon makes - info, err := os.Stat(exitFile) - if err != nil { - if os.IsNotExist(err) { - // Container is still running, no error - return nil - } - - return errors.Wrapf(err, "error running stat on container %s exit file", ctr.ID()) - } - - // Alright, it exists. Transition to Stopped state. - ctr.state.State = define.ContainerStateStopped - ctr.state.PID = 0 - ctr.state.ConmonPID = 0 - - // Read the exit file to get our stopped time and exit code. - return ctr.handleExitFile(exitFile, info) - } - - // Store old state so we know if we were already stopped - oldState := ctr.state.State - - state := new(spec.State) - - cmd := exec.Command(r.path, "state", ctr.ID()) - cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)) - - outPipe, err := cmd.StdoutPipe() - if err != nil { - return errors.Wrapf(err, "getting stdout pipe") - } - errPipe, err := cmd.StderrPipe() - if err != nil { - return errors.Wrapf(err, "getting stderr pipe") - } - - if err := cmd.Start(); err != nil { - out, err2 := ioutil.ReadAll(errPipe) - if err2 != nil { - return errors.Wrapf(err, "error getting container %s state", ctr.ID()) - } - if strings.Contains(string(out), "does not exist") { - if err := ctr.removeConmonFiles(); err != nil { - logrus.Debugf("unable to remove conmon files for container %s", ctr.ID()) - } - ctr.state.ExitCode = -1 - ctr.state.FinishedTime = time.Now() - ctr.state.State = define.ContainerStateExited - return nil - } - return errors.Wrapf(err, "error getting container %s state. 
stderr/out: %s", ctr.ID(), out) - } - defer func() { - _ = cmd.Wait() - }() - - if err := errPipe.Close(); err != nil { - return err - } - out, err := ioutil.ReadAll(outPipe) - if err != nil { - return errors.Wrapf(err, "error reading stdout: %s", ctr.ID()) - } - if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(state); err != nil { - return errors.Wrapf(err, "error decoding container status for container %s", ctr.ID()) - } - ctr.state.PID = state.Pid - - switch state.Status { - case "created": - ctr.state.State = define.ContainerStateCreated - case "paused": - ctr.state.State = define.ContainerStatePaused - case "running": - ctr.state.State = define.ContainerStateRunning - case "stopped": - ctr.state.State = define.ContainerStateStopped - default: - return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s", - ctr.ID(), state.Status) - } - - // Only grab exit status if we were not already stopped - // If we were, it should already be in the database - if ctr.state.State == define.ContainerStateStopped && oldState != define.ContainerStateStopped { - var fi os.FileInfo - chWait := make(chan error) - defer close(chWait) - - _, err := WaitForFile(exitFile, chWait, time.Second*5) - if err == nil { - fi, err = os.Stat(exitFile) - } - if err != nil { - ctr.state.ExitCode = -1 - ctr.state.FinishedTime = time.Now() - logrus.Errorf("No exit file for container %s found: %v", ctr.ID(), err) - return nil - } - - return ctr.handleExitFile(exitFile, fi) - } - - return nil -} - -// startContainer starts the given container -// Sets time the container was started, but does not save it. -func (r *OCIRuntime) startContainer(ctr *Container) error { - // TODO: streams should probably *not* be our STDIN/OUT/ERR - redirect to buffers? 
- runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} - if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { - env = append(env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify)) - } - if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "start", ctr.ID()); err != nil { - return err - } - - ctr.state.StartedTime = time.Now() - - return nil -} - -// killContainer sends the given signal to the given container -func (r *OCIRuntime) killContainer(ctr *Container, signal uint) error { - logrus.Debugf("Sending signal %d to container %s", signal, ctr.ID()) - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} - if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", ctr.ID(), fmt.Sprintf("%d", signal)); err != nil { - return errors.Wrapf(err, "error sending signal to container %s", ctr.ID()) - } - - return nil -} - -// deleteContainer deletes a container from the OCI runtime -func (r *OCIRuntime) deleteContainer(ctr *Container) error { - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} - return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "delete", "--force", ctr.ID()) -} - -// pauseContainer pauses the given container -func (r *OCIRuntime) pauseContainer(ctr *Container) error { - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} - return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "pause", ctr.ID()) -} - -// unpauseContainer unpauses the given container -func (r *OCIRuntime) unpauseContainer(ctr *Container) error { - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} - return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "resume", ctr.ID()) -} - -// checkpointContainer checkpoints the given container -func (r *OCIRuntime) checkpointContainer(ctr *Container, options ContainerCheckpointOptions) error { - if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil { - return err - } - // imagePath is used by CRIU to store the actual checkpoint files - imagePath := ctr.CheckpointPath() - // workPath will be used to store dump.log and stats-dump - workPath := ctr.bundlePath() - logrus.Debugf("Writing checkpoint to %s", imagePath) - logrus.Debugf("Writing checkpoint logs to %s", workPath) - args := []string{} - args = append(args, "checkpoint") - args = append(args, "--image-path") - args = append(args, imagePath) - args = append(args, "--work-path") - args = append(args, workPath) - if options.KeepRunning { - args = append(args, "--leave-running") - } - if options.TCPEstablished { - args = append(args, "--tcp-established") - } - args = append(args, ctr.ID()) - return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, nil, r.path, args...) +// OCIRuntime is an implementation of an OCI runtime. 
+// The OCI runtime implementation is expected to be a fairly thin wrapper around
+// the actual runtime, and is not expected to include things like state
+// management logic - e.g., we do not expect it to determine on its own that
+// calling 'UnpauseContainer()' on a container that is not paused is an error.
+// The code calling the OCIRuntime will manage this.
+// TODO: May want to move the Attach() code under this umbrella. It's highly OCI
+// runtime dependent.
+// TODO: May want to move the conmon cleanup code here too - it depends on
+// Conmon being in use.
+type OCIRuntime interface {
+ // Name returns the name of the runtime.
+ Name() string
+ // Path returns the path to the runtime executable.
+ Path() string
+
+ // CreateContainer creates the container in the OCI runtime.
+ CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error
+ // UpdateContainerStatus updates the status of the given container.
+ // The runtime is queried directly for current state; the exit file
+ // (if supported by the implementation) is used to recover the exit
+ // code once the container has stopped.
+ UpdateContainerStatus(ctr *Container) error
+ // StartContainer starts the given container.
+ StartContainer(ctr *Container) error
+ // KillContainer sends the given signal to the given container.
+ // If all is set, all processes in the container will be signalled;
+ // otherwise, only init will be signalled.
+ KillContainer(ctr *Container, signal uint, all bool) error
+ // StopContainer stops the given container.
+ // The container's stop signal (or SIGTERM if unspecified) will be sent
+ // first.
+ // After the given timeout, SIGKILL will be sent.
+ // If the given timeout is 0, SIGKILL will be sent immediately, and the
+ // stop signal will be omitted.
+ // If all is set, we will attempt to use the --all flag with `kill` in
+ // the OCI runtime to kill all processes in the container, including
+ // exec sessions. This is only supported if the container has cgroups.
+ StopContainer(ctr *Container, timeout uint, all bool) error
+ // DeleteContainer deletes the given container from the OCI runtime.
+ DeleteContainer(ctr *Container) error
+ // PauseContainer pauses the given container.
+ PauseContainer(ctr *Container) error
+ // UnpauseContainer unpauses the given container.
+ UnpauseContainer(ctr *Container) error
+
+ // ExecContainer executes a command in a running container.
+ // Returns an int (exit code), error channel (errors from attach), and
+ // error (errors that occurred attempting to start the exec session).
+ ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error)
+ // ExecStopContainer stops a given exec session in a running container.
+ // SIGTERM will be sent initially, then SIGKILL after the given timeout.
+ // If timeout is 0, SIGKILL will be sent immediately, and SIGTERM will
+ // be omitted.
+ ExecStopContainer(ctr *Container, sessionID string, timeout uint) error
+ // ExecContainerCleanup cleans up after an exec session exits.
+ // It removes any files left by the exec session that are no longer
+ // needed, including the attach socket.
+ ExecContainerCleanup(ctr *Container, sessionID string) error
+
+ // CheckpointContainer checkpoints the given container.
+ // Some OCI runtimes may not support this - if SupportsCheckpoint()
+ // returns false, this is not implemented, and will always return an
+ // error.
+ CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error + + // SupportsCheckpoint returns whether this OCI runtime + // implementation supports the CheckpointContainer() operation. + SupportsCheckpoint() bool + // SupportsJSONErrors is whether the runtime can return JSON-formatted + // error messages. + SupportsJSONErrors() bool + // SupportsNoCgroups is whether the runtime supports running containers + // without cgroups. + SupportsNoCgroups() bool + + // AttachSocketPath is the path to the socket to attach to a given + // container. + // TODO: If we move Attach code in here, this should be made internal. + // We don't want to force all runtimes to share the same attach + // implementation. + AttachSocketPath(ctr *Container) (string, error) + // ExecAttachSocketPath is the path to the socket to attach to a given + // exec session in the given container. + // TODO: Probably should be made internal. + ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) + // ExitFilePath is the path to a container's exit file. + // All runtime implementations must create an exit file when containers + // exit, containing the exit code of the container (as a string). + // This is the path to that file for a given container. + ExitFilePath(ctr *Container) (string, error) + + // RuntimeInfo returns verbose information about the runtime. + RuntimeInfo() (map[string]interface{}, error) } -func (r *OCIRuntime) featureCheckCheckpointing() bool { - // Check if the runtime implements checkpointing. Currently only - // runc's checkpoint/restore implementation is supported. - cmd := exec.Command(r.path, "checkpoint", "-h") - if err := cmd.Start(); err != nil { - return false - } - if err := cmd.Wait(); err == nil { - return true - } - return false +// ExecOptions are options passed into ExecContainer. They control the command +// that will be executed and how the exec will proceed. +type ExecOptions struct { + // Cmd is the command to execute. + Cmd []string + // CapAdd is a set of capabilities to add to the executed command. + CapAdd []string + // Env is a set of environment variables to add to the container. + Env map[string]string + // Terminal is whether to create a new TTY for the exec session. + Terminal bool + // Cwd is the working directory for the executed command. If unset, the + // working directory of the container will be used. + Cwd string + // User is the user the command will be executed as. If unset, the user + // the container was run as will be used. + User string + // Streams are the streams that will be attached to the container. + Streams *AttachStreams + // PreserveFDs is a number of additional file descriptors (in addition + // to 0, 1, 2) that will be passed to the executed process. The total FDs + // passed will be 3 + PreserveFDs. + PreserveFDs uint + // Resize is a channel where terminal resize events are sent to be + // handled. + Resize chan remotecommand.TerminalSize + // DetachKeys is a set of keys that, when pressed in sequence, will + // detach from the container. 
+ DetachKeys string } diff --git a/vendor/github.com/containers/libpod/libpod/oci_attach_linux.go b/vendor/github.com/containers/libpod/libpod/oci_attach_linux.go index 22afa74169..eeaee6d43d 100644 --- a/vendor/github.com/containers/libpod/libpod/oci_attach_linux.go +++ b/vendor/github.com/containers/libpod/libpod/oci_attach_linux.go @@ -47,7 +47,11 @@ func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan re registerResizeFunc(resize, c.bundlePath()) - socketPath := buildSocketPath(c.AttachSocketPath()) + attachSock, err := c.AttachSocketPath() + if err != nil { + return err + } + socketPath := buildSocketPath(attachSock) conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"}) if err != nil { @@ -107,15 +111,18 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c logrus.Debugf("Attaching to container %s exec session %s", c.ID(), sessionID) - registerResizeFunc(resize, c.execBundlePath(sessionID)) - // set up the socket path, such that it is the correct length and location for exec - socketPath := buildSocketPath(c.execAttachSocketPath(sessionID)) + sockPath, err := c.execAttachSocketPath(sessionID) + if err != nil { + return err + } + socketPath := buildSocketPath(sockPath) // 2: read from attachFd that the parent process has set up the console socket if _, err := readConmonPipeData(attachFd, ""); err != nil { return err } + // 2: then attach conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"}) if err != nil { @@ -127,6 +134,10 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c } }() + // Register the resize func after we've read the attach socket, as we know at this point the + // 'ctl' file has been created in conmon + registerResizeFunc(resize, c.execBundlePath(sessionID)) + // start listening on stdio of the process receiveStdoutError, stdinDone := setupStdioChannels(streams, conn, detachKeys) @@ -141,7 +152,7 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c func processDetachKeys(keys string) ([]byte, error) { // Check the validity of the provided keys first if len(keys) == 0 { - keys = DefaultDetachKeys + keys = define.DefaultDetachKeys } detachKeys, err := term.ToBytes(keys) if err != nil { diff --git a/vendor/github.com/containers/libpod/libpod/oci_conmon_linux.go b/vendor/github.com/containers/libpod/libpod/oci_conmon_linux.go new file mode 100644 index 0000000000..026b131296 --- /dev/null +++ b/vendor/github.com/containers/libpod/libpod/oci_conmon_linux.go @@ -0,0 +1,1388 @@ +// +build linux + +package libpod + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "time" + + "github.com/containers/libpod/libpod/config" + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/pkg/cgroups" + "github.com/containers/libpod/pkg/errorhandling" + "github.com/containers/libpod/pkg/lookup" + "github.com/containers/libpod/pkg/rootless" + "github.com/containers/libpod/pkg/util" + "github.com/containers/libpod/utils" + pmount "github.com/containers/storage/pkg/mount" + "github.com/coreos/go-systemd/activation" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// ConmonOCIRuntime is an 
OCI runtime managed by Conmon. +// TODO: Make all calls to OCI runtime have a timeout. +type ConmonOCIRuntime struct { + name string + path string + conmonPath string + conmonEnv []string + cgroupManager string + tmpDir string + exitsDir string + socketsDir string + logSizeMax int64 + noPivot bool + reservePorts bool + supportsJSON bool + supportsNoCgroups bool + sdNotify bool +} + +// Make a new Conmon-based OCI runtime with the given options. +// Conmon will wrap the given OCI runtime, which can be `runc`, `crun`, or +// any runtime with a runc-compatible CLI. +// The first path that points to a valid executable will be used. +// Deliberately private. Someone should not be able to construct this outside of +// libpod. +func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *config.Config, supportsJSON, supportsNoCgroups bool) (OCIRuntime, error) { + if name == "" { + return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name") + } + + runtime := new(ConmonOCIRuntime) + runtime.name = name + runtime.conmonPath = conmonPath + + runtime.conmonEnv = runtimeCfg.ConmonEnvVars + runtime.cgroupManager = runtimeCfg.CgroupManager + runtime.tmpDir = runtimeCfg.TmpDir + runtime.logSizeMax = runtimeCfg.MaxLogSize + runtime.noPivot = runtimeCfg.NoPivotRoot + runtime.reservePorts = runtimeCfg.EnablePortReservation + runtime.sdNotify = runtimeCfg.SDNotify + + // TODO: probe OCI runtime for feature and enable automatically if + // available. + runtime.supportsJSON = supportsJSON + runtime.supportsNoCgroups = supportsNoCgroups + + foundPath := false + for _, path := range paths { + stat, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, errors.Wrapf(err, "cannot stat %s", path) + } + if !stat.Mode().IsRegular() { + continue + } + foundPath = true + runtime.path = path + logrus.Debugf("using runtime %q", path) + break + } + + // Search the $PATH as last fallback + if !foundPath { + if foundRuntime, err := exec.LookPath(name); err == nil { + foundPath = true + runtime.path = foundRuntime + logrus.Debugf("using runtime %q from $PATH: %q", name, foundRuntime) + } + } + + if !foundPath { + return nil, errors.Wrapf(define.ErrInvalidArg, "no valid executable found for OCI runtime %s", name) + } + + runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits") + runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket") + + if runtime.cgroupManager != define.CgroupfsCgroupsManager && runtime.cgroupManager != define.SystemdCgroupsManager { + return nil, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager) + } + + // Create the exit files and attach sockets directories + if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil { + // The directory is allowed to exist + if !os.IsExist(err) { + return nil, errors.Wrapf(err, "error creating OCI runtime exit files directory %s", + runtime.exitsDir) + } + } + if err := os.MkdirAll(runtime.socketsDir, 0750); err != nil { + // The directory is allowed to exist + if !os.IsExist(err) { + return nil, errors.Wrapf(err, "error creating OCI runtime attach sockets directory %s", + runtime.socketsDir) + } + } + + return runtime, nil +} + +// Name returns the name of the runtime being wrapped by Conmon. +func (r *ConmonOCIRuntime) Name() string { + return r.name +} + +// Path returns the path of the OCI runtime being wrapped by Conmon. 
+func (r *ConmonOCIRuntime) Path() string {
+ return r.path
+}
+
+// CreateContainer creates a container.
+func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) {
+ if len(ctr.config.IDMappings.UIDMap) != 0 || len(ctr.config.IDMappings.GIDMap) != 0 {
+ for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.VolumePath} {
+ if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil {
+ return err
+ }
+ }
+
+ // if we are running a non-privileged container, be sure to unmount some kernel paths so they are not
+ // bind mounted inside the container at all.
+ if !ctr.config.Privileged && !rootless.IsRootless() {
+ ch := make(chan error)
+ go func() {
+ runtime.LockOSThread()
+ err := func() error {
+ fd, err := os.Open(fmt.Sprintf("/proc/%d/task/%d/ns/mnt", os.Getpid(), unix.Gettid()))
+ if err != nil {
+ return err
+ }
+ defer errorhandling.CloseQuiet(fd)
+
+ // create a new mountns on the current thread
+ if err = unix.Unshare(unix.CLONE_NEWNS); err != nil {
+ return err
+ }
+ defer func() {
+ if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil {
+ logrus.Errorf("unable to restore the previous mount namespace: %q", err)
+ }
+ }()
+
+ // don't spread our mounts around. We are setting only /sys to be slave
+ // so that the cleanup process is still able to umount the storage and the
+ // changes are propagated to the host.
+ err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "")
+ if err != nil {
+ return errors.Wrapf(err, "cannot make /sys slave")
+ }
+
+ mounts, err := pmount.GetMounts()
+ if err != nil {
+ return err
+ }
+ for _, m := range mounts {
+ if !strings.HasPrefix(m.Mountpoint, "/sys/kernel") {
+ continue
+ }
+ err = unix.Unmount(m.Mountpoint, 0)
+ if err != nil && !os.IsNotExist(err) {
+ return errors.Wrapf(err, "cannot unmount %s", m.Mountpoint)
+ }
+ }
+ return r.createOCIContainer(ctr, restoreOptions)
+ }()
+ ch <- err
+ }()
+ err := <-ch
+ return err
+ }
+ }
+ return r.createOCIContainer(ctr, restoreOptions)
+}
+
+// UpdateContainerStatus retrieves the current status of the container from the
+// runtime. It updates the container's state but does not save it.
+// Once the container has stopped, the conmon exit file is read to recover the
+// exit code.
+func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { + exitFile, err := r.ExitFilePath(ctr) + if err != nil { + return err + } + + runtimeDir, err := util.GetRuntimeDir() + if err != nil { + return err + } + + // Store old state so we know if we were already stopped + oldState := ctr.state.State + + state := new(spec.State) + + cmd := exec.Command(r.path, "state", ctr.ID()) + cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)) + + outPipe, err := cmd.StdoutPipe() + if err != nil { + return errors.Wrapf(err, "getting stdout pipe") + } + errPipe, err := cmd.StderrPipe() + if err != nil { + return errors.Wrapf(err, "getting stderr pipe") + } + + if err := cmd.Start(); err != nil { + out, err2 := ioutil.ReadAll(errPipe) + if err2 != nil { + return errors.Wrapf(err, "error getting container %s state", ctr.ID()) + } + if strings.Contains(string(out), "does not exist") { + if err := ctr.removeConmonFiles(); err != nil { + logrus.Debugf("unable to remove conmon files for container %s", ctr.ID()) + } + ctr.state.ExitCode = -1 + ctr.state.FinishedTime = time.Now() + ctr.state.State = define.ContainerStateExited + return nil + } + return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out) + } + defer func() { + _ = cmd.Wait() + }() + + if err := errPipe.Close(); err != nil { + return err + } + out, err := ioutil.ReadAll(outPipe) + if err != nil { + return errors.Wrapf(err, "error reading stdout: %s", ctr.ID()) + } + if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(state); err != nil { + return errors.Wrapf(err, "error decoding container status for container %s", ctr.ID()) + } + ctr.state.PID = state.Pid + + switch state.Status { + case "created": + ctr.state.State = define.ContainerStateCreated + case "paused": + ctr.state.State = define.ContainerStatePaused + case "running": + ctr.state.State = define.ContainerStateRunning + case "stopped": + ctr.state.State = define.ContainerStateStopped + default: + return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s", + ctr.ID(), state.Status) + } + + // Only grab exit status if we were not already stopped + // If we were, it should already be in the database + if ctr.state.State == define.ContainerStateStopped && oldState != define.ContainerStateStopped { + var fi os.FileInfo + chWait := make(chan error) + defer close(chWait) + + _, err := WaitForFile(exitFile, chWait, time.Second*5) + if err == nil { + fi, err = os.Stat(exitFile) + } + if err != nil { + ctr.state.ExitCode = -1 + ctr.state.FinishedTime = time.Now() + logrus.Errorf("No exit file for container %s found: %v", ctr.ID(), err) + return nil + } + + return ctr.handleExitFile(exitFile, fi) + } + + return nil +} + +// StartContainer starts the given container. +// Sets time the container was started, but does not save it. +func (r *ConmonOCIRuntime) StartContainer(ctr *Container) error { + // TODO: streams should probably *not* be our STDIN/OUT/ERR - redirect to buffers? 
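+ // The start sequence below: build a minimal environment for the
+ // runtime call (XDG_RUNTIME_DIR so rootless runtimes can locate their
+ // state, NOTIFY_SOCKET passed through for systemd sd_notify), then
+ // run `<runtime> start <ctr-id>` with our standard streams attached.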
+ runtimeDir, err := util.GetRuntimeDir()
+ if err != nil {
+ return err
+ }
+ env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
+ if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok {
+ env = append(env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify))
+ }
+ if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "start", ctr.ID()); err != nil {
+ return err
+ }
+
+ ctr.state.StartedTime = time.Now()
+
+ return nil
+}
+
+// KillContainer sends the given signal to the given container.
+// If all is set, send to all PIDs in the container.
+// All is only supported if the container created cgroups.
+func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool) error {
+ logrus.Debugf("Sending signal %d to container %s", signal, ctr.ID())
+ runtimeDir, err := util.GetRuntimeDir()
+ if err != nil {
+ return err
+ }
+ env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
+ var args []string
+ if all {
+ args = []string{"kill", "--all", ctr.ID(), fmt.Sprintf("%d", signal)}
+ } else {
+ args = []string{"kill", ctr.ID(), fmt.Sprintf("%d", signal)}
+ }
+ if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil {
+ return errors.Wrapf(err, "error sending signal to container %s", ctr.ID())
+ }
+
+ return nil
+}
+
+// StopContainer stops a container, first using its given stop signal (or
+// SIGTERM if no signal was specified), then using SIGKILL.
+// Timeout is given in seconds. If timeout is 0, the container will be
+// immediately killed with SIGKILL.
+// Does not set the container's finished time; callers are expected to run
+// UpdateContainerStatus afterwards to pull the exit code.
+func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) error {
+ logrus.Debugf("Stopping container %s (PID %d)", ctr.ID(), ctr.state.PID)
+
+ // Ping the container to see if it's alive
+ // If it's not, it's already stopped, return
+ err := unix.Kill(ctr.state.PID, 0)
+ if err == unix.ESRCH {
+ return nil
+ }
+
+ stopSignal := ctr.config.StopSignal
+ if stopSignal == 0 {
+ stopSignal = uint(syscall.SIGTERM)
+ }
+
+ if timeout > 0 {
+ if err := r.KillContainer(ctr, stopSignal, all); err != nil {
+ // Is the container gone?
+ // If so, it probably died between the first check and
+ // our sending the signal
+ // The container is stopped, so exit cleanly
+ err := unix.Kill(ctr.state.PID, 0)
+ if err == unix.ESRCH {
+ return nil
+ }
+
+ return err
+ }
+
+ if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil {
+ logrus.Warnf("Timed out stopping container %s, resorting to SIGKILL", ctr.ID())
+ } else {
+ // No error, the container is dead
+ return nil
+ }
+ }
+
+ if err := r.KillContainer(ctr, 9, all); err != nil {
+ // Again, check if the container is gone. If it is, exit cleanly.
+ err := unix.Kill(ctr.state.PID, 0)
+ if err == unix.ESRCH {
+ return nil
+ }
+
+ return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID())
+ }
+
+ // Give runtime a few seconds to make it happen
+ if err := waitContainerStop(ctr, killContainerTimeout); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// DeleteContainer deletes a container from the OCI runtime.
+func (r *ConmonOCIRuntime) DeleteContainer(ctr *Container) error { + runtimeDir, err := util.GetRuntimeDir() + if err != nil { + return err + } + env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} + return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "delete", "--force", ctr.ID()) +} + +// PauseContainer pauses the given container. +func (r *ConmonOCIRuntime) PauseContainer(ctr *Container) error { + runtimeDir, err := util.GetRuntimeDir() + if err != nil { + return err + } + env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} + return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "pause", ctr.ID()) +} + +// UnpauseContainer unpauses the given container. +func (r *ConmonOCIRuntime) UnpauseContainer(ctr *Container) error { + runtimeDir, err := util.GetRuntimeDir() + if err != nil { + return err + } + env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} + return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "resume", ctr.ID()) +} + +// ExecContainer executes a command in a running container +// TODO: Split into Create/Start/Attach/Wait +func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions) (int, chan error, error) { + if options == nil { + return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer") + } + if len(options.Cmd) == 0 { + return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute") + } + + if sessionID == "" { + return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec") + } + + // create sync pipe to receive the pid + parentSyncPipe, childSyncPipe, err := newPipe() + if err != nil { + return -1, nil, errors.Wrapf(err, "error creating socket pair") + } + + defer errorhandling.CloseQuiet(parentSyncPipe) + + // create start pipe to set the cgroup before running + // attachToExec is responsible for closing parentStartPipe + childStartPipe, parentStartPipe, err := newPipe() + if err != nil { + return -1, nil, errors.Wrapf(err, "error creating socket pair") + } + + // We want to make sure we close the parent{Start,Attach}Pipes if we fail + // but also don't want to close them after attach to exec is called + attachToExecCalled := false + + defer func() { + if !attachToExecCalled { + errorhandling.CloseQuiet(parentStartPipe) + } + }() + + // create the attach pipe to allow attach socket to be created before + // $RUNTIME exec starts running. 
This is to make sure we can capture all output + // from the process through that socket, rather than half reading the log, half attaching to the socket + // attachToExec is responsible for closing parentAttachPipe + parentAttachPipe, childAttachPipe, err := newPipe() + if err != nil { + return -1, nil, errors.Wrapf(err, "error creating socket pair") + } + + defer func() { + if !attachToExecCalled { + errorhandling.CloseQuiet(parentAttachPipe) + } + }() + + childrenClosed := false + defer func() { + if !childrenClosed { + errorhandling.CloseQuiet(childSyncPipe) + errorhandling.CloseQuiet(childAttachPipe) + errorhandling.CloseQuiet(childStartPipe) + } + }() + + runtimeDir, err := util.GetRuntimeDir() + if err != nil { + return -1, nil, err + } + + finalEnv := make([]string, 0, len(options.Env)) + for k, v := range options.Env { + finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v)) + } + + processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID) + if err != nil { + return -1, nil, err + } + + var ociLog string + if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { + ociLog = c.execOCILog(sessionID) + } + args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog) + + if options.PreserveFDs > 0 { + args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...) + } + + for _, capability := range options.CapAdd { + args = append(args, formatRuntimeOpts("--cap", capability)...) + } + + if options.Terminal { + args = append(args, "-t") + } + + // Append container ID and command + args = append(args, "-e") + // TODO make this optional when we can detach + args = append(args, "--exec-attach") + args = append(args, "--exec-process-spec", processFile.Name()) + + logrus.WithFields(logrus.Fields{ + "args": args, + }).Debugf("running conmon: %s", r.conmonPath) + execCmd := exec.Command(r.conmonPath, args...) + + if options.Streams != nil { + if options.Streams.AttachInput { + execCmd.Stdin = options.Streams.InputStream + } + if options.Streams.AttachOutput { + execCmd.Stdout = options.Streams.OutputStream + } + if options.Streams.AttachError { + execCmd.Stderr = options.Streams.ErrorStream + } + } + + conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir) + if err != nil { + return -1, nil, err + } + + if options.PreserveFDs > 0 { + for fd := 3; fd < int(3+options.PreserveFDs); fd++ { + execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd))) + } + } + + // we don't want to step on users fds they asked to preserve + // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3 + execCmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5)) + execCmd.Env = append(execCmd.Env, conmonEnv...) + + execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe) + execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...) 
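+ // fd layout in the child process: 0-2 are stdio, 3 through
+ // PreserveFDs+2 are the user's preserved fds, and the sync, start and
+ // attach pipes follow at PreserveFDs+3/4/5, matching the
+ // _OCI_SYNCPIPE/_OCI_STARTPIPE/_OCI_ATTACHPIPE values set above.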
+ execCmd.Dir = c.execBundlePath(sessionID)
+ execCmd.SysProcAttr = &syscall.SysProcAttr{
+ Setpgid: true,
+ }
+
+ err = startCommandGivenSelinux(execCmd)
+
+ // We don't need children pipes on the parent side
+ errorhandling.CloseQuiet(childSyncPipe)
+ errorhandling.CloseQuiet(childAttachPipe)
+ errorhandling.CloseQuiet(childStartPipe)
+ childrenClosed = true
+
+ if err != nil {
+ return -1, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
+ }
+ if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
+ return -1, nil, err
+ }
+
+ if options.PreserveFDs > 0 {
+ for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
+ // These fds were passed down to the runtime. Close them
+ // so they do not interfere
+ if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
+ logrus.Debugf("unable to close file fd-%d", fd)
+ }
+ }
+ }
+
+ // TODO Only create if !detach
+ // Attach to the container before starting it
+ attachChan := make(chan error)
+ go func() {
+ // attachToExec is responsible for closing pipes
+ attachChan <- c.attachToExec(options.Streams, options.DetachKeys, options.Resize, sessionID, parentStartPipe, parentAttachPipe)
+ close(attachChan)
+ }()
+ attachToExecCalled = true
+
+ pid, err := readConmonPipeData(parentSyncPipe, ociLog)
+
+ return pid, attachChan, err
+}
+
+// ExecStopContainer stops a given exec session in a running container.
+func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
+ session, ok := ctr.state.ExecSessions[sessionID]
+ if !ok {
+ // TODO This should probably be a separate error
+ return errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID())
+ }
+
+ logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)
+
+ // Is the session dead?
+ // Ping the PID with signal 0 to see if it still exists.
+ if err := unix.Kill(session.PID, 0); err != nil {
+ if err == unix.ESRCH {
+ return nil
+ }
+ return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID)
+ }
+
+ if timeout > 0 {
+ // Use SIGTERM by default, then SIGKILL after timeout.
+ logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, session.PID, ctr.ID())
+ if err := unix.Kill(session.PID, unix.SIGTERM); err != nil {
+ if err == unix.ESRCH {
+ return nil
+ }
+ return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, session.PID)
+ }
+
+ // Wait for the PID to stop
+ if err := waitPidStop(session.PID, time.Duration(timeout)*time.Second); err != nil {
+ logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
+ } else {
+ // No error, container is dead
+ return nil
+ }
+ }
+
+ // SIGTERM did not work. On to SIGKILL.
+ logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, session.PID, ctr.ID()) + if err := unix.Kill(session.PID, unix.SIGTERM); err != nil { + if err == unix.ESRCH { + return nil + } + return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, session.PID) + } + + // Wait for the PID to stop + if err := waitPidStop(session.PID, killContainerTimeout*time.Second); err != nil { + return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, session.PID) + } + + return nil +} + +// ExecCleanupContainer cleans up files created when a command is run via +// ExecContainer. This includes the attach socket for the exec session. +func (r *ConmonOCIRuntime) ExecContainerCleanup(ctr *Container, sessionID string) error { + // Clean up the sockets dir. Issue #3962 + // Also ignore if it doesn't exist for some reason; hence the conditional return below + if err := os.RemoveAll(filepath.Join(r.socketsDir, sessionID)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// CheckpointContainer checkpoints the given container. +func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error { + if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil { + return err + } + // imagePath is used by CRIU to store the actual checkpoint files + imagePath := ctr.CheckpointPath() + // workPath will be used to store dump.log and stats-dump + workPath := ctr.bundlePath() + logrus.Debugf("Writing checkpoint to %s", imagePath) + logrus.Debugf("Writing checkpoint logs to %s", workPath) + args := []string{} + args = append(args, "checkpoint") + args = append(args, "--image-path") + args = append(args, imagePath) + args = append(args, "--work-path") + args = append(args, workPath) + if options.KeepRunning { + args = append(args, "--leave-running") + } + if options.TCPEstablished { + args = append(args, "--tcp-established") + } + args = append(args, ctr.ID()) + return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, nil, r.path, args...) +} + +// SupportsCheckpoint checks if the OCI runtime supports checkpointing +// containers. +func (r *ConmonOCIRuntime) SupportsCheckpoint() bool { + // Check if the runtime implements checkpointing. Currently only + // runc's checkpoint/restore implementation is supported. + cmd := exec.Command(r.path, "checkpoint", "-h") + if err := cmd.Start(); err != nil { + return false + } + if err := cmd.Wait(); err == nil { + return true + } + return false +} + +// SupportsJSONErrors checks if the OCI runtime supports JSON-formatted error +// messages. +func (r *ConmonOCIRuntime) SupportsJSONErrors() bool { + return r.supportsJSON +} + +// SupportsNoCgroups checks if the OCI runtime supports running containers +// without cgroups (the --cgroup-manager=disabled flag). +func (r *ConmonOCIRuntime) SupportsNoCgroups() bool { + return r.supportsNoCgroups +} + +// AttachSocketPath is the path to a single container's attach socket. +func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) { + if ctr == nil { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get attach socket path") + } + + return filepath.Join(r.socketsDir, ctr.ID(), "attach"), nil +} + +// ExecAttachSocketPath is the path to a container's exec session attach socket. 
+func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) { + // We don't even use container, so don't validity check it + if sessionID == "" { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path") + } + + return filepath.Join(r.socketsDir, sessionID, "attach"), nil +} + +// ExitFilePath is the path to a container's exit file. +func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) { + if ctr == nil { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get exit file path") + } + return filepath.Join(r.exitsDir, ctr.ID()), nil +} + +// RuntimeInfo provides information on the runtime. +func (r *ConmonOCIRuntime) RuntimeInfo() (map[string]interface{}, error) { + runtimePackage := packageVersion(r.path) + conmonPackage := packageVersion(r.conmonPath) + runtimeVersion, err := r.getOCIRuntimeVersion() + if err != nil { + return nil, errors.Wrapf(err, "error getting version of OCI runtime %s", r.name) + } + conmonVersion, err := r.getConmonVersion() + if err != nil { + return nil, errors.Wrapf(err, "error getting conmon version") + } + + info := make(map[string]interface{}) + info["Conmon"] = map[string]interface{}{ + "path": r.conmonPath, + "package": conmonPackage, + "version": conmonVersion, + } + info["OCIRuntime"] = map[string]interface{}{ + "name": r.name, + "path": r.path, + "package": runtimePackage, + "version": runtimeVersion, + } + + return info, nil +} + +// makeAccessible changes the path permission and each parent directory to have --x--x--x +func makeAccessible(path string, uid, gid int) error { + for ; path != "/"; path = filepath.Dir(path) { + st, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + if int(st.Sys().(*syscall.Stat_t).Uid) == uid && int(st.Sys().(*syscall.Stat_t).Gid) == gid { + continue + } + if st.Mode()&0111 != 0111 { + if err := os.Chmod(path, st.Mode()|0111); err != nil { + return err + } + } + } + return nil +} + +// Wait for a container which has been sent a signal to stop +func waitContainerStop(ctr *Container, timeout time.Duration) error { + return waitPidStop(ctr.state.PID, timeout) +} + +// Wait for a given PID to stop +func waitPidStop(pid int, timeout time.Duration) error { + done := make(chan struct{}) + chControl := make(chan struct{}) + go func() { + for { + select { + case <-chControl: + return + default: + if err := unix.Kill(pid, 0); err != nil { + if err == unix.ESRCH { + close(done) + return + } + logrus.Errorf("Error pinging PID %d with signal 0: %v", pid, err) + } + time.Sleep(100 * time.Millisecond) + } + } + }() + select { + case <-done: + return nil + case <-time.After(timeout): + close(chControl) + return errors.Errorf("given PIDs did not die within timeout") + } +} + +// createOCIContainer generates this container's main conmon instance and prepares it for starting +func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) { + var stderrBuf bytes.Buffer + + runtimeDir, err := util.GetRuntimeDir() + if err != nil { + return err + } + + parentSyncPipe, childSyncPipe, err := newPipe() + if err != nil { + return errors.Wrapf(err, "error creating socket pair") + } + defer errorhandling.CloseQuiet(parentSyncPipe) + + childStartPipe, parentStartPipe, err := newPipe() + if err != nil { + return errors.Wrapf(err, "error creating socket pair for start pipe") + } + + defer 
errorhandling.CloseQuiet(parentStartPipe) + + var ociLog string + if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { + ociLog = filepath.Join(ctr.state.RunDir, "oci-log") + } + args := r.sharedConmonArgs(ctr, ctr.ID(), ctr.bundlePath(), filepath.Join(ctr.state.RunDir, "pidfile"), ctr.LogPath(), r.exitsDir, ociLog) + + if ctr.config.Spec.Process.Terminal { + args = append(args, "-t") + } else if ctr.config.Stdin { + args = append(args, "-i") + } + + if ctr.config.ConmonPidFile != "" { + args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile) + } + + if r.noPivot { + args = append(args, "--no-pivot") + } + + if len(ctr.config.ExitCommand) > 0 { + args = append(args, "--exit-command", ctr.config.ExitCommand[0]) + for _, arg := range ctr.config.ExitCommand[1:] { + args = append(args, []string{"--exit-command-arg", arg}...) + } + } + + if restoreOptions != nil { + args = append(args, "--restore", ctr.CheckpointPath()) + if restoreOptions.TCPEstablished { + args = append(args, "--runtime-opt", "--tcp-established") + } + } + + logrus.WithFields(logrus.Fields{ + "args": args, + }).Debugf("running conmon: %s", r.conmonPath) + + cmd := exec.Command(r.conmonPath, args...) + cmd.Dir = ctr.bundlePath() + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + // TODO this is probably a really bad idea for some uses + // Make this configurable + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if ctr.config.Spec.Process.Terminal { + cmd.Stderr = &stderrBuf + } + + // 0, 1 and 2 are stdin, stdout and stderr + conmonEnv, envFiles, err := r.configureConmonEnv(runtimeDir) + if err != nil { + return err + } + + cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3), fmt.Sprintf("_OCI_STARTPIPE=%d", 4)) + cmd.Env = append(cmd.Env, conmonEnv...) + cmd.ExtraFiles = append(cmd.ExtraFiles, childSyncPipe, childStartPipe) + cmd.ExtraFiles = append(cmd.ExtraFiles, envFiles...) + + if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() { + ports, err := bindPorts(ctr.config.PortMappings) + if err != nil { + return err + } + + // Leak the port we bound in the conmon process. These fd's won't be used + // by the container and conmon will keep the ports busy so that another + // process cannot use them. + cmd.ExtraFiles = append(cmd.ExtraFiles, ports...) 
+ } + + if ctr.config.NetMode.IsSlirp4netns() { + if ctr.config.PostConfigureNetNS { + ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe() + if err != nil { + return errors.Wrapf(err, "failed to create rootless network sync pipe") + } + } else { + if ctr.rootlessSlirpSyncR != nil { + defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncR) + } + if ctr.rootlessSlirpSyncW != nil { + defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncW) + } + } + // Leak one end in conmon, the other one will be leaked into slirp4netns + cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW) + } + + err = startCommandGivenSelinux(cmd) + // regardless of whether we errored or not, we no longer need the children pipes + childSyncPipe.Close() + childStartPipe.Close() + if err != nil { + return err + } + if err := r.moveConmonToCgroupAndSignal(ctr, cmd, parentStartPipe); err != nil { + return err + } + /* Wait for initial setup and fork, and reap child */ + err = cmd.Wait() + if err != nil { + return err + } + + pid, err := readConmonPipeData(parentSyncPipe, ociLog) + if err != nil { + if err2 := r.DeleteContainer(ctr); err2 != nil { + logrus.Errorf("Error removing container %s from runtime after creation failed", ctr.ID()) + } + return err + } + ctr.state.PID = pid + + conmonPID, err := readConmonPidFile(ctr.config.ConmonPidFile) + if err != nil { + logrus.Warnf("error reading conmon pid file for container %s: %s", ctr.ID(), err.Error()) + } else if conmonPID > 0 { + // conmon not having a pid file is a valid state, so don't set it if we don't have it + logrus.Infof("Got Conmon PID as %d", conmonPID) + ctr.state.ConmonPID = conmonPID + } + + return nil +} + +// prepareProcessExec returns the path of the process.json used in runc exec -p +// caller is responsible to close the returned *os.File if needed. +func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, sessionID string) (*os.File, error) { + f, err := ioutil.TempFile(c.execBundlePath(sessionID), "exec-process-") + if err != nil { + return nil, err + } + pspec := c.config.Spec.Process + pspec.SelinuxLabel = c.config.ProcessLabel + pspec.Args = cmd + // We need to default this to false else it will inherit terminal as true + // from the container. + pspec.Terminal = false + if tty { + pspec.Terminal = true + } + if len(env) > 0 { + pspec.Env = append(pspec.Env, env...) 
+ } + + if cwd != "" { + pspec.Cwd = cwd + + } + + overrides := c.getUserOverrides() + execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, user, overrides) + if err != nil { + return nil, err + } + + // If user was set, look it up in the container to get a UID to use on + // the host + if user != "" { + sgids := make([]uint32, 0, len(execUser.Sgids)) + for _, sgid := range execUser.Sgids { + sgids = append(sgids, uint32(sgid)) + } + processUser := spec.User{ + UID: uint32(execUser.Uid), + GID: uint32(execUser.Gid), + AdditionalGids: sgids, + } + + pspec.User = processUser + } + + hasHomeSet := false + for _, s := range pspec.Env { + if strings.HasPrefix(s, "HOME=") { + hasHomeSet = true + break + } + } + if !hasHomeSet { + pspec.Env = append(pspec.Env, fmt.Sprintf("HOME=%s", execUser.Home)) + } + + processJSON, err := json.Marshal(pspec) + if err != nil { + return nil, err + } + + if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil { + return nil, err + } + return f, nil +} + +// configureConmonEnv gets the environment values to add to conmon's exec struct +// TODO this may want to be less hardcoded/more configurable in the future +func (r *ConmonOCIRuntime) configureConmonEnv(runtimeDir string) ([]string, []*os.File, error) { + env := make([]string, 0, 6) + env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)) + env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED"))) + env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID"))) + home, err := util.HomeDir() + if err != nil { + return nil, nil, err + } + env = append(env, fmt.Sprintf("HOME=%s", home)) + + extraFiles := make([]*os.File, 0) + if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { + env = append(env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify)) + } + if !r.sdNotify { + if listenfds, ok := os.LookupEnv("LISTEN_FDS"); ok { + env = append(env, fmt.Sprintf("LISTEN_FDS=%s", listenfds), "LISTEN_PID=1") + fds := activation.Files(false) + extraFiles = append(extraFiles, fds...) + } + } else { + logrus.Debug("disabling SD notify") + } + return env, extraFiles, nil +} + +// sharedConmonArgs takes common arguments for exec and create/restore and formats them for the conmon CLI +func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath string) []string { + // set the conmon API version to be able to use the correct sync struct keys + args := []string{"--api-version", "1"} + if r.cgroupManager == define.SystemdCgroupsManager && !ctr.config.NoCgroups { + args = append(args, "-s") + } + args = append(args, "-c", ctr.ID()) + args = append(args, "-u", cuuid) + args = append(args, "-r", r.path) + args = append(args, "-b", bundlePath) + args = append(args, "-p", pidPath) + + var logDriver string + switch ctr.LogDriver() { + case JournaldLogging: + logDriver = JournaldLogging + case JSONLogging: + fallthrough + default: //nolint-stylecheck + // No case here should happen except JSONLogging, but keep this here in case the options are extended + logrus.Errorf("%s logging specified but not supported. 
Choosing k8s-file logging instead", ctr.LogDriver())
+		fallthrough
+	case "":
+		// to get here, either a user would specify `--log-driver ""`, or this came from another place in libpod
+		// since the former case is obscure, and the latter case isn't an error, let's silently fallthrough
+		fallthrough
+	case KubernetesLogging:
+		logDriver = fmt.Sprintf("%s:%s", KubernetesLogging, logPath)
+	}
+
+	args = append(args, "-l", logDriver)
+	args = append(args, "--exit-dir", exitDir)
+	args = append(args, "--socket-dir-path", r.socketsDir)
+	if r.logSizeMax >= 0 {
+		args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax))
+	}
+
+	logLevel := logrus.GetLevel()
+	args = append(args, "--log-level", logLevel.String())
+
+	if logLevel == logrus.DebugLevel {
+		logrus.Debugf("%s messages will be logged to syslog", r.conmonPath)
+		args = append(args, "--syslog")
+	}
+	if ociLogPath != "" {
+		args = append(args, "--runtime-arg", "--log-format=json", "--runtime-arg", "--log", fmt.Sprintf("--runtime-arg=%s", ociLogPath))
+	}
+	if ctr.config.NoCgroups {
+		logrus.Debugf("Running with no CGroups")
+		args = append(args, "--runtime-arg", "--cgroup-manager", "--runtime-arg", "disabled")
+	}
+	return args
+}
+
+// startCommandGivenSelinux starts a container, setting the labels of the
+// process so that SELinux doesn't block conmon communication, if SELinux is enabled
+func startCommandGivenSelinux(cmd *exec.Cmd) error {
+	if !selinux.GetEnabled() {
+		return cmd.Start()
+	}
+	// Set the label of the conmon process to be level :s0
+	// This will allow the container processes to talk to fifo-files
+	// passed into the container by conmon
+	var (
+		plabel string
+		con    selinux.Context
+		err    error
+	)
+	plabel, err = selinux.CurrentLabel()
+	if err != nil {
+		return errors.Wrapf(err, "Failed to get current SELinux label")
+	}
+
+	con, err = selinux.NewContext(plabel)
+	if err != nil {
+		return errors.Wrapf(err, "Failed to get new context from SELinux label")
+	}
+
+	runtime.LockOSThread()
+	if con["level"] != "s0" && con["level"] != "" {
+		con["level"] = "s0"
+		if err = label.SetProcessLabel(con.Get()); err != nil {
+			runtime.UnlockOSThread()
+			return err
+		}
+	}
+	err = cmd.Start()
+	// Ignore error returned from SetProcessLabel("") call,
+	// can't recover.
+	if labelErr := label.SetProcessLabel(""); labelErr != nil {
+		logrus.Errorf("unable to set process label: %q", labelErr)
+	}
+	runtime.UnlockOSThread()
+	return err
+}
+
+// moveConmonToCgroupAndSignal gets a container's cgroupParent and moves the conmon process to that cgroup;
+// it then signals for conmon to start by sending nonce data down the start fd
+func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec.Cmd, startFd *os.File) error {
+	mustCreateCgroup := true
+	// If cgroup creation is disabled, just signal.
+ if ctr.config.NoCgroups { + mustCreateCgroup = false + } + + if mustCreateCgroup { + cgroupParent := ctr.CgroupParent() + if r.cgroupManager == define.SystemdCgroupsManager { + unitName := createUnitName("libpod-conmon", ctr.ID()) + + realCgroupParent := cgroupParent + splitParent := strings.Split(cgroupParent, "/") + if strings.HasSuffix(cgroupParent, ".slice") && len(splitParent) > 1 { + realCgroupParent = splitParent[len(splitParent)-1] + } + + logrus.Infof("Running conmon under slice %s and unitName %s", realCgroupParent, unitName) + if err := utils.RunUnderSystemdScope(cmd.Process.Pid, realCgroupParent, unitName); err != nil { + logrus.Warnf("Failed to add conmon to systemd sandbox cgroup: %v", err) + } + } else { + cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon") + control, err := cgroups.New(cgroupPath, &spec.LinuxResources{}) + if err != nil { + logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err) + } else { + // we need to remove this defer and delete the cgroup once conmon exits + // maybe need a conmon monitor? + if err := control.AddPid(cmd.Process.Pid); err != nil { + logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err) + } + } + } + } + + /* We set the cgroup, now the child can start creating children */ + if err := writeConmonPipeData(startFd); err != nil { + return err + } + return nil +} + +// newPipe creates a unix socket pair for communication +func newPipe() (parent *os.File, child *os.File, err error) { + fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0) + if err != nil { + return nil, nil, err + } + return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil +} + +// readConmonPidFile attempts to read conmon's pid from its pid file +func readConmonPidFile(pidFile string) (int, error) { + // Let's try reading the Conmon pid at the same time. 
+	if pidFile != "" {
+		contents, err := ioutil.ReadFile(pidFile)
+		if err != nil {
+			return -1, err
+		}
+		// Convert it to an int
+		conmonPID, err := strconv.Atoi(string(contents))
+		if err != nil {
+			return -1, err
+		}
+		return conmonPID, nil
+	}
+	return 0, nil
+}
+
+// readConmonPipeData attempts to read a syncInfo struct from the pipe
+func readConmonPipeData(pipe *os.File, ociLog string) (int, error) {
+	// syncInfo is used to return data from monitor process to daemon
+	type syncInfo struct {
+		Data    int    `json:"data"`
+		Message string `json:"message,omitempty"`
+	}
+
+	// Wait to get container pid from conmon
+	type syncStruct struct {
+		si  *syncInfo
+		err error
+	}
+	ch := make(chan syncStruct)
+	go func() {
+		var si *syncInfo
+		rdr := bufio.NewReader(pipe)
+		b, err := rdr.ReadBytes('\n')
+		if err != nil {
+			// return after the send: the channel is read from only once,
+			// so falling through to Unmarshal would block this goroutine forever
+			ch <- syncStruct{err: err}
+			return
+		}
+		if err := json.Unmarshal(b, &si); err != nil {
+			ch <- syncStruct{err: err}
+			return
+		}
+		ch <- syncStruct{si: si}
+	}()
+
+	data := -1
+	select {
+	case ss := <-ch:
+		if ss.err != nil {
+			if ociLog != "" {
+				ociLogData, err := ioutil.ReadFile(ociLog)
+				if err == nil {
+					var ociErr ociError
+					if err := json.Unmarshal(ociLogData, &ociErr); err == nil {
+						return -1, getOCIRuntimeError(ociErr.Msg)
+					}
+				}
+			}
+			return -1, errors.Wrapf(ss.err, "container create failed (no logs from conmon)")
+		}
+		logrus.Debugf("Received: %d", ss.si.Data)
+		if ss.si.Data < 0 {
+			if ociLog != "" {
+				ociLogData, err := ioutil.ReadFile(ociLog)
+				if err == nil {
+					var ociErr ociError
+					if err := json.Unmarshal(ociLogData, &ociErr); err == nil {
+						return ss.si.Data, getOCIRuntimeError(ociErr.Msg)
+					}
+				}
+			}
+			// If we failed to parse the JSON errors, then print the output as it is
+			if ss.si.Message != "" {
+				return ss.si.Data, getOCIRuntimeError(ss.si.Message)
+			}
+			return ss.si.Data, errors.Wrapf(define.ErrInternal, "container create failed")
+		}
+		data = ss.si.Data
+	case <-time.After(define.ContainerCreateTimeout):
+		return -1, errors.Wrapf(define.ErrInternal, "container creation timeout")
+	}
+	return data, nil
+}
+
+// writeConmonPipeData writes nonce data to a pipe
+func writeConmonPipeData(pipe *os.File) error {
+	someData := []byte{0}
+	_, err := pipe.Write(someData)
+	return err
+}
+
+// formatRuntimeOpts prepends opts passed to it with --runtime-opt for passing to conmon
+func formatRuntimeOpts(opts ...string) []string {
+	args := make([]string, 0, len(opts)*2)
+	for _, o := range opts {
+		args = append(args, "--runtime-opt", o)
+	}
+	return args
+}
+
+// getConmonVersion returns a string representation of the conmon version.
+func (r *ConmonOCIRuntime) getConmonVersion() (string, error) {
+	output, err := utils.ExecCmd(r.conmonPath, "--version")
+	if err != nil {
+		return "", err
+	}
+	return strings.TrimSuffix(strings.Replace(output, "\n", ", ", 1), "\n"), nil
+}
+
+// getOCIRuntimeVersion returns a string representation of the OCI runtime's
+// version.
+func (r *ConmonOCIRuntime) getOCIRuntimeVersion() (string, error) { + output, err := utils.ExecCmd(r.path, "--version") + if err != nil { + return "", err + } + return strings.TrimSuffix(output, "\n"), nil +} diff --git a/vendor/github.com/containers/libpod/libpod/oci_conmon_unsupported.go b/vendor/github.com/containers/libpod/libpod/oci_conmon_unsupported.go new file mode 100644 index 0000000000..a2f52f5271 --- /dev/null +++ b/vendor/github.com/containers/libpod/libpod/oci_conmon_unsupported.go @@ -0,0 +1,131 @@ +// +build !linux + +package libpod + +import ( + "github.com/containers/libpod/libpod/config" + "github.com/containers/libpod/libpod/define" +) + +const ( + osNotSupported = "Not supported on this OS" +) + +// ConmonOCIRuntime is not supported on this OS. +type ConmonOCIRuntime struct { +} + +// newConmonOCIRuntime is not supported on this OS. +func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *config.Config, supportsJSON, supportsNoCgroups bool) (OCIRuntime, error) { + return nil, define.ErrNotImplemented +} + +// Name is not supported on this OS. +func (r *ConmonOCIRuntime) Name() string { + return osNotSupported +} + +// Path is not supported on this OS. +func (r *ConmonOCIRuntime) Path() string { + return osNotSupported +} + +// CreateContainer is not supported on this OS. +func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error { + return define.ErrNotImplemented +} + +// UpdateContainerStatus is not supported on this OS. +func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container, useRuntime bool) error { + return define.ErrNotImplemented +} + +// StartContainer is not supported on this OS. +func (r *ConmonOCIRuntime) StartContainer(ctr *Container) error { + return define.ErrNotImplemented +} + +// KillContainer is not supported on this OS. +func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool) error { + return define.ErrNotImplemented +} + +// StopContainer is not supported on this OS. +func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) error { + return define.ErrNotImplemented +} + +// DeleteContainer is not supported on this OS. +func (r *ConmonOCIRuntime) DeleteContainer(ctr *Container) error { + return define.ErrNotImplemented +} + +// PauseContainer is not supported on this OS. +func (r *ConmonOCIRuntime) PauseContainer(ctr *Container) error { + return define.ErrNotImplemented +} + +// UnpauseContainer is not supported on this OS. +func (r *ConmonOCIRuntime) UnpauseContainer(ctr *Container) error { + return define.ErrNotImplemented +} + +// ExecContainer is not supported on this OS. +func (r *ConmonOCIRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error) { + return -1, nil, define.ErrNotImplemented +} + +// ExecStopContainer is not supported on this OS. +func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error { + return define.ErrNotImplemented +} + +// CheckpointContainer is not supported on this OS. +func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error { + return define.ErrNotImplemented +} + +// SupportsCheckpoint is not supported on this OS. +func (r *ConmonOCIRuntime) SupportsCheckpoint() bool { + return false +} + +// SupportsJSONErrors is not supported on this OS. 
+func (r *ConmonOCIRuntime) SupportsJSONErrors() bool { + return false +} + +// SupportsNoCgroups is not supported on this OS. +func (r *ConmonOCIRuntime) SupportsNoCgroups() bool { + return false +} + +// AttachSocketPath is not supported on this OS. +func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) { + return "", define.ErrNotImplemented +} + +// ExecAttachSocketPath is not supported on this OS. +func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) { + return "", define.ErrNotImplemented +} + +// ExitFilePath is not supported on this OS. +func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) { + return "", define.ErrNotImplemented +} + +// RuntimeInfo is not supported on this OS. +func (r *ConmonOCIRuntime) RuntimeInfo() (map[string]interface{}, error) { + return nil, define.ErrNotImplemented +} + +// Package is not supported on this OS. +func (r *ConmonOCIRuntime) Package() string { + return osNotSupported +} + +// ConmonPackage is not supported on this OS. +func (r *ConmonOCIRuntime) ConmonPackage() string { + return osNotSupported +} diff --git a/vendor/github.com/containers/libpod/libpod/oci_internal_linux.go b/vendor/github.com/containers/libpod/libpod/oci_internal_linux.go deleted file mode 100644 index e2c73f5ed6..0000000000 --- a/vendor/github.com/containers/libpod/libpod/oci_internal_linux.go +++ /dev/null @@ -1,498 +0,0 @@ -// +build linux - -package libpod - -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "syscall" - "time" - - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/pkg/cgroups" - "github.com/containers/libpod/pkg/errorhandling" - "github.com/containers/libpod/pkg/lookup" - "github.com/containers/libpod/pkg/util" - "github.com/containers/libpod/utils" - "github.com/coreos/go-systemd/activation" - spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/selinux/go-selinux" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// createOCIContainer generates this container's main conmon instance and prepares it for starting -func (r *OCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) { - var stderrBuf bytes.Buffer - - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - - parentSyncPipe, childSyncPipe, err := newPipe() - if err != nil { - return errors.Wrapf(err, "error creating socket pair") - } - defer errorhandling.CloseQuiet(parentSyncPipe) - - childStartPipe, parentStartPipe, err := newPipe() - if err != nil { - return errors.Wrapf(err, "error creating socket pair for start pipe") - } - - defer errorhandling.CloseQuiet(parentStartPipe) - - var ociLog string - if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { - ociLog = filepath.Join(ctr.state.RunDir, "oci-log") - } - args := r.sharedConmonArgs(ctr, ctr.ID(), ctr.bundlePath(), filepath.Join(ctr.state.RunDir, "pidfile"), ctr.LogPath(), r.exitsDir, ociLog) - - if ctr.config.Spec.Process.Terminal { - args = append(args, "-t") - } else if ctr.config.Stdin { - args = append(args, "-i") - } - - if ctr.config.ConmonPidFile != "" { - args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile) - } - - if r.noPivot { - args = append(args, "--no-pivot") - } - - if len(ctr.config.ExitCommand) > 0 { - args = 
append(args, "--exit-command", ctr.config.ExitCommand[0]) - for _, arg := range ctr.config.ExitCommand[1:] { - args = append(args, []string{"--exit-command-arg", arg}...) - } - } - - if restoreOptions != nil { - args = append(args, "--restore", ctr.CheckpointPath()) - if restoreOptions.TCPEstablished { - args = append(args, "--runtime-opt", "--tcp-established") - } - } - - logrus.WithFields(logrus.Fields{ - "args": args, - }).Debugf("running conmon: %s", r.conmonPath) - - cmd := exec.Command(r.conmonPath, args...) - cmd.Dir = ctr.bundlePath() - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - } - // TODO this is probably a really bad idea for some uses - // Make this configurable - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if ctr.config.Spec.Process.Terminal { - cmd.Stderr = &stderrBuf - } - - // 0, 1 and 2 are stdin, stdout and stderr - conmonEnv, envFiles, err := r.configureConmonEnv(runtimeDir) - if err != nil { - return err - } - - cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3), fmt.Sprintf("_OCI_STARTPIPE=%d", 4)) - cmd.Env = append(cmd.Env, conmonEnv...) - cmd.ExtraFiles = append(cmd.ExtraFiles, childSyncPipe, childStartPipe) - cmd.ExtraFiles = append(cmd.ExtraFiles, envFiles...) - - if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() { - ports, err := bindPorts(ctr.config.PortMappings) - if err != nil { - return err - } - - // Leak the port we bound in the conmon process. These fd's won't be used - // by the container and conmon will keep the ports busy so that another - // process cannot use them. - cmd.ExtraFiles = append(cmd.ExtraFiles, ports...) - } - - if ctr.config.NetMode.IsSlirp4netns() { - ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe() - if err != nil { - return errors.Wrapf(err, "failed to create rootless network sync pipe") - } - // Leak one end in conmon, the other one will be leaked into slirp4netns - cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW) - } - - err = startCommandGivenSelinux(cmd) - // regardless of whether we errored or not, we no longer need the children pipes - childSyncPipe.Close() - childStartPipe.Close() - if err != nil { - return err - } - if err := r.moveConmonToCgroupAndSignal(ctr, cmd, parentStartPipe, ctr.ID()); err != nil { - return err - } - /* Wait for initial setup and fork, and reap child */ - err = cmd.Wait() - if err != nil { - return err - } - - pid, err := readConmonPipeData(parentSyncPipe, ociLog) - if err != nil { - if err2 := r.deleteContainer(ctr); err2 != nil { - logrus.Errorf("Error removing container %s from runtime after creation failed", ctr.ID()) - } - return err - } - ctr.state.PID = pid - - conmonPID, err := readConmonPidFile(ctr.config.ConmonPidFile) - if err != nil { - logrus.Warnf("error reading conmon pid file for container %s: %s", ctr.ID(), err.Error()) - } else if conmonPID > 0 { - // conmon not having a pid file is a valid state, so don't set it if we don't have it - logrus.Infof("Got Conmon PID as %d", conmonPID) - ctr.state.ConmonPID = conmonPID - } - - return nil -} - -// prepareProcessExec returns the path of the process.json used in runc exec -p -// caller is responsible to close the returned *os.File if needed. 
-func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, sessionID string) (*os.File, error) { - f, err := ioutil.TempFile(c.execBundlePath(sessionID), "exec-process-") - if err != nil { - return nil, err - } - - pspec := c.config.Spec.Process - pspec.Args = cmd - // We need to default this to false else it will inherit terminal as true - // from the container. - pspec.Terminal = false - if tty { - pspec.Terminal = true - } - if len(env) > 0 { - pspec.Env = append(pspec.Env, env...) - } - - if cwd != "" { - pspec.Cwd = cwd - - } - // If user was set, look it up in the container to get a UID to use on - // the host - if user != "" { - execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, user, nil) - if err != nil { - return nil, err - } - sgids := make([]uint32, 0, len(execUser.Sgids)) - for _, sgid := range execUser.Sgids { - sgids = append(sgids, uint32(sgid)) - } - processUser := spec.User{ - UID: uint32(execUser.Uid), - GID: uint32(execUser.Gid), - AdditionalGids: sgids, - } - - pspec.User = processUser - } - - processJSON, err := json.Marshal(pspec) - if err != nil { - return nil, err - } - - if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil { - return nil, err - } - return f, nil -} - -// configureConmonEnv gets the environment values to add to conmon's exec struct -// TODO this may want to be less hardcoded/more configurable in the future -func (r *OCIRuntime) configureConmonEnv(runtimeDir string) ([]string, []*os.File, error) { - env := make([]string, 0, 6) - env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)) - env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED"))) - env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID"))) - home, err := homeDir() - if err != nil { - return nil, nil, err - } - env = append(env, fmt.Sprintf("HOME=%s", home)) - - extraFiles := make([]*os.File, 0) - if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { - env = append(env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify)) - } - if listenfds, ok := os.LookupEnv("LISTEN_FDS"); ok { - env = append(env, fmt.Sprintf("LISTEN_FDS=%s", listenfds), "LISTEN_PID=1") - fds := activation.Files(false) - extraFiles = append(extraFiles, fds...) - } - return env, extraFiles, nil -} - -// sharedConmonArgs takes common arguments for exec and create/restore and formats them for the conmon CLI -func (r *OCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath string) []string { - // set the conmon API version to be able to use the correct sync struct keys - args := []string{"--api-version", "1"} - if r.cgroupManager == SystemdCgroupsManager { - args = append(args, "-s") - } - args = append(args, "-c", ctr.ID()) - args = append(args, "-u", cuuid) - args = append(args, "-r", r.path) - args = append(args, "-b", bundlePath) - args = append(args, "-p", pidPath) - - var logDriver string - switch ctr.LogDriver() { - case JournaldLogging: - logDriver = JournaldLogging - case JSONLogging: - fallthrough - default: //nolint-stylecheck - // No case here should happen except JSONLogging, but keep this here in case the options are extended - logrus.Errorf("%s logging specified but not supported. 
Choosing k8s-file logging instead", ctr.LogDriver()) - fallthrough - case "": - // to get here, either a user would specify `--log-driver ""`, or this came from another place in libpod - // since the former case is obscure, and the latter case isn't an error, let's silently fallthrough - fallthrough - case KubernetesLogging: - logDriver = fmt.Sprintf("%s:%s", KubernetesLogging, logPath) - } - - args = append(args, "-l", logDriver) - args = append(args, "--exit-dir", exitDir) - args = append(args, "--socket-dir-path", r.socketsDir) - if r.logSizeMax >= 0 { - args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax)) - } - - logLevel := logrus.GetLevel() - args = append(args, "--log-level", logLevel.String()) - - if logLevel == logrus.DebugLevel { - logrus.Debugf("%s messages will be logged to syslog", r.conmonPath) - args = append(args, "--syslog") - } - if ociLogPath != "" { - args = append(args, "--runtime-arg", "--log-format=json", "--runtime-arg", "--log", fmt.Sprintf("--runtime-arg=%s", ociLogPath)) - } - return args -} - -// startCommandGivenSelinux starts a container ensuring to set the labels of -// the process to make sure SELinux doesn't block conmon communication, if SELinux is enabled -func startCommandGivenSelinux(cmd *exec.Cmd) error { - if !selinux.GetEnabled() { - return cmd.Start() - } - // Set the label of the conmon process to be level :s0 - // This will allow the container processes to talk to fifo-files - // passed into the container by conmon - var ( - plabel string - con selinux.Context - err error - ) - plabel, err = selinux.CurrentLabel() - if err != nil { - return errors.Wrapf(err, "Failed to get current SELinux label") - } - - con, err = selinux.NewContext(plabel) - if err != nil { - return errors.Wrapf(err, "Failed to get new context from SELinux label") - } - - runtime.LockOSThread() - if con["level"] != "s0" && con["level"] != "" { - con["level"] = "s0" - if err = label.SetProcessLabel(con.Get()); err != nil { - runtime.UnlockOSThread() - return err - } - } - err = cmd.Start() - // Ignore error returned from SetProcessLabel("") call, - // can't recover. 
- if labelErr := label.SetProcessLabel(""); labelErr != nil { - logrus.Errorf("unable to set process label: %q", err) - } - runtime.UnlockOSThread() - return err -} - -// moveConmonToCgroupAndSignal gets a container's cgroupParent and moves the conmon process to that cgroup -// it then signals for conmon to start by sending nonse data down the start fd -func (r *OCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec.Cmd, startFd *os.File, uuid string) error { - cgroupParent := ctr.CgroupParent() - if r.cgroupManager == SystemdCgroupsManager { - unitName := createUnitName("libpod-conmon", ctr.ID()) - - realCgroupParent := cgroupParent - splitParent := strings.Split(cgroupParent, "/") - if strings.HasSuffix(cgroupParent, ".slice") && len(splitParent) > 1 { - realCgroupParent = splitParent[len(splitParent)-1] - } - - logrus.Infof("Running conmon under slice %s and unitName %s", realCgroupParent, unitName) - if err := utils.RunUnderSystemdScope(cmd.Process.Pid, realCgroupParent, unitName); err != nil { - logrus.Warnf("Failed to add conmon to systemd sandbox cgroup: %v", err) - } - } else { - cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon") - control, err := cgroups.New(cgroupPath, &spec.LinuxResources{}) - if err != nil { - logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err) - } else { - // we need to remove this defer and delete the cgroup once conmon exits - // maybe need a conmon monitor? - if err := control.AddPid(cmd.Process.Pid); err != nil { - logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err) - } - } - } - - /* We set the cgroup, now the child can start creating children */ - if err := writeConmonPipeData(startFd); err != nil { - return err - } - return nil -} - -// newPipe creates a unix socket pair for communication -func newPipe() (parent *os.File, child *os.File, err error) { - fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0) - if err != nil { - return nil, nil, err - } - return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil -} - -// readConmonPidFile attempts to read conmon's pid from its pid file -func readConmonPidFile(pidFile string) (int, error) { - // Let's try reading the Conmon pid at the same time. 
- if pidFile != "" { - contents, err := ioutil.ReadFile(pidFile) - if err != nil { - return -1, err - } - // Convert it to an int - conmonPID, err := strconv.Atoi(string(contents)) - if err != nil { - return -1, err - } - return conmonPID, nil - } - return 0, nil -} - -// readConmonPipeData attempts to read a syncInfo struct from the pipe -func readConmonPipeData(pipe *os.File, ociLog string) (int, error) { - // syncInfo is used to return data from monitor process to daemon - type syncInfo struct { - Data int `json:"data"` - Message string `json:"message,omitempty"` - } - - // Wait to get container pid from conmon - type syncStruct struct { - si *syncInfo - err error - } - ch := make(chan syncStruct) - go func() { - var si *syncInfo - rdr := bufio.NewReader(pipe) - b, err := rdr.ReadBytes('\n') - if err != nil { - ch <- syncStruct{err: err} - } - if err := json.Unmarshal(b, &si); err != nil { - ch <- syncStruct{err: err} - return - } - ch <- syncStruct{si: si} - }() - - data := -1 - select { - case ss := <-ch: - if ss.err != nil { - return -1, errors.Wrapf(ss.err, "error reading container (probably exited) json message") - } - logrus.Debugf("Received: %d", ss.si.Data) - if ss.si.Data < 0 { - if ociLog != "" { - ociLogData, err := ioutil.ReadFile(ociLog) - if err == nil { - var ociErr ociError - if err := json.Unmarshal(ociLogData, &ociErr); err == nil { - return ss.si.Data, getOCIRuntimeError(ociErr.Msg) - } - } - } - // If we failed to parse the JSON errors, then print the output as it is - if ss.si.Message != "" { - return ss.si.Data, getOCIRuntimeError(ss.si.Message) - } - return ss.si.Data, errors.Wrapf(define.ErrInternal, "container create failed") - } - data = ss.si.Data - case <-time.After(ContainerCreateTimeout): - return -1, errors.Wrapf(define.ErrInternal, "container creation timeout") - } - return data, nil -} - -func getOCIRuntimeError(runtimeMsg string) error { - if match, _ := regexp.MatchString(".*permission denied.*", runtimeMsg); match { - return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s", strings.Trim(runtimeMsg, "\n")) - } - if match, _ := regexp.MatchString(".*executable file not found in.*", runtimeMsg); match { - return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s", strings.Trim(runtimeMsg, "\n")) - } - return errors.Wrapf(define.ErrOCIRuntime, "%s", strings.Trim(runtimeMsg, "\n")) -} - -// writeConmonPipeData writes nonse data to a pipe -func writeConmonPipeData(pipe *os.File) error { - someData := []byte{0} - _, err := pipe.Write(someData) - return err -} - -// formatRuntimeOpts prepends opts passed to it with --runtime-opt for passing to conmon -func formatRuntimeOpts(opts ...string) []string { - args := make([]string, 0, len(opts)*2) - for _, o := range opts { - args = append(args, "--runtime-opt", o) - } - return args -} diff --git a/vendor/github.com/containers/libpod/libpod/oci_linux.go b/vendor/github.com/containers/libpod/libpod/oci_linux.go deleted file mode 100644 index 45365203e0..0000000000 --- a/vendor/github.com/containers/libpod/libpod/oci_linux.go +++ /dev/null @@ -1,526 +0,0 @@ -// +build linux - -package libpod - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "syscall" - "time" - - "github.com/containers/libpod/libpod/define" - "github.com/containers/libpod/pkg/errorhandling" - "github.com/containers/libpod/pkg/rootless" - "github.com/containers/libpod/pkg/util" - "github.com/containers/libpod/utils" - pmount "github.com/containers/storage/pkg/mount" - "github.com/pkg/errors" - 
"github.com/sirupsen/logrus" - "golang.org/x/sys/unix" - "k8s.io/client-go/tools/remotecommand" -) - -const unknownPackage = "Unknown" - -// makeAccessible changes the path permission and each parent directory to have --x--x--x -func makeAccessible(path string, uid, gid int) error { - for ; path != "/"; path = filepath.Dir(path) { - st, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - if int(st.Sys().(*syscall.Stat_t).Uid) == uid && int(st.Sys().(*syscall.Stat_t).Gid) == gid { - continue - } - if st.Mode()&0111 != 0111 { - if err := os.Chmod(path, st.Mode()|0111); err != nil { - return err - } - } - } - return nil -} - -// CreateContainer creates a container in the OCI runtime -// TODO terminal support for container -// Presently just ignoring conmon opts related to it -func (r *OCIRuntime) createContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) { - if len(ctr.config.IDMappings.UIDMap) != 0 || len(ctr.config.IDMappings.GIDMap) != 0 { - for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.VolumePath} { - if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil { - return err - } - } - - // if we are running a non privileged container, be sure to umount some kernel paths so they are not - // bind mounted inside the container at all. - if !ctr.config.Privileged && !rootless.IsRootless() { - ch := make(chan error) - go func() { - runtime.LockOSThread() - err := func() error { - fd, err := os.Open(fmt.Sprintf("/proc/%d/task/%d/ns/mnt", os.Getpid(), unix.Gettid())) - if err != nil { - return err - } - defer errorhandling.CloseQuiet(fd) - - // create a new mountns on the current thread - if err = unix.Unshare(unix.CLONE_NEWNS); err != nil { - return err - } - defer func() { - if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil { - logrus.Errorf("unable to clone new namespace: %q", err) - } - }() - - // don't spread our mounts around. We are setting only /sys to be slave - // so that the cleanup process is still able to umount the storage and the - // changes are propagated to the host. 
- err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "") - if err != nil { - return errors.Wrapf(err, "cannot make /sys slave") - } - - mounts, err := pmount.GetMounts() - if err != nil { - return err - } - for _, m := range mounts { - if !strings.HasPrefix(m.Mountpoint, "/sys/kernel") { - continue - } - err = unix.Unmount(m.Mountpoint, 0) - if err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "cannot unmount %s", m.Mountpoint) - } - } - return r.createOCIContainer(ctr, restoreOptions) - }() - ch <- err - }() - err := <-ch - return err - } - } - return r.createOCIContainer(ctr, restoreOptions) -} - -func rpmVersion(path string) string { - output := unknownPackage - cmd := exec.Command("/usr/bin/rpm", "-q", "-f", path) - if outp, err := cmd.Output(); err == nil { - output = string(outp) - } - return strings.Trim(output, "\n") -} - -func dpkgVersion(path string) string { - output := unknownPackage - cmd := exec.Command("/usr/bin/dpkg", "-S", path) - if outp, err := cmd.Output(); err == nil { - output = string(outp) - } - return strings.Trim(output, "\n") -} - -func (r *OCIRuntime) pathPackage() string { - if out := rpmVersion(r.path); out != unknownPackage { - return out - } - return dpkgVersion(r.path) -} - -func (r *OCIRuntime) conmonPackage() string { - if out := rpmVersion(r.conmonPath); out != unknownPackage { - return out - } - return dpkgVersion(r.conmonPath) -} - -// execContainer executes a command in a running container -// TODO: Add --detach support -// TODO: Convert to use conmon -// TODO: add --pid-file and use that to generate exec session tracking -func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty bool, cwd, user, sessionID string, streams *AttachStreams, preserveFDs int, resize chan remotecommand.TerminalSize, detachKeys string) (int, chan error, error) { - if len(cmd) == 0 { - return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute") - } - - if sessionID == "" { - return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec") - } - - // create sync pipe to receive the pid - parentSyncPipe, childSyncPipe, err := newPipe() - if err != nil { - return -1, nil, errors.Wrapf(err, "error creating socket pair") - } - - defer errorhandling.CloseQuiet(parentSyncPipe) - - // create start pipe to set the cgroup before running - // attachToExec is responsible for closing parentStartPipe - childStartPipe, parentStartPipe, err := newPipe() - if err != nil { - return -1, nil, errors.Wrapf(err, "error creating socket pair") - } - - // We want to make sure we close the parent{Start,Attach}Pipes if we fail - // but also don't want to close them after attach to exec is called - attachToExecCalled := false - - defer func() { - if !attachToExecCalled { - errorhandling.CloseQuiet(parentStartPipe) - } - }() - - // create the attach pipe to allow attach socket to be created before - // $RUNTIME exec starts running. 
This is to make sure we can capture all output - // from the process through that socket, rather than half reading the log, half attaching to the socket - // attachToExec is responsible for closing parentAttachPipe - parentAttachPipe, childAttachPipe, err := newPipe() - if err != nil { - return -1, nil, errors.Wrapf(err, "error creating socket pair") - } - - defer func() { - if !attachToExecCalled { - errorhandling.CloseQuiet(parentAttachPipe) - } - }() - - childrenClosed := false - defer func() { - if !childrenClosed { - errorhandling.CloseQuiet(childSyncPipe) - errorhandling.CloseQuiet(childAttachPipe) - errorhandling.CloseQuiet(childStartPipe) - } - }() - - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return -1, nil, err - } - - processFile, err := prepareProcessExec(c, cmd, env, tty, cwd, user, sessionID) - if err != nil { - return -1, nil, err - } - - var ociLog string - if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { - ociLog = c.execOCILog(sessionID) - } - args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog) - - if preserveFDs > 0 { - args = append(args, formatRuntimeOpts("--preserve-fds", string(preserveFDs))...) - } - - for _, capability := range capAdd { - args = append(args, formatRuntimeOpts("--cap", capability)...) - } - - if tty { - args = append(args, "-t") - } - - // Append container ID and command - args = append(args, "-e") - // TODO make this optional when we can detach - args = append(args, "--exec-attach") - args = append(args, "--exec-process-spec", processFile.Name()) - - logrus.WithFields(logrus.Fields{ - "args": args, - }).Debugf("running conmon: %s", r.conmonPath) - execCmd := exec.Command(r.conmonPath, args...) - - if streams.AttachInput { - execCmd.Stdin = streams.InputStream - } - if streams.AttachOutput { - execCmd.Stdout = streams.OutputStream - } - if streams.AttachError { - execCmd.Stderr = streams.ErrorStream - } - - conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir) - if err != nil { - return -1, nil, err - } - - // we don't want to step on users fds they asked to preserve - // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3 - execCmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", preserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", preserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", preserveFDs+5)) - execCmd.Env = append(execCmd.Env, conmonEnv...) - - execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe) - execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...) - execCmd.Dir = c.execBundlePath(sessionID) - execCmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - } - - if preserveFDs > 0 { - for fd := 3; fd < 3+preserveFDs; fd++ { - execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd))) - } - } - - err = startCommandGivenSelinux(execCmd) - - // We don't need children pipes on the parent side - errorhandling.CloseQuiet(childSyncPipe) - errorhandling.CloseQuiet(childAttachPipe) - errorhandling.CloseQuiet(childStartPipe) - childrenClosed = true - - if err != nil { - return -1, nil, errors.Wrapf(err, "cannot start container %s", c.ID()) - } - if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe, sessionID); err != nil { - return -1, nil, err - } - - if preserveFDs > 0 { - for fd := 3; fd < 3+preserveFDs; fd++ { - // These fds were passed down to the runtime. 
Close them - // and not interfere - if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil { - logrus.Debugf("unable to close file fd-%d", fd) - } - } - } - - // TODO Only create if !detach - // Attach to the container before starting it - attachChan := make(chan error) - go func() { - // attachToExec is responsible for closing pipes - attachChan <- c.attachToExec(streams, detachKeys, resize, sessionID, parentStartPipe, parentAttachPipe) - close(attachChan) - }() - attachToExecCalled = true - - pid, err := readConmonPipeData(parentSyncPipe, ociLog) - - return pid, attachChan, err -} - -// Wait for a container which has been sent a signal to stop -func waitContainerStop(ctr *Container, timeout time.Duration) error { - done := make(chan struct{}) - chControl := make(chan struct{}) - go func() { - for { - select { - case <-chControl: - return - default: - // Check if the process is still around - err := unix.Kill(ctr.state.PID, 0) - if err == unix.ESRCH { - close(done) - return - } - time.Sleep(100 * time.Millisecond) - } - } - }() - select { - case <-done: - return nil - case <-time.After(timeout): - close(chControl) - logrus.Debugf("container %s did not die within timeout %d", ctr.ID(), timeout) - return errors.Errorf("container %s did not die within timeout", ctr.ID()) - } -} - -// Wait for a set of given PIDs to stop -func waitPidsStop(pids []int, timeout time.Duration) error { - done := make(chan struct{}) - chControl := make(chan struct{}) - go func() { - for { - select { - case <-chControl: - return - default: - allClosed := true - for _, pid := range pids { - if err := unix.Kill(pid, 0); err != unix.ESRCH { - allClosed = false - break - } - } - if allClosed { - close(done) - return - } - time.Sleep(100 * time.Millisecond) - } - } - }() - select { - case <-done: - return nil - case <-time.After(timeout): - close(chControl) - return errors.Errorf("given PIDs did not die within timeout") - } -} - -// stopContainer stops a container, first using its given stop signal (or -// SIGTERM if no signal was specified), then using SIGKILL -// Timeout is given in seconds. If timeout is 0, the container will be -// immediately kill with SIGKILL -// Does not set finished time for container, assumes you will run updateStatus -// after to pull the exit code -func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error { - logrus.Debugf("Stopping container %s (PID %d)", ctr.ID(), ctr.state.PID) - - // Ping the container to see if it's alive - // If it's not, it's already stopped, return - err := unix.Kill(ctr.state.PID, 0) - if err == unix.ESRCH { - return nil - } - - stopSignal := ctr.config.StopSignal - if stopSignal == 0 { - stopSignal = uint(syscall.SIGTERM) - } - - if timeout > 0 { - if err := r.killContainer(ctr, stopSignal); err != nil { - // Is the container gone? - // If so, it probably died between the first check and - // our sending the signal - // The container is stopped, so exit cleanly - err := unix.Kill(ctr.state.PID, 0) - if err == unix.ESRCH { - return nil - } - - return err - } - - if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil { - logrus.Warnf("Timed out stopping container %s, resorting to SIGKILL", ctr.ID()) - } else { - // No error, the container is dead - return nil - } - } - - var args []string - if rootless.IsRootless() { - // we don't use --all for rootless containers as the OCI runtime might use - // the cgroups to determine the PIDs, but for rootless containers there is - // not any. 
- args = []string{"kill", ctr.ID(), "KILL"} - } else { - args = []string{"kill", "--all", ctr.ID(), "KILL"} - } - - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} - if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil { - // Again, check if the container is gone. If it is, exit cleanly. - err := unix.Kill(ctr.state.PID, 0) - if err == unix.ESRCH { - return nil - } - - return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID()) - } - - // Give runtime a few seconds to make it happen - if err := waitContainerStop(ctr, killContainerTimeout); err != nil { - return err - } - - return nil -} - -// execStopContainer stops all active exec sessions in a container -// It will also stop all other processes in the container. It is only intended -// to be used to assist in cleanup when removing a container. -// SIGTERM is used by default to stop processes. If SIGTERM fails, SIGKILL will be used. -func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error { - // Do we have active exec sessions? - if len(ctr.state.ExecSessions) == 0 { - return nil - } - - // Get a list of active exec sessions - execSessions := []int{} - for _, session := range ctr.state.ExecSessions { - pid := session.PID - // Ping the PID with signal 0 to see if it still exists - if err := unix.Kill(pid, 0); err == unix.ESRCH { - continue - } - - execSessions = append(execSessions, pid) - } - - // All the sessions may be dead - // If they are, just return - if len(execSessions) == 0 { - return nil - } - runtimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return err - } - env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} - - // If timeout is 0, just use SIGKILL - if timeout > 0 { - // Stop using SIGTERM by default - // Use SIGSTOP after a timeout - logrus.Debugf("Killing all processes in container %s with SIGTERM", ctr.ID()) - if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", "--all", ctr.ID(), "TERM"); err != nil { - return errors.Wrapf(err, "error sending SIGTERM to container %s processes", ctr.ID()) - } - - // Wait for all processes to stop - if err := waitPidsStop(execSessions, time.Duration(timeout)*time.Second); err != nil { - logrus.Warnf("Timed out stopping container %s exec sessions", ctr.ID()) - } else { - // No error, all exec sessions are dead - return nil - } - } - - // Send SIGKILL - logrus.Debugf("Killing all processes in container %s with SIGKILL", ctr.ID()) - if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", "--all", ctr.ID(), "KILL"); err != nil { - return errors.Wrapf(err, "error sending SIGKILL to container %s processes", ctr.ID()) - } - - // Give the processes a few seconds to go down - if err := waitPidsStop(execSessions, killContainerTimeout); err != nil { - return errors.Wrapf(err, "failed to kill container %s exec sessions", ctr.ID()) - } - - return nil -} diff --git a/vendor/github.com/containers/libpod/libpod/oci_missing.go b/vendor/github.com/containers/libpod/libpod/oci_missing.go new file mode 100644 index 0000000000..d4524cd349 --- /dev/null +++ b/vendor/github.com/containers/libpod/libpod/oci_missing.go @@ -0,0 +1,189 @@ +package libpod + +import ( + "fmt" + "path/filepath" + "sync" + + "github.com/containers/libpod/libpod/define" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + // Only create 
each missing runtime once. + // Creation makes error messages we don't want to duplicate. + missingRuntimes map[string]*MissingRuntime + // We need a lock for this + missingRuntimesLock sync.Mutex +) + +// MissingRuntime is used when the OCI runtime requested by the container is +// missing (not installed or not in the configuration file). +type MissingRuntime struct { + // Name is the name of the missing runtime. Will be used in errors. + name string + // exitsDir is the directory for exit files. + exitsDir string +} + +// Get a new MissingRuntime for the given name. +// Requires a libpod Runtime so we can make a sane path for the exits dir. +func getMissingRuntime(name string, r *Runtime) (OCIRuntime, error) { + missingRuntimesLock.Lock() + defer missingRuntimesLock.Unlock() + + if missingRuntimes == nil { + missingRuntimes = make(map[string]*MissingRuntime) + } + + runtime, ok := missingRuntimes[name] + if ok { + return runtime, nil + } + + // Once for each missing runtime, we want to error. + logrus.Errorf("OCI Runtime %s is in use by a container, but is not available (not in configuration file or not installed)", name) + + newRuntime := new(MissingRuntime) + newRuntime.name = name + newRuntime.exitsDir = filepath.Join(r.config.TmpDir, "exits") + + missingRuntimes[name] = newRuntime + + return newRuntime, nil +} + +// Name is the name of the missing runtime +func (r *MissingRuntime) Name() string { + return fmt.Sprintf("%s (missing/not available)", r.name) +} + +// Path is not available as the runtime is missing +func (r *MissingRuntime) Path() string { + return "(missing/not available)" +} + +// CreateContainer is not available as the runtime is missing +func (r *MissingRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error { + return r.printError() +} + +// UpdateContainerStatus is not available as the runtime is missing +func (r *MissingRuntime) UpdateContainerStatus(ctr *Container) error { + return r.printError() +} + +// StartContainer is not available as the runtime is missing +func (r *MissingRuntime) StartContainer(ctr *Container) error { + return r.printError() +} + +// KillContainer is not available as the runtime is missing +// TODO: We could attempt to unix.Kill() the PID as recorded in the state if we +// really want to smooth things out? Won't be perfect, but if the container has +// a PID namespace it could be enough? +func (r *MissingRuntime) KillContainer(ctr *Container, signal uint, all bool) error { + return r.printError() +} + +// StopContainer is not available as the runtime is missing +func (r *MissingRuntime) StopContainer(ctr *Container, timeout uint, all bool) error { + return r.printError() +} + +// DeleteContainer is not available as the runtime is missing +func (r *MissingRuntime) DeleteContainer(ctr *Container) error { + return r.printError() +} + +// PauseContainer is not available as the runtime is missing +func (r *MissingRuntime) PauseContainer(ctr *Container) error { + return r.printError() +} + +// UnpauseContainer is not available as the runtime is missing +func (r *MissingRuntime) UnpauseContainer(ctr *Container) error { + return r.printError() +} + +// ExecContainer is not available as the runtime is missing +func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error) { + return -1, nil, r.printError() +} + +// ExecStopContainer is not available as the runtime is missing. 
+// TODO: We can also investigate using unix.Kill() on the PID of the exec +// session here if we want to make stopping containers possible. Won't be +// perfect, though. +func (r *MissingRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error { + return r.printError() +} + +// ExecContainerCleanup is not available as the runtime is missing +func (r *MissingRuntime) ExecContainerCleanup(ctr *Container, sessionID string) error { + return r.printError() +} + +// CheckpointContainer is not available as the runtime is missing +func (r *MissingRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error { + return r.printError() +} + +// SupportsCheckpoint returns false as checkpointing requires a working runtime +func (r *MissingRuntime) SupportsCheckpoint() bool { + return false +} + +// SupportsJSONErrors returns false as there is no runtime to give errors +func (r *MissingRuntime) SupportsJSONErrors() bool { + return false +} + +// SupportsNoCgroups returns false as there is no runtime to create containers +func (r *MissingRuntime) SupportsNoCgroups() bool { + return false +} + +// AttachSocketPath does not work as there is no runtime to attach to. +// (Theoretically we could follow ExitFilePath but there is no guarantee the +// container is running and thus has an attach socket...) +func (r *MissingRuntime) AttachSocketPath(ctr *Container) (string, error) { + return "", r.printError() +} + +// ExecAttachSocketPath does not work as there is no runtime to attach to. +// (Again, we could follow ExitFilePath, but no guarantee there is an existing +// and running exec session) +func (r *MissingRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) { + return "", r.printError() +} + +// ExitFilePath returns the exit file path for containers. +// Here, we mimic what ConmonOCIRuntime does, because there is a chance that the +// container in question is still running happily (config file modified to +// remove a runtime, for example). We can't find the runtime to do anything to +// the container, but Conmon should still place an exit file for it. 
+func (r *MissingRuntime) ExitFilePath(ctr *Container) (string, error) { + if ctr == nil { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get exit file path") + } + return filepath.Join(r.exitsDir, ctr.ID()), nil +} + +// RuntimeInfo returns information on the missing runtime +func (r *MissingRuntime) RuntimeInfo() (map[string]interface{}, error) { + info := make(map[string]interface{}) + info["OCIRuntime"] = map[string]interface{}{ + "name": r.name, + "path": "missing", + "package": "missing", + "version": "missing", + } + return info, nil +} + +// Return an error indicating the runtime is missing +func (r *MissingRuntime) printError() error { + return errors.Wrapf(define.ErrOCIRuntimeNotFound, "runtime %s is missing", r.name) +} diff --git a/vendor/github.com/containers/libpod/libpod/oci_unsupported.go b/vendor/github.com/containers/libpod/libpod/oci_unsupported.go deleted file mode 100644 index 4a65d4d1df..0000000000 --- a/vendor/github.com/containers/libpod/libpod/oci_unsupported.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !linux - -package libpod - -import ( - "os" - "os/exec" - - "github.com/containers/libpod/libpod/define" - "k8s.io/client-go/tools/remotecommand" -) - -func (r *OCIRuntime) moveConmonToCgroup(ctr *Container, cgroupParent string, cmd *exec.Cmd) error { - return define.ErrOSNotSupported -} - -func newPipe() (parent *os.File, child *os.File, err error) { - return nil, nil, define.ErrNotImplemented -} - -func (r *OCIRuntime) createContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) { - return define.ErrNotImplemented -} - -func (r *OCIRuntime) pathPackage() string { - return "" -} - -func (r *OCIRuntime) conmonPackage() string { - return "" -} - -func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) { - return define.ErrOSNotSupported -} - -func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error { - return define.ErrOSNotSupported -} - -func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error { - return define.ErrOSNotSupported -} - -func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty bool, cwd, user, sessionID string, streams *AttachStreams, preserveFDs int, resize chan remotecommand.TerminalSize, detachKeys string) (int, chan error, error) { - return -1, nil, define.ErrOSNotSupported -} diff --git a/vendor/github.com/containers/libpod/libpod/oci_util.go b/vendor/github.com/containers/libpod/libpod/oci_util.go new file mode 100644 index 0000000000..c1a7f1c9a3 --- /dev/null +++ b/vendor/github.com/containers/libpod/libpod/oci_util.go @@ -0,0 +1,93 @@ +package libpod + +import ( + "fmt" + "net" + "os" + "regexp" + "strings" + "time" + + "github.com/containers/libpod/libpod/define" + "github.com/cri-o/ocicni/pkg/ocicni" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Timeout before declaring that runtime has failed to kill a given +// container +const killContainerTimeout = 5 * time.Second + +// ociError is used to parse the OCI runtime JSON log. 
It is not part of the +// OCI runtime specifications, it follows what runc does +type ociError struct { + Level string `json:"level,omitempty"` + Time string `json:"time,omitempty"` + Msg string `json:"msg,omitempty"` +} + +// Create systemd unit name for cgroup scopes +func createUnitName(prefix string, name string) string { + return fmt.Sprintf("%s-%s.scope", prefix, name) +} + +// Bind ports to keep them closed on the host +func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) { + var files []*os.File + notifySCTP := false + for _, i := range ports { + switch i.Protocol { + case "udp": + addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort)) + if err != nil { + return nil, errors.Wrapf(err, "cannot resolve the UDP address") + } + + server, err := net.ListenUDP("udp", addr) + if err != nil { + return nil, errors.Wrapf(err, "cannot listen on the UDP port") + } + f, err := server.File() + if err != nil { + return nil, errors.Wrapf(err, "cannot get file for UDP socket") + } + files = append(files, f) + + case "tcp": + addr, err := net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort)) + if err != nil { + return nil, errors.Wrapf(err, "cannot resolve the TCP address") + } + + server, err := net.ListenTCP("tcp4", addr) + if err != nil { + return nil, errors.Wrapf(err, "cannot listen on the TCP port") + } + f, err := server.File() + if err != nil { + return nil, errors.Wrapf(err, "cannot get file for TCP socket") + } + files = append(files, f) + case "sctp": + if !notifySCTP { + notifySCTP = true + logrus.Warnf("port reservation for SCTP is not supported") + } + default: + return nil, fmt.Errorf("unknown protocol %s", i.Protocol) + + } + } + return files, nil +} + +func getOCIRuntimeError(runtimeMsg string) error { + r := strings.ToLower(runtimeMsg) + if match, _ := regexp.MatchString(".*permission denied.*|.*operation not permitted.*", r); match { + return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s", strings.Trim(runtimeMsg, "\n")) + } + if match, _ := regexp.MatchString(".*executable file not found in.*|.*no such file or directory.*", r); match { + return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s", strings.Trim(runtimeMsg, "\n")) + } + return errors.Wrapf(define.ErrOCIRuntime, "%s", strings.Trim(runtimeMsg, "\n")) +} diff --git a/vendor/github.com/containers/libpod/libpod/options.go b/vendor/github.com/containers/libpod/libpod/options.go index 7fbd0016aa..66e8ef93cd 100644 --- a/vendor/github.com/containers/libpod/libpod/options.go +++ b/vendor/github.com/containers/libpod/libpod/options.go @@ -7,7 +7,7 @@ import ( "regexp" "syscall" - "github.com/containers/image/manifest" + "github.com/containers/image/v5/manifest" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/namespaces" @@ -20,8 +20,8 @@ import ( ) var ( - nameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") - regexError = errors.Wrapf(define.ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") + NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + RegexError = errors.Wrapf(define.ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") ) // Runtime Creation Options @@ -39,30 +39,30 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption { if config.RunRoot != "" { rt.config.StorageConfig.RunRoot = config.RunRoot - rt.configuredFrom.storageRunRootSet = true + rt.config.StorageConfigRunRootSet = true setField = true } if config.GraphRoot != "" { 
rt.config.StorageConfig.GraphRoot = config.GraphRoot - rt.configuredFrom.storageGraphRootSet = true + rt.config.StorageConfigGraphRootSet = true // Also set libpod static dir, so we are a subdirectory // of the c/storage store by default rt.config.StaticDir = filepath.Join(config.GraphRoot, "libpod") - rt.configuredFrom.libpodStaticDirSet = true + rt.config.StaticDirSet = true // Also set libpod volume path, so we are a subdirectory // of the c/storage store by default rt.config.VolumePath = filepath.Join(config.GraphRoot, "volumes") - rt.configuredFrom.volPathSet = true + rt.config.VolumePathSet = true setField = true } if config.GraphDriverName != "" { rt.config.StorageConfig.GraphDriverName = config.GraphDriverName - rt.configuredFrom.storageGraphDriverSet = true + rt.config.StorageConfigGraphDriverNameSet = true setField = true } @@ -135,13 +135,13 @@ func WithSignaturePolicy(path string) RuntimeOption { // Please note that information is not portable between backing states. // As such, if this differs between two libpods running on the same system, // they will not share containers, and unspecified behavior may occur. -func WithStateType(storeType RuntimeStateStore) RuntimeOption { +func WithStateType(storeType define.RuntimeStateStore) RuntimeOption { return func(rt *Runtime) error { if rt.valid { return define.ErrRuntimeFinalized } - if storeType == InvalidStateStore { + if storeType == define.InvalidStateStore { return errors.Wrapf(define.ErrInvalidArg, "must provide a valid state store type") } @@ -224,9 +224,9 @@ func WithCgroupManager(manager string) RuntimeOption { return define.ErrRuntimeFinalized } - if manager != CgroupfsCgroupsManager && manager != SystemdCgroupsManager { + if manager != define.CgroupfsCgroupsManager && manager != define.SystemdCgroupsManager { return errors.Wrapf(define.ErrInvalidArg, "CGroup manager must be one of %s and %s", - CgroupfsCgroupsManager, SystemdCgroupsManager) + define.CgroupfsCgroupsManager, define.SystemdCgroupsManager) } rt.config.CgroupManager = manager @@ -244,7 +244,7 @@ func WithStaticDir(dir string) RuntimeOption { } rt.config.StaticDir = dir - rt.configuredFrom.libpodStaticDirSet = true + rt.config.StaticDirSet = true return nil } @@ -295,7 +295,7 @@ func WithTmpDir(dir string) RuntimeOption { return define.ErrRuntimeFinalized } rt.config.TmpDir = dir - rt.configuredFrom.libpodTmpDirSet = true + rt.config.TmpDirSet = true return nil } @@ -395,7 +395,7 @@ func WithVolumePath(volPath string) RuntimeOption { } rt.config.VolumePath = volPath - rt.configuredFrom.volPathSet = true + rt.config.VolumePathSet = true return nil } @@ -463,6 +463,28 @@ func WithMigrate() RuntimeOption { } } +// WithMigrateRuntime instructs Libpod to change the default OCI runtime on all +// containers during a migration. This is not used if `MigrateRuntime()` is not +// also passed. +// Libpod makes no promises that your containers continue to work with the new +// runtime - migrations between dissimilar runtimes may well break things. +// Use with caution. +func WithMigrateRuntime(requestedRuntime string) RuntimeOption { + return func(rt *Runtime) error { + if rt.valid { + return define.ErrRuntimeFinalized + } + + if requestedRuntime == "" { + return errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty name for new runtime") + } + + rt.migrateRuntime = requestedRuntime + + return nil + } +} + // WithEventsLogger sets the events backend to use. // Currently supported values are "file" for file backend and "journald" for // journald backend. 
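The new WithMigrateRuntime option above is applied at runtime construction through the usual functional-option pattern, together with the existing migrate option. A hedged sketch of a caller, assuming "crun" is an example runtime name configured in libpod.conf; NewRuntime and Shutdown are the existing libpod entry points:

package main

import (
	"context"

	"github.com/containers/libpod/libpod"
)

func main() {
	// Request a system migration and, as part of it, move all containers
	// to a different OCI runtime. "crun" is only an illustrative name; it
	// must match a runtime known to the libpod configuration.
	rt, err := libpod.NewRuntime(
		context.Background(),
		libpod.WithMigrate(),
		libpod.WithMigrateRuntime("crun"),
	)
	if err != nil {
		panic(err)
	}
	// Error ignored for brevity in this sketch.
	defer rt.Shutdown(false)
}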
@@ -482,6 +504,15 @@ func WithEventsLogger(logger string) RuntimeOption { } } +// WithEnableSDNotify sets a runtime option so we know whether to disable socket/FD +// listening +func WithEnableSDNotify() RuntimeOption { + return func(rt *Runtime) error { + rt.config.SDNotify = true + return nil + } +} + // Container Creation Options // WithShmDir sets the directory that should be mounted on /dev/shm. @@ -639,8 +670,8 @@ func WithName(name string) CtrCreateOption { } // Check the name against a regex - if !nameRegex.MatchString(name) { - return regexError + if !NameRegex.MatchString(name) { + return RegexError } ctr.config.Name = name @@ -838,6 +869,10 @@ func WithPIDNSFrom(nsCtr *Container) CtrCreateOption { return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) } + if ctr.config.NoCgroups { + return errors.Wrapf(define.ErrInvalidArg, "container has disabled creation of CGroups, which is incompatible with sharing a PID namespace") + } + ctr.config.PIDNsCtr = nsCtr.ID() return nil @@ -979,6 +1014,13 @@ func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmo ctr.config.NetMode = namespaces.NetworkMode(netmode) ctr.config.CreateNetNS = true ctr.config.PortMappings = portMappings + + if rootless.IsRootless() { + if len(networks) > 0 { + return errors.New("cannot use CNI networks with rootless containers") + } + } + ctr.config.Networks = networks return nil @@ -1047,6 +1089,27 @@ func WithLogPath(path string) CtrCreateOption { } } +// WithNoCgroups disables the creation of CGroups for the new container. +func WithNoCgroups() CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + + if ctr.config.CgroupParent != "" { + return errors.Wrapf(define.ErrInvalidArg, "NoCgroups conflicts with CgroupParent") + } + + if ctr.config.PIDNsCtr != "" { + return errors.Wrapf(define.ErrInvalidArg, "NoCgroups requires a private PID namespace and cannot be used when PID namespace is shared with another container") + } + + ctr.config.NoCgroups = true + + return nil + } +} + // WithCgroupParent sets the Cgroup Parent of the new container. 
func WithCgroupParent(parent string) CtrCreateOption { return func(ctr *Container) error { @@ -1058,6 +1121,10 @@ func WithCgroupParent(parent string) CtrCreateOption { return errors.Wrapf(define.ErrInvalidArg, "cgroup parent cannot be empty") } + if ctr.config.NoCgroups { + return errors.Wrapf(define.ErrInvalidArg, "CgroupParent conflicts with NoCgroups") + } + ctr.config.CgroupParent = parent return nil @@ -1351,10 +1418,15 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption { } destinations[vol.Dest] = true + mountOpts, err := util.ProcessOptions(vol.Options, false, nil) + if err != nil { + return errors.Wrapf(err, "error processing options for named volume %q mounted at %q", vol.Name, vol.Dest) + } + ctr.config.NamedVolumes = append(ctr.config.NamedVolumes, &ContainerNamedVolume{ Name: vol.Name, Dest: vol.Dest, - Options: util.ProcessOptions(vol.Options), + Options: mountOpts, }) } @@ -1362,6 +1434,17 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption { } } +// WithHealthCheck adds the healthcheck to the container config +func WithHealthCheck(healthCheck *manifest.Schema2HealthConfig) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + ctr.config.HealthCheckConfig = healthCheck + return nil + } +} + // Volume Creation Options // WithVolumeName sets the name of the volume. @@ -1372,8 +1455,8 @@ func WithVolumeName(name string) VolumeCreateOption { } // Check the name against a regex - if !nameRegex.MatchString(name) { - return regexError + if !NameRegex.MatchString(name) { + return RegexError } volume.config.Name = name @@ -1381,36 +1464,38 @@ func WithVolumeName(name string) VolumeCreateOption { } } -// WithVolumeLabels sets the labels of the volume. -func WithVolumeLabels(labels map[string]string) VolumeCreateOption { +// WithVolumeDriver sets the volume's driver. +// It is presently not implemented, but will be supported in a future Podman +// release. +func WithVolumeDriver(driver string) VolumeCreateOption { return func(volume *Volume) error { if volume.valid { return define.ErrVolumeFinalized } - volume.config.Labels = make(map[string]string) - for key, value := range labels { - volume.config.Labels[key] = value - } - - return nil + return define.ErrNotImplemented } } -// WithVolumeDriver sets the driver of the volume. -func WithVolumeDriver(driver string) VolumeCreateOption { +// WithVolumeLabels sets the labels of the volume. +func WithVolumeLabels(labels map[string]string) VolumeCreateOption { return func(volume *Volume) error { if volume.valid { return define.ErrVolumeFinalized } - volume.config.Driver = driver + volume.config.Labels = make(map[string]string) + for key, value := range labels { + volume.config.Labels[key] = value + } return nil } } // WithVolumeOptions sets the options of the volume. +// If the "local" driver has been selected, options will be validated. There are +// currently 3 valid options for the "local" driver - o, type, and device. 
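The local-driver validation that WithVolumeOptions gains below accepts exactly three keys: "type", "device", and "o". A sketch of a call that would pass that validation; the tmpfs values and the NewVolume usage are illustrative assumptions rather than part of this patch:

package main

import (
	"context"

	"github.com/containers/libpod/libpod"
)

// createScratchVolume builds a named volume whose options survive the
// "local" driver validation: only "type", "device" and "o" are allowed.
func createScratchVolume(ctx context.Context, rt *libpod.Runtime) (*libpod.Volume, error) {
	opts := map[string]string{
		"type":   "tmpfs",    // filesystem type passed to mount
		"device": "tmpfs",    // mount source
		"o":      "size=64m", // comma-separated mount options
	}
	return rt.NewVolume(ctx,
		libpod.WithVolumeName("scratch"),
		libpod.WithVolumeOptions(opts),
	)
}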
func WithVolumeOptions(options map[string]string) VolumeCreateOption { return func(volume *Volume) error { if volume.valid { @@ -1419,6 +1504,13 @@ func WithVolumeOptions(options map[string]string) VolumeCreateOption { volume.config.Options = make(map[string]string) for key, value := range options { + switch key { + case "type", "device", "o": + volume.config.Options[key] = value + default: + return errors.Wrapf(define.ErrInvalidArg, "unrecognized volume option %q is not supported with local driver", key) + } + volume.config.Options[key] = value } @@ -1478,8 +1570,8 @@ func WithPodName(name string) PodCreateOption { } // Check the name against a regex - if !nameRegex.MatchString(name) { - return regexError + if !NameRegex.MatchString(name) { + return RegexError } pod.config.Name = name @@ -1488,6 +1580,24 @@ func WithPodName(name string) PodCreateOption { } } +// WithPodHostname sets the hostname of the pod. +func WithPodHostname(hostname string) PodCreateOption { + return func(pod *Pod) error { + if pod.valid { + return define.ErrPodFinalized + } + + // Check the hostname against a regex + if !NameRegex.MatchString(hostname) { + return RegexError + } + + pod.config.Hostname = hostname + + return nil + } +} + // WithPodLabels sets the labels of a pod. func WithPodLabels(labels map[string]string) PodCreateOption { return func(pod *Pod) error { @@ -1673,14 +1783,3 @@ func WithInfraContainerPorts(bindings []ocicni.PortMapping) PodCreateOption { return nil } } - -// WithHealthCheck adds the healthcheck to the container config -func WithHealthCheck(healthCheck *manifest.Schema2HealthConfig) CtrCreateOption { - return func(ctr *Container) error { - if ctr.valid { - return define.ErrCtrFinalized - } - ctr.config.HealthCheckConfig = healthCheck - return nil - } -} diff --git a/vendor/github.com/containers/libpod/libpod/pod.go b/vendor/github.com/containers/libpod/libpod/pod.go index 60626bfd74..3b9bb9c600 100644 --- a/vendor/github.com/containers/libpod/libpod/pod.go +++ b/vendor/github.com/containers/libpod/libpod/pod.go @@ -36,6 +36,8 @@ type PodConfig struct { // Namespace the pod is in Namespace string `json:"namespace,omitempty"` + Hostname string `json:"hostname,omitempty"` + // Labels contains labels applied to the pod Labels map[string]string `json:"labels"` // CgroupParent contains the pod's CGroup parent diff --git a/vendor/github.com/containers/libpod/libpod/pod_api.go b/vendor/github.com/containers/libpod/libpod/pod_api.go index c7b0353bdc..3a194f04b2 100644 --- a/vendor/github.com/containers/libpod/libpod/pod_api.go +++ b/vendor/github.com/containers/libpod/libpod/pod_api.go @@ -5,6 +5,8 @@ import ( "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" + "github.com/containers/libpod/pkg/cgroups" + "github.com/containers/libpod/pkg/rootless" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -37,7 +39,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) { } // Build a dependency graph of containers in the pod - graph, err := buildContainerGraph(allCtrs) + graph, err := BuildContainerGraph(allCtrs) if err != nil { return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID()) } @@ -121,7 +123,7 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m if timeout > -1 { stopTimeout = uint(timeout) } - if err := ctr.stop(stopTimeout); err != nil { + if err := ctr.stop(stopTimeout, false); err != nil { ctr.lock.Unlock() ctrErrors[ctr.ID()] = err continue @@ -163,6 +165,16 @@ func 
(p *Pod) Pause() (map[string]error, error) { return nil, define.ErrPodRemoved } + if rootless.IsRootless() { + cgroupv2, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return nil, errors.Wrap(err, "failed to determine cgroup version") + } + if !cgroupv2 { + return nil, errors.Wrap(define.ErrNoCgroups, "cannot pause pods containing rootless containers with cgroup V1") + } + } + allCtrs, err := p.runtime.state.PodContainers(p) if err != nil { return nil, err } @@ -289,7 +301,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) { } // Build a dependency graph of containers in the pod - graph, err := buildContainerGraph(allCtrs) + graph, err := BuildContainerGraph(allCtrs) if err != nil { return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID()) } @@ -358,7 +370,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) { continue } - if err := ctr.ociRuntime.killContainer(ctr, signal); err != nil { + if err := ctr.ociRuntime.KillContainer(ctr, signal, false); err != nil { ctr.lock.Unlock() ctrErrors[ctr.ID()] = err continue diff --git a/vendor/github.com/containers/libpod/libpod/pod_internal.go b/vendor/github.com/containers/libpod/libpod/pod_internal.go index 7aacda4827..b5895d133c 100644 --- a/vendor/github.com/containers/libpod/libpod/pod_internal.go +++ b/vendor/github.com/containers/libpod/libpod/pod_internal.go @@ -66,13 +66,13 @@ func (p *Pod) refresh() error { // We need to recreate the pod's cgroup if p.config.UsePodCgroup { switch p.runtime.config.CgroupManager { - case SystemdCgroupsManager: + case define.SystemdCgroupsManager: cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID())) if err != nil { logrus.Errorf("Error creating CGroup for pod %s: %v", p.ID(), err) } p.state.CgroupPath = cgroupPath - case CgroupfsCgroupsManager: + case define.CgroupfsCgroupsManager: p.state.CgroupPath = filepath.Join(p.config.CgroupParent, p.ID()) logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath) diff --git a/vendor/github.com/containers/libpod/libpod/runtime.go b/vendor/github.com/containers/libpod/libpod/runtime.go index 8a4eee0817..42e6782e96 100644 --- a/vendor/github.com/containers/libpod/libpod/runtime.go +++ b/vendor/github.com/containers/libpod/libpod/runtime.go @@ -1,29 +1,21 @@ package libpod import ( - "bytes" "context" "fmt" - "io/ioutil" "os" - "os/exec" - "os/user" "path/filepath" - "regexp" - "strconv" "strings" "sync" "syscall" - "time" - "github.com/BurntSushi/toml" - is "github.com/containers/image/storage" - "github.com/containers/image/types" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/types" + "github.com/containers/libpod/libpod/config" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/libpod/lock" - "github.com/containers/libpod/pkg/firewall" sysreg "github.com/containers/libpod/pkg/registries" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" @@ -34,83 +26,24 @@ import ( "github.com/sirupsen/logrus" ) -// RuntimeStateStore is a constant indicating which state store implementation -// should be used by libpod -type RuntimeStateStore int - -const ( - // InvalidStateStore is an invalid state store - InvalidStateStore RuntimeStateStore = iota - // InMemoryStateStore is an in-memory state that will not persist data - // on containers and pods between libpod instances or after system
- // reboot - InMemoryStateStore RuntimeStateStore = iota - // SQLiteStateStore is a state backed by a SQLite database - // It is presently disabled - SQLiteStateStore RuntimeStateStore = iota - // BoltDBStateStore is a state backed by a BoltDB database - BoltDBStateStore RuntimeStateStore = iota -) - -var ( - // InstallPrefix is the prefix where podman will be installed. - // It can be overridden at build time. - installPrefix = "/usr" - // EtcDir is the sysconfdir where podman should look for system config files. - // It can be overridden at build time. - etcDir = "/etc" - - // SeccompDefaultPath defines the default seccomp path - SeccompDefaultPath = installPrefix + "/share/containers/seccomp.json" - // SeccompOverridePath if this exists it overrides the default seccomp path - SeccompOverridePath = etcDir + "/crio/seccomp.json" - - // ConfigPath is the path to the libpod configuration file - // This file is loaded to replace the builtin default config before - // runtime options (e.g. WithStorageConfig) are applied. - // If it is not present, the builtin default config is used instead - // This path can be overridden when the runtime is created by using - // NewRuntimeFromConfig() instead of NewRuntime() - ConfigPath = installPrefix + "/share/containers/libpod.conf" - // OverrideConfigPath is the path to an override for the default libpod - // configuration file. If OverrideConfigPath exists, it will be used in - // place of the configuration file pointed to by ConfigPath. - OverrideConfigPath = etcDir + "/containers/libpod.conf" - - // DefaultInfraImage to use for infra container - - // DefaultInfraCommand to be run in an infra container - - // DefaultSHMLockPath is the default path for SHM locks - DefaultSHMLockPath = "/libpod_lock" - // DefaultRootlessSHMLockPath is the default path for rootless SHM locks - DefaultRootlessSHMLockPath = "/libpod_rootless_lock" - - // DefaultDetachKeys is the default keys sequence for detaching a - // container - DefaultDetachKeys = "ctrl-p,ctrl-q" -) - // A RuntimeOption is a functional option which alters the Runtime created by // NewRuntime type RuntimeOption func(*Runtime) error // Runtime is the core libpod runtime type Runtime struct { - config *RuntimeConfig + config *config.Config state State store storage.Store storageService *storageService imageContext *types.SystemContext - defaultOCIRuntime *OCIRuntime - ociRuntimes map[string]*OCIRuntime + defaultOCIRuntime OCIRuntime + ociRuntimes map[string]OCIRuntime netPlugin ocicni.CNIPlugin conmonPath string imageRuntime *image.Runtime - firewallBackend firewall.FirewallBackend lockManager lock.Manager - configuredFrom *runtimeConfiguredFrom // doRenumber indicates that the runtime should perform a lock renumber // during initialization. @@ -119,6 +52,10 @@ type Runtime struct { doRenumber bool doMigrate bool + // System migrate can move containers to a new runtime. + // We make no promises that these migrated containers work on the new + // runtime, though. + migrateRuntime string // valid indicates whether the runtime is ready to use. 
// valid is set to true when a runtime is returned from GetRuntime(), @@ -134,212 +71,9 @@ type Runtime struct { noStore bool } -// RuntimeConfig contains configuration options used to set up the runtime -type RuntimeConfig struct { - // StorageConfig is the configuration used by containers/storage - // Not included in on-disk config, use the dedicated containers/storage - // configuration file instead - StorageConfig storage.StoreOptions `toml:"-"` - // VolumePath is the default location that named volumes will be created - // under. This convention is followed by the default volume driver, but - // may not be by other drivers. - VolumePath string `toml:"volume_path"` - // ImageDefaultTransport is the default transport method used to fetch - // images - ImageDefaultTransport string `toml:"image_default_transport"` - // SignaturePolicyPath is the path to a signature policy to use for - // validating images - // If left empty, the containers/image default signature policy will - // be used - SignaturePolicyPath string `toml:"signature_policy_path,omitempty"` - // StateType is the type of the backing state store. - // Avoid using multiple values for this with the same containers/storage - // configuration on the same system. Different state types do not - // interact, and each will see a separate set of containers, which may - // cause conflicts in containers/storage - // As such this is not exposed via the config file - StateType RuntimeStateStore `toml:"-"` - // OCIRuntime is the OCI runtime to use. - OCIRuntime string `toml:"runtime"` - // OCIRuntimes are the set of configured OCI runtimes (default is runc) - OCIRuntimes map[string][]string `toml:"runtimes"` - // RuntimeSupportsJSON is the list of the OCI runtimes that support --format=json - RuntimeSupportsJSON []string `toml:"runtime_supports_json"` - // RuntimePath is the path to OCI runtime binary for launching - // containers. - // The first path pointing to a valid file will be used - // This is used only when there are no OCIRuntime/OCIRuntimes defined. It - // is used only to be backward compatible with older versions of Podman. - RuntimePath []string `toml:"runtime_path"` - // ConmonPath is the path to the Conmon binary used for managing - // containers - // The first path pointing to a valid file will be used - ConmonPath []string `toml:"conmon_path"` - // ConmonEnvVars are environment variables to pass to the Conmon binary - // when it is launched - ConmonEnvVars []string `toml:"conmon_env_vars"` - // CGroupManager is the CGroup Manager to use - // Valid values are "cgroupfs" and "systemd" - CgroupManager string `toml:"cgroup_manager"` - // InitPath is the path to the container-init binary. 
- InitPath string `toml:"init_path"` - // StaticDir is the path to a persistent directory to store container - // files - StaticDir string `toml:"static_dir"` - // TmpDir is the path to a temporary directory to store per-boot - // container files - // Must be stored in a tmpfs - TmpDir string `toml:"tmp_dir"` - // MaxLogSize is the maximum size of container logfiles - MaxLogSize int64 `toml:"max_log_size,omitempty"` - // NoPivotRoot sets whether to set no-pivot-root in the OCI runtime - NoPivotRoot bool `toml:"no_pivot_root"` - // CNIConfigDir sets the directory where CNI configuration files are - // stored - CNIConfigDir string `toml:"cni_config_dir"` - // CNIPluginDir sets a number of directories where the CNI network - // plugins can be located - CNIPluginDir []string `toml:"cni_plugin_dir"` - // CNIDefaultNetwork is the network name of the default CNI network - // to attach pods to - CNIDefaultNetwork string `toml:"cni_default_network,omitempty"` - // HooksDir holds paths to the directories containing hooks - // configuration files. When the same filename is present in in - // multiple directories, the file in the directory listed last in - // this slice takes precedence. - HooksDir []string `toml:"hooks_dir"` - // DefaultMountsFile is the path to the default mounts file for testing - // purposes only - DefaultMountsFile string `toml:"-"` - // Namespace is the libpod namespace to use. - // Namespaces are used to create scopes to separate containers and pods - // in the state. - // When namespace is set, libpod will only view containers and pods in - // the same namespace. All containers and pods created will default to - // the namespace set here. - // A namespace of "", the empty string, is equivalent to no namespace, - // and all containers and pods will be visible. - // The default namespace is "". - Namespace string `toml:"namespace,omitempty"` - - // InfraImage is the image a pod infra container will use to manage namespaces - InfraImage string `toml:"infra_image"` - // InfraCommand is the command run to start up a pod infra container - InfraCommand string `toml:"infra_command"` - // EnablePortReservation determines whether libpod will reserve ports on - // the host when they are forwarded to containers. - // When enabled, when ports are forwarded to containers, they are - // held open by conmon as long as the container is running, ensuring - // that they cannot be reused by other programs on the host. - // However, this can cause significant memory usage if a container has - // many ports forwarded to it. Disabling this can save memory. - EnablePortReservation bool `toml:"enable_port_reservation"` - // EnableLabeling indicates wether libpod will support container labeling - EnableLabeling bool `toml:"label"` - // NetworkCmdPath is the path to the slirp4netns binary - NetworkCmdPath string `toml:"network_cmd_path"` - - // NumLocks is the number of locks to make available for containers and - // pods. - NumLocks uint32 `toml:"num_locks,omitempty"` - - // LockType is the type of locking to use. - LockType string `toml:"lock_type,omitempty"` - - // EventsLogger determines where events should be logged - EventsLogger string `toml:"events_logger"` - // EventsLogFilePath is where the events log is stored. 
- EventsLogFilePath string `toml:"-events_logfile_path"` - //DetachKeys is the sequence of keys used to detach a container - DetachKeys string `toml:"detach_keys"` -} - -// runtimeConfiguredFrom is a struct used during early runtime init to help -// assemble the full RuntimeConfig struct from defaults. -// It indicated whether several fields in the runtime configuration were set -// explicitly. -// If they were not, we may override them with information from the database, -// if it exists and differs from what is present in the system already. -type runtimeConfiguredFrom struct { - storageGraphDriverSet bool - storageGraphRootSet bool - storageRunRootSet bool - libpodStaticDirSet bool - libpodTmpDirSet bool - volPathSet bool - conmonPath bool - conmonEnvVars bool - initPath bool - ociRuntimes bool - runtimePath bool - cniPluginDir bool - noPivotRoot bool - runtimeSupportsJSON bool - ociRuntime bool -} - -func defaultRuntimeConfig() (RuntimeConfig, error) { - storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) - if err != nil { - return RuntimeConfig{}, err - } - return RuntimeConfig{ - // Leave this empty so containers/storage will use its defaults - StorageConfig: storage.StoreOptions{}, - VolumePath: filepath.Join(storeOpts.GraphRoot, "volumes"), - ImageDefaultTransport: DefaultTransport, - StateType: BoltDBStateStore, - OCIRuntime: "runc", - OCIRuntimes: map[string][]string{ - "runc": { - "/usr/bin/runc", - "/usr/sbin/runc", - "/usr/local/bin/runc", - "/usr/local/sbin/runc", - "/sbin/runc", - "/bin/runc", - "/usr/lib/cri-o-runc/sbin/runc", - "/run/current-system/sw/bin/runc", - }, - }, - ConmonPath: []string{ - "/usr/libexec/podman/conmon", - "/usr/local/lib/podman/conmon", - "/usr/bin/conmon", - "/usr/sbin/conmon", - "/usr/local/bin/conmon", - "/usr/local/sbin/conmon", - "/run/current-system/sw/bin/conmon", - }, - ConmonEnvVars: []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - }, - InitPath: define.DefaultInitPath, - CgroupManager: SystemdCgroupsManager, - StaticDir: filepath.Join(storeOpts.GraphRoot, "libpod"), - TmpDir: "", - MaxLogSize: -1, - NoPivotRoot: false, - CNIConfigDir: etcDir + "/cni/net.d/", - CNIPluginDir: []string{"/usr/libexec/cni", "/usr/lib/cni", "/usr/local/lib/cni", "/opt/cni/bin"}, - InfraCommand: define.DefaultInfraCommand, - InfraImage: define.DefaultInfraImage, - EnablePortReservation: true, - EnableLabeling: true, - NumLocks: 2048, - EventsLogger: events.DefaultEventerType.String(), - DetachKeys: DefaultDetachKeys, - LockType: "shm", - }, nil -} - // SetXdgDirs ensures the XDG_RUNTIME_DIR env and XDG_CONFIG_HOME variables are set. // containers/image uses XDG_RUNTIME_DIR to locate the auth file, XDG_CONFIG_HOME is // use for the libpod.conf configuration file. -// SetXdgDirs internally calls EnableLinger() so that the user's processes are not -// killed once the session is terminated. EnableLinger() also attempts to -// get the runtime directory when XDG_RUNTIME_DIR is not specified. -// This function should only be called when running rootless. 
func SetXdgDirs() error { if !rootless.IsRootless() { return nil @@ -348,24 +82,9 @@ func SetXdgDirs() error { // Setup XDG_RUNTIME_DIR runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - runtimeDirLinger, err := rootless.EnableLinger() - if err != nil { - return errors.Wrapf(err, "error enabling user session") - } - if runtimeDir == "" && runtimeDirLinger != "" { - if _, err := os.Stat(runtimeDirLinger); err != nil && os.IsNotExist(err) { - chWait := make(chan error) - defer close(chWait) - if _, err := WaitForFile(runtimeDirLinger, chWait, time.Second*10); err != nil { - return errors.Wrapf(err, "waiting for directory '%s'", runtimeDirLinger) - } - } - runtimeDir = runtimeDirLinger - } - if runtimeDir == "" { var err error - runtimeDir, err = util.GetRootlessRuntimeDir() + runtimeDir, err = util.GetRuntimeDir() if err != nil { return err } @@ -374,40 +93,26 @@ func SetXdgDirs() error { return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR") } + if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" { + sessionAddr := filepath.Join(runtimeDir, "bus") + if _, err := os.Stat(sessionAddr); err == nil { + os.Setenv("DBUS_SESSION_BUS_ADDRESS", fmt.Sprintf("unix:path=%s", sessionAddr)) + } + } + // Setup XDG_CONFIG_HOME if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" { - if cfgHomeDir, err = util.GetRootlessConfigHomeDir(); err != nil { + cfgHomeDir, err := util.GetRootlessConfigHomeDir() + if err != nil { return err } - if err = os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil { + if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil { return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME") } } return nil } -func getDefaultTmpDir() (string, error) { - if !rootless.IsRootless() { - return "/var/run/libpod", nil - } - - rootlessRuntimeDir, err := util.GetRootlessRuntimeDir() - if err != nil { - return "", err - } - libpodRuntimeDir := filepath.Join(rootlessRuntimeDir, "libpod") - - if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil { - if !os.IsExist(err) { - return "", errors.Wrapf(err, "cannot mkdir %s", libpodRuntimeDir) - } else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil { - // The directory already exist, just set the sticky bit - return "", errors.Wrapf(err, "could not set sticky bit on %s", libpodRuntimeDir) - } - } - return filepath.Join(libpodRuntimeDir, "tmp"), nil -} - // NewRuntime creates a new container runtime // Options can be passed to override the default configuration for the runtime func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime, err error) { @@ -426,231 +131,14 @@ func NewRuntimeFromConfig(ctx context.Context, userConfigPath string, options .. return newRuntimeFromConfig(ctx, userConfigPath, options...) 
} -func homeDir() (string, error) { - home := os.Getenv("HOME") - if home == "" { - usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID())) - if err != nil { - return "", errors.Wrapf(err, "unable to resolve HOME directory") - } - home = usr.HomeDir - } - return home, nil -} - -func getRootlessConfigPath() (string, error) { - home, err := homeDir() - if err != nil { - return "", err - } - - return filepath.Join(home, ".config/containers/libpod.conf"), nil -} - -func getConfigPath() (string, error) { - if rootless.IsRootless() { - path, err := getRootlessConfigPath() - if err != nil { - return "", err - } - if _, err := os.Stat(path); err == nil { - return path, nil - } - return "", err - } - if _, err := os.Stat(OverrideConfigPath); err == nil { - // Use the override configuration path - return OverrideConfigPath, nil - } - if _, err := os.Stat(ConfigPath); err == nil { - return ConfigPath, nil - } - return "", nil -} - -// DefaultRuntimeConfig reads default config path and returns the RuntimeConfig -func DefaultRuntimeConfig() (*RuntimeConfig, error) { - configPath, err := getConfigPath() - if err != nil { - return nil, err - } - - contents, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, errors.Wrapf(err, "error reading configuration file %s", configPath) - } - - // This is ugly, but we need to decode twice. - // Once to check if libpod static and tmp dirs were explicitly - // set (not enough to check if they're not the default value, - // might have been explicitly configured to the default). - // A second time to actually get a usable config. - tmpConfig := new(RuntimeConfig) - if _, err := toml.Decode(string(contents), tmpConfig); err != nil { - return nil, errors.Wrapf(err, "error decoding configuration file %s", - configPath) - } - return tmpConfig, nil -} - func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ...RuntimeOption) (runtime *Runtime, err error) { runtime = new(Runtime) - runtime.config = new(RuntimeConfig) - runtime.configuredFrom = new(runtimeConfiguredFrom) - - // Copy the default configuration - tmpDir, err := getDefaultTmpDir() - if err != nil { - return nil, err - } - defRunConf, err := defaultRuntimeConfig() + conf, err := config.NewConfig(userConfigPath) if err != nil { return nil, err } - if err := JSONDeepCopy(defRunConf, runtime.config); err != nil { - return nil, errors.Wrapf(err, "error copying runtime default config") - } - runtime.config.TmpDir = tmpDir - - storageConf, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) - if err != nil { - return nil, errors.Wrapf(err, "error retrieving storage config") - } - runtime.config.StorageConfig = storageConf - runtime.config.StaticDir = filepath.Join(storageConf.GraphRoot, "libpod") - runtime.config.VolumePath = filepath.Join(storageConf.GraphRoot, "volumes") - - configPath, err := getConfigPath() - if err != nil { - return nil, err - } - if rootless.IsRootless() { - home, err := homeDir() - if err != nil { - return nil, err - } - if runtime.config.SignaturePolicyPath == "" { - newPath := filepath.Join(home, ".config/containers/policy.json") - if _, err := os.Stat(newPath); err == nil { - runtime.config.SignaturePolicyPath = newPath - } - } - } - - if userConfigPath != "" { - configPath = userConfigPath - if _, err := os.Stat(configPath); err != nil { - // If the user specified a config file, we must fail immediately - // when it doesn't exist - return nil, errors.Wrapf(err, "cannot stat %s", configPath) - } - } - - // 
If we have a valid configuration file, load it in - if configPath != "" { - contents, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, errors.Wrapf(err, "error reading configuration file %s", configPath) - } - - // This is ugly, but we need to decode twice. - // Once to check if libpod static and tmp dirs were explicitly - // set (not enough to check if they're not the default value, - // might have been explicitly configured to the default). - // A second time to actually get a usable config. - tmpConfig := new(RuntimeConfig) - if _, err := toml.Decode(string(contents), tmpConfig); err != nil { - return nil, errors.Wrapf(err, "error decoding configuration file %s", - configPath) - } - - if tmpConfig.StaticDir != "" { - runtime.configuredFrom.libpodStaticDirSet = true - } - if tmpConfig.TmpDir != "" { - runtime.configuredFrom.libpodTmpDirSet = true - } - if tmpConfig.VolumePath != "" { - runtime.configuredFrom.volPathSet = true - } - if tmpConfig.ConmonPath != nil { - runtime.configuredFrom.conmonPath = true - } - if tmpConfig.ConmonEnvVars != nil { - runtime.configuredFrom.conmonEnvVars = true - } - if tmpConfig.InitPath != "" { - runtime.configuredFrom.initPath = true - } - if tmpConfig.OCIRuntimes != nil { - runtime.configuredFrom.ociRuntimes = true - } - if tmpConfig.RuntimePath != nil { - runtime.configuredFrom.runtimePath = true - } - if tmpConfig.CNIPluginDir != nil { - runtime.configuredFrom.cniPluginDir = true - } - if tmpConfig.NoPivotRoot { - runtime.configuredFrom.noPivotRoot = true - } - if tmpConfig.RuntimeSupportsJSON != nil { - runtime.configuredFrom.runtimeSupportsJSON = true - } - if tmpConfig.OCIRuntime != "" { - runtime.configuredFrom.ociRuntime = true - } - - if _, err := toml.Decode(string(contents), runtime.config); err != nil { - return nil, errors.Wrapf(err, "error decoding configuration file %s", configPath) - } - } else if rootless.IsRootless() { - // If the configuration file was not found but we are running in rootless, a subset of the - // global config file is used. - for _, path := range []string{OverrideConfigPath, ConfigPath} { - contents, err := ioutil.ReadFile(path) - if err != nil { - // Ignore any error, the file might not be readable by us. 
- continue - } - tmpConfig := new(RuntimeConfig) - if _, err := toml.Decode(string(contents), tmpConfig); err != nil { - return nil, errors.Wrapf(err, "error decoding configuration file %s", path) - } - - // Cherry pick the settings we want from the global configuration - if !runtime.configuredFrom.conmonPath { - runtime.config.ConmonPath = tmpConfig.ConmonPath - } - if !runtime.configuredFrom.conmonEnvVars { - runtime.config.ConmonEnvVars = tmpConfig.ConmonEnvVars - } - if !runtime.configuredFrom.initPath { - runtime.config.InitPath = tmpConfig.InitPath - } - if !runtime.configuredFrom.ociRuntimes { - runtime.config.OCIRuntimes = tmpConfig.OCIRuntimes - } - if !runtime.configuredFrom.runtimePath { - runtime.config.RuntimePath = tmpConfig.RuntimePath - } - if !runtime.configuredFrom.cniPluginDir { - runtime.config.CNIPluginDir = tmpConfig.CNIPluginDir - } - if !runtime.configuredFrom.noPivotRoot { - runtime.config.NoPivotRoot = tmpConfig.NoPivotRoot - } - if !runtime.configuredFrom.runtimeSupportsJSON { - runtime.config.RuntimeSupportsJSON = tmpConfig.RuntimeSupportsJSON - } - if !runtime.configuredFrom.ociRuntime { - runtime.config.OCIRuntime = tmpConfig.OCIRuntime - } - - break - } - } + runtime.config = conf // Overwrite config with user-given configuration options for _, opt := range options { @@ -659,42 +147,6 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options .. } } - if rootless.IsRootless() && configPath == "" { - configPath, err := getRootlessConfigPath() - if err != nil { - return nil, err - } - - // storage.conf - storageConfFile, err := storage.DefaultConfigFile(rootless.IsRootless()) - if err != nil { - return nil, err - } - if _, err := os.Stat(storageConfFile); os.IsNotExist(err) { - if err := util.WriteStorageConfigFile(&runtime.config.StorageConfig, storageConfFile); err != nil { - return nil, errors.Wrapf(err, "cannot write config file %s", storageConfFile) - } - } - - if configPath != "" { - if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil { - return nil, err - } - file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) - if err != nil && !os.IsExist(err) { - return nil, errors.Wrapf(err, "cannot open file %s", configPath) - } - if err == nil { - defer file.Close() - enc := toml.NewEncoder(file) - if err := enc.Encode(runtime.config); err != nil { - if removeErr := os.Remove(configPath); removeErr != nil { - logrus.Debugf("unable to remove %s: %q", configPath, err) - } - } - } - } - } if err := makeRuntime(ctx, runtime); err != nil { return nil, err } @@ -721,9 +173,9 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) { } case "", "shm": - lockPath := DefaultSHMLockPath + lockPath := define.DefaultSHMLockPath if rootless.IsRootless() { - lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID()) + lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID()) } // Set up the lock manager manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks) @@ -757,83 +209,14 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) { return manager, nil } -// probeConmon calls conmon --version and verifies it is a new enough version for -// the runtime expectations podman currently has -func probeConmon(conmonBinary string) error { - cmd := exec.Command(conmonBinary, "--version") - var out bytes.Buffer - cmd.Stdout = &out - err := cmd.Run() - if err != nil { - return err - } - r := regexp.MustCompile(`^conmon version 
(?P<Major>\d+).(?P<Minor>\d+).(?P<Patch>\d+)`) - - matches := r.FindStringSubmatch(out.String()) - if len(matches) != 4 { - return errors.Wrapf(err, "conmon version changed format") - } - major, err := strconv.Atoi(matches[1]) - if err != nil || major < 1 { - return define.ErrConmonOutdated - } - // conmon used to be shipped with CRI-O, and was versioned along with it. - // even though the conmon that came with crio-1.9 to crio-1.15 has a higher - // version number than conmon 1.0.0, 1.0.0 is newer, so we need this check - minor, err := strconv.Atoi(matches[2]) - if err != nil || minor > 9 { - return define.ErrConmonOutdated - } - - return nil -} - // Make a new runtime based on the given configuration // Sets up containers/storage, state store, OCI runtime func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { // Find a working conmon binary - foundConmon := false - foundOutdatedConmon := false - for _, path := range runtime.config.ConmonPath { - stat, err := os.Stat(path) - if err != nil { - continue - } - if stat.IsDir() { - continue - } - if err := probeConmon(path); err != nil { - logrus.Warnf("conmon at %s invalid: %v", path, err) - foundOutdatedConmon = true - continue - } - foundConmon = true - runtime.conmonPath = path - logrus.Debugf("using conmon: %q", path) - break - } - - // Search the $PATH as last fallback - if !foundConmon { - if conmon, err := exec.LookPath("conmon"); err == nil { - if err := probeConmon(conmon); err != nil { - logrus.Warnf("conmon at %s is invalid: %v", conmon, err) - foundOutdatedConmon = true - } else { - foundConmon = true - runtime.conmonPath = conmon - logrus.Debugf("using conmon from $PATH: %q", conmon) - } - } - } - - if !foundConmon { - if foundOutdatedConmon { - return errors.Wrapf(define.ErrConmonOutdated, "please update to v1.0.0 or later") - } - return errors.Wrapf(define.ErrInvalidArg, - "could not find a working conmon binary (configured options: %v)", - runtime.config.ConmonPath) + if cPath, err := runtime.config.FindConmon(); err != nil { + return err + } else { + runtime.conmonPath = cPath } // Make the static files directory if it does not exist @@ -845,17 +228,22 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } } - // Set up the state + // Set up the state. + // + // TODO - if we further break out the state implementation into + // libpod/state, the config could take care of the code below. It + // would further allow to move the types and consts into a coherent + // package.
switch runtime.config.StateType { - case InMemoryStateStore: + case define.InMemoryStateStore: state, err := NewInMemoryState() if err != nil { return err } runtime.state = state - case SQLiteStateStore: + case define.SQLiteStateStore: return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled") - case BoltDBStateStore: + case define.BoltDBStateStore: dbPath := filepath.Join(runtime.config.StaticDir, "bolt_state.db") state, err := NewBoltState(dbPath, runtime) @@ -864,7 +252,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } runtime.state = state default: - return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed") + return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.StateType) } // Grab config from the database so we can reset some defaults @@ -873,52 +261,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { return errors.Wrapf(err, "error retrieving runtime configuration from database") } - // Reset defaults if they were not explicitly set - if !runtime.configuredFrom.storageGraphDriverSet && dbConfig.GraphDriver != "" { - if runtime.config.StorageConfig.GraphDriverName != dbConfig.GraphDriver && - runtime.config.StorageConfig.GraphDriverName != "" { - logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve", - runtime.config.StorageConfig.GraphDriverName, dbConfig.GraphDriver) - } - runtime.config.StorageConfig.GraphDriverName = dbConfig.GraphDriver - } - if !runtime.configuredFrom.storageGraphRootSet && dbConfig.StorageRoot != "" { - if runtime.config.StorageConfig.GraphRoot != dbConfig.StorageRoot && - runtime.config.StorageConfig.GraphRoot != "" { - logrus.Debugf("Overriding graph root %q with %q from database", - runtime.config.StorageConfig.GraphRoot, dbConfig.StorageRoot) - } - runtime.config.StorageConfig.GraphRoot = dbConfig.StorageRoot - } - if !runtime.configuredFrom.storageRunRootSet && dbConfig.StorageTmp != "" { - if runtime.config.StorageConfig.RunRoot != dbConfig.StorageTmp && - runtime.config.StorageConfig.RunRoot != "" { - logrus.Debugf("Overriding run root %q with %q from database", - runtime.config.StorageConfig.RunRoot, dbConfig.StorageTmp) - } - runtime.config.StorageConfig.RunRoot = dbConfig.StorageTmp - } - if !runtime.configuredFrom.libpodStaticDirSet && dbConfig.LibpodRoot != "" { - if runtime.config.StaticDir != dbConfig.LibpodRoot && runtime.config.StaticDir != "" { - logrus.Debugf("Overriding static dir %q with %q from database", runtime.config.StaticDir, dbConfig.LibpodRoot) - } - runtime.config.StaticDir = dbConfig.LibpodRoot - } - if !runtime.configuredFrom.libpodTmpDirSet && dbConfig.LibpodTmp != "" { - if runtime.config.TmpDir != dbConfig.LibpodTmp && runtime.config.TmpDir != "" { - logrus.Debugf("Overriding tmp dir %q with %q from database", runtime.config.TmpDir, dbConfig.LibpodTmp) - } - runtime.config.TmpDir = dbConfig.LibpodTmp - } - if !runtime.configuredFrom.volPathSet && dbConfig.VolumePath != "" { - if runtime.config.VolumePath != dbConfig.VolumePath && runtime.config.VolumePath != "" { - logrus.Debugf("Overriding volume path %q with %q from database", runtime.config.VolumePath, dbConfig.VolumePath) - } - runtime.config.VolumePath = dbConfig.VolumePath + if err := runtime.config.MergeDBConfig(dbConfig); err != nil { + return errors.Wrapf(err, "error merging database config into runtime config") } - runtime.config.EventsLogFilePath = 
filepath.Join(runtime.config.TmpDir, "events", "events.log") - logrus.Debugf("Using graph driver %s", runtime.config.StorageConfig.GraphDriverName) logrus.Debugf("Using graph root %s", runtime.config.StorageConfig.GraphRoot) logrus.Debugf("Using run root %s", runtime.config.StorageConfig.RunRoot) @@ -990,8 +336,18 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } } + // Make lookup tables for runtime support + supportsJSON := make(map[string]bool) + supportsNoCgroups := make(map[string]bool) + for _, r := range runtime.config.RuntimeSupportsJSON { + supportsJSON[r] = true + } + for _, r := range runtime.config.RuntimeSupportsNoCgroups { + supportsNoCgroups[r] = true + } + // Get us at least one working OCI runtime. - runtime.ociRuntimes = make(map[string]*OCIRuntime) + runtime.ociRuntimes = make(map[string]OCIRuntime) // Is the old runtime_path defined? if runtime.config.RuntimePath != nil { @@ -1007,15 +363,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { name := filepath.Base(runtime.config.RuntimePath[0]) - supportsJSON := false - for _, r := range runtime.config.RuntimeSupportsJSON { - if r == name { - supportsJSON = true - break - } - } + json := supportsJSON[name] + nocgroups := supportsNoCgroups[name] - ociRuntime, err := newOCIRuntime(name, runtime.config.RuntimePath, runtime.conmonPath, runtime.config, supportsJSON) + ociRuntime, err := newConmonOCIRuntime(name, runtime.config.RuntimePath, runtime.conmonPath, runtime.config, json, nocgroups) if err != nil { return err } @@ -1026,15 +377,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { // Initialize remaining OCI runtimes for name, paths := range runtime.config.OCIRuntimes { - supportsJSON := false - for _, r := range runtime.config.RuntimeSupportsJSON { - if r == name { - supportsJSON = true - break - } - } + json := supportsJSON[name] + nocgroups := supportsNoCgroups[name] - ociRuntime, err := newOCIRuntime(name, paths, runtime.conmonPath, runtime.config, supportsJSON) + ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.config, json, nocgroups) if err != nil { // Don't fatally error. 
// This will allow us to ship configs including optional @@ -1054,15 +400,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { if strings.HasPrefix(runtime.config.OCIRuntime, "/") { name := filepath.Base(runtime.config.OCIRuntime) - supportsJSON := false - for _, r := range runtime.config.RuntimeSupportsJSON { - if r == name { - supportsJSON = true - break - } - } + json := supportsJSON[name] + nocgroups := supportsNoCgroups[name] - ociRuntime, err := newOCIRuntime(name, []string{runtime.config.OCIRuntime}, runtime.conmonPath, runtime.config, supportsJSON) + ociRuntime, err := newConmonOCIRuntime(name, []string{runtime.config.OCIRuntime}, runtime.conmonPath, runtime.config, json, nocgroups) if err != nil { return err } @@ -1106,17 +447,6 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { runtime.netPlugin = netPlugin } - // Set up a firewall backend - backendType := "" - if rootless.IsRootless() { - backendType = "none" - } - fwBackend, err := firewall.GetBackend(backendType) - if err != nil { - return err - } - runtime.firewallBackend = fwBackend - // We now need to see if the system has restarted // We check for the presence of a file in our tmp directory to verify this // This check must be locked to prevent races @@ -1212,7 +542,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } // GetConfig returns a copy of the configuration used by the runtime -func (r *Runtime) GetConfig() (*RuntimeConfig, error) { +func (r *Runtime) GetConfig() (*config.Config, error) { r.lock.RLock() defer r.lock.RUnlock() @@ -1220,7 +550,7 @@ func (r *Runtime) GetConfig() (*RuntimeConfig, error) { return nil, define.ErrRuntimeStopped } - config := new(RuntimeConfig) + config := new(config.Config) // Copy so the caller won't be able to modify the actual config if err := JSONDeepCopy(r.config, config); err != nil { @@ -1437,3 +767,8 @@ func (r *Runtime) ImageRuntime() *image.Runtime { func (r *Runtime) SystemContext() *types.SystemContext { return r.imageContext } + +// GetOCIRuntimePath retrieves the path of the default OCI runtime. +func (r *Runtime) GetOCIRuntimePath() string { + return r.defaultOCIRuntime.Path() +} diff --git a/vendor/github.com/containers/libpod/libpod/runtime_cstorage.go b/vendor/github.com/containers/libpod/libpod/runtime_cstorage.go index 586db5a1e4..2d523a7d2c 100644 --- a/vendor/github.com/containers/libpod/libpod/runtime_cstorage.go +++ b/vendor/github.com/containers/libpod/libpod/runtime_cstorage.go @@ -1,6 +1,8 @@ package libpod import ( + "time" + "github.com/containers/libpod/libpod/define" "github.com/containers/storage" "github.com/pkg/errors" @@ -12,6 +14,8 @@ import ( type StorageContainer struct { ID string Names []string + Image string + CreateTime time.Time PresentInLibpod bool } @@ -31,6 +35,8 @@ func (r *Runtime) ListStorageContainers() ([]*StorageContainer, error) { storageCtr := new(StorageContainer) storageCtr.ID = ctr.ID storageCtr.Names = ctr.Names + storageCtr.Image = ctr.ImageID + storageCtr.CreateTime = ctr.Created // Look up if container is in state hasCtr, err := r.state.HasContainer(ctr.ID) @@ -54,9 +60,15 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error { r.lock.Lock() defer r.lock.Unlock() + return r.removeStorageContainer(idOrName, force) +} + +// Internal function to remove the container storage without +// locking the runtime. 
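The RemoveStorageContainer/removeStorageContainer split introduced here follows a standard Go pattern: the exported method acquires the runtime lock, then delegates to the unexported worker that follows, which assumes the lock is already held, so other call sites that already hold the lock can reuse the logic without deadlocking. A generic sketch of the pattern, with hypothetical names:

package main

import "sync"

// registry sketches the exported-locks/internal-assumes-lock split: Remove
// is the public entry point; remove does the actual work and must only be
// called with mu held.
type registry struct {
	mu    sync.Mutex
	items map[string]struct{}
}

func (r *registry) Remove(id string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.remove(id)
}

// remove assumes r.mu is held by the caller.
func (r *registry) remove(id string) {
	delete(r.items, id)
}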
+func (r *Runtime) removeStorageContainer(idOrName string, force bool) error { targetID, err := r.store.Lookup(idOrName) if err != nil { - if err == storage.ErrLayerUnknown { + if errors.Cause(err) == storage.ErrLayerUnknown { return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID or name %q found", idOrName) } return errors.Wrapf(err, "error looking up container %q", idOrName) @@ -66,7 +78,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error { // So we can still error here. ctr, err := r.store.Container(targetID) if err != nil { - if err == storage.ErrContainerUnknown { + if errors.Cause(err) == storage.ErrContainerUnknown { return errors.Wrapf(define.ErrNoSuchCtr, "%q does not refer to a container", idOrName) } return errors.Wrapf(err, "error retrieving container %q", idOrName) @@ -84,7 +96,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error { if !force { timesMounted, err := r.store.Mounted(ctr.ID) if err != nil { - if err == storage.ErrContainerUnknown { + if errors.Cause(err) == storage.ErrContainerUnknown { // Container was removed from under us. // It's gone, so don't bother erroring. logrus.Warnf("Storage for container %s already removed", ctr.ID) @@ -97,7 +109,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error { } } else { if _, err := r.store.Unmount(ctr.ID, true); err != nil { - if err == storage.ErrContainerUnknown { + if errors.Cause(err) == storage.ErrContainerUnknown { // Container again gone, no error logrus.Warnf("Storage for container %s already removed", ctr.ID) return nil @@ -107,7 +119,7 @@ func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error { } if err := r.store.DeleteContainer(ctr.ID); err != nil { - if err == storage.ErrContainerUnknown { + if errors.Cause(err) == storage.ErrContainerUnknown { // Container again gone, no error logrus.Warnf("Storage for container %s already removed", ctr.ID) return nil diff --git a/vendor/github.com/containers/libpod/libpod/runtime_ctr.go b/vendor/github.com/containers/libpod/libpod/runtime_ctr.go index 92b2faefb2..7069d34940 100644 --- a/vendor/github.com/containers/libpod/libpod/runtime_ctr.go +++ b/vendor/github.com/containers/libpod/libpod/runtime_ctr.go @@ -8,7 +8,7 @@ import ( "strings" "time" - config2 "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/rootless" "github.com/containers/storage/pkg/stringid" @@ -35,7 +35,7 @@ func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options .. r.lock.Lock() defer r.lock.Unlock() if !r.valid { - return nil, config2.ErrRuntimeStopped + return nil, define.ErrRuntimeStopped } return r.newContainer(ctx, rSpec, options...) 
} @@ -45,7 +45,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config r.lock.Lock() defer r.lock.Unlock() if !r.valid { - return nil, config2.ErrRuntimeStopped + return nil, define.ErrRuntimeStopped } ctr, err := r.initContainerVariables(rSpec, config) @@ -67,7 +67,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (c *Container, err error) { if rSpec == nil { - return nil, errors.Wrapf(config2.ErrInvalidArg, "must provide a valid runtime spec to create container") + return nil, errors.Wrapf(define.ErrInvalidArg, "must provide a valid runtime spec to create container") } ctr := new(Container) ctr.config = new(ContainerConfig) @@ -75,7 +75,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf if config == nil { ctr.config.ID = stringid.GenerateNonCryptoID() - ctr.config.ShmSize = DefaultShmSize + ctr.config.ShmSize = define.DefaultShmSize } else { // This is a restore from an imported checkpoint ctr.restoreFromCheckpoint = true @@ -100,9 +100,9 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf ctr.state.BindMounts = make(map[string]string) - ctr.config.StopTimeout = config2.CtrRemoveTimeout + ctr.config.StopTimeout = define.CtrRemoveTimeout - ctr.config.OCIRuntime = r.defaultOCIRuntime.name + ctr.config.OCIRuntime = r.defaultOCIRuntime.Name() // Set namespace based on current runtime namespace // Do so before options run so they can override it @@ -152,7 +152,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai }() ctr.valid = true - ctr.state.State = config2.ContainerStateConfigured + ctr.state.State = define.ContainerStateConfigured ctr.runtime = r if ctr.config.OCIRuntime == "" { @@ -160,11 +160,18 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai } else { ociRuntime, ok := r.ociRuntimes[ctr.config.OCIRuntime] if !ok { - return nil, errors.Wrapf(config2.ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime) + return nil, errors.Wrapf(define.ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime) } ctr.ociRuntime = ociRuntime } + // Check NoCgroups support + if ctr.config.NoCgroups { + if !ctr.ociRuntime.SupportsNoCgroups() { + return nil, errors.Wrapf(define.ErrInvalidArg, "requested OCI runtime %s is not compatible with NoCgroups", ctr.ociRuntime.Name()) + } + } + var pod *Pod if ctr.config.Pod != "" { // Get the pod from state @@ -183,43 +190,67 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai ctr.config.Name = name } - // Check CGroup parent sanity, and set it if it was not set - switch r.config.CgroupManager { - case CgroupfsCgroupsManager: - if ctr.config.CgroupParent == "" { - if pod != nil && pod.config.UsePodCgroup { - podCgroup, err := pod.CgroupPath() - if err != nil { - return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID()) + // If CGroups are disabled, we MUST create a PID namespace. + // Otherwise, the OCI runtime won't be able to stop our container. 
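// The validation that follows can be read in isolation: a NoCgroups container
// must request a pid namespace with an empty Path, since joining an existing
// pid namespace would again leave the runtime unable to kill the container's
// processes. A condensed, runnable restatement of that check (a sketch, not
// the vendored function itself):

package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

func hasPrivatePIDNamespace(s *spec.Spec) bool {
	if s.Linux == nil {
		return false
	}
	for _, ns := range s.Linux.Namespaces {
		if ns.Type == spec.PIDNamespace {
			// A non-empty Path means "join an existing namespace" - rejected.
			return ns.Path == ""
		}
	}
	return false
}

func main() {
	ok := &spec.Spec{Linux: &spec.Linux{Namespaces: []spec.LinuxNamespace{{Type: spec.PIDNamespace}}}}
	fmt.Println(hasPrivatePIDNamespace(ok)) // true
}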
+ if ctr.config.NoCgroups { + if ctr.config.Spec.Linux == nil { + return nil, errors.Wrapf(define.ErrInvalidArg, "must provide Linux namespace configuration in OCI spec when using NoCgroups") + } + foundPid := false + for _, ns := range ctr.config.Spec.Linux.Namespaces { + if ns.Type == spec.PIDNamespace { + foundPid = true + if ns.Path != "" { + return nil, errors.Wrapf(define.ErrInvalidArg, "containers not creating CGroups must create a private PID namespace - cannot use another") } - if podCgroup == "" { - return nil, errors.Wrapf(config2.ErrInternal, "pod %s cgroup is not set", pod.ID()) + break + } + } + if !foundPid { + return nil, errors.Wrapf(define.ErrInvalidArg, "containers not creating CGroups must create a private PID namespace") + } + } + + // Check CGroup parent sanity, and set it if it was not set. + // Only if we're actually configuring CGroups. + if !ctr.config.NoCgroups { + switch r.config.CgroupManager { + case define.CgroupfsCgroupsManager: + if ctr.config.CgroupParent == "" { + if pod != nil && pod.config.UsePodCgroup { + podCgroup, err := pod.CgroupPath() + if err != nil { + return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID()) + } + if podCgroup == "" { + return nil, errors.Wrapf(define.ErrInternal, "pod %s cgroup is not set", pod.ID()) + } + ctr.config.CgroupParent = podCgroup + } else { + ctr.config.CgroupParent = CgroupfsDefaultCgroupParent } - ctr.config.CgroupParent = podCgroup - } else { - ctr.config.CgroupParent = CgroupfsDefaultCgroupParent + } else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") { + return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs") } - } else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") { - return nil, errors.Wrapf(config2.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs") - } - case SystemdCgroupsManager: - if ctr.config.CgroupParent == "" { - if pod != nil && pod.config.UsePodCgroup { - podCgroup, err := pod.CgroupPath() - if err != nil { - return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID()) + case define.SystemdCgroupsManager: + if ctr.config.CgroupParent == "" { + if pod != nil && pod.config.UsePodCgroup { + podCgroup, err := pod.CgroupPath() + if err != nil { + return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID()) + } + ctr.config.CgroupParent = podCgroup + } else if rootless.IsRootless() { + ctr.config.CgroupParent = SystemdDefaultRootlessCgroupParent + } else { + ctr.config.CgroupParent = SystemdDefaultCgroupParent } - ctr.config.CgroupParent = podCgroup - } else if rootless.IsRootless() { - ctr.config.CgroupParent = SystemdDefaultRootlessCgroupParent - } else { - ctr.config.CgroupParent = SystemdDefaultCgroupParent + } else if len(ctr.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") { + return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups") } - } else if len(ctr.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") { - return nil, errors.Wrapf(config2.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups") + default: + return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager) } - default: - return nil, errors.Wrapf(config2.ErrInvalidArg, "unsupported CGroup manager: 
%s - cannot validate cgroup parent", r.config.CgroupManager) } if ctr.restoreFromCheckpoint { @@ -233,6 +264,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai g.RemoveMount("/etc/hosts") g.RemoveMount("/run/.containerenv") g.RemoveMount("/run/secrets") + + // Regenerate CGroup paths so they don't point to the old + // container ID. + cgroupPath, err := ctr.getOCICgroupPath() + if err != nil { + return nil, err + } + g.SetLinuxCgroupsPath(cgroupPath) } // Set up storage for the container @@ -253,28 +292,40 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai // Go through named volumes and add them. // If they don't exist they will be created using basic options. + // Maintain an array of them - we need to lock them later. + ctrNamedVolumes := make([]*Volume, 0, len(ctr.config.NamedVolumes)) for _, vol := range ctr.config.NamedVolumes { - // Check if it exists already - _, err := r.state.Volume(vol.Name) - if err == nil { - // The volume exists, we're good - continue - } else if errors.Cause(err) != config2.ErrNoSuchVolume { - return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name) + isAnonymous := false + if vol.Name == "" { + // Anonymous volume. We'll need to create it. + // It needs a name first. + vol.Name = stringid.GenerateNonCryptoID() + isAnonymous = true + } else { + // Check if it exists already + dbVol, err := r.state.Volume(vol.Name) + if err == nil { + ctrNamedVolumes = append(ctrNamedVolumes, dbVol) + // The volume exists, we're good + continue + } else if errors.Cause(err) != define.ErrNoSuchVolume { + return nil, errors.Wrapf(err, "error retrieving named volume %s for new container", vol.Name) + } } logrus.Debugf("Creating new volume %s for container", vol.Name) // The volume does not exist, so we need to create it. - newVol, err := r.newVolume(ctx, WithVolumeName(vol.Name), withSetCtrSpecific(), - WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID())) + volOptions := []VolumeCreateOption{WithVolumeName(vol.Name), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID())} + if isAnonymous { + volOptions = append(volOptions, withSetCtrSpecific()) + } + newVol, err := r.newVolume(ctx, volOptions...) if err != nil { return nil, errors.Wrapf(err, "error creating named volume %q", vol.Name) } - if err := ctr.copyWithTarFromImage(vol.Dest, newVol.MountPoint()); err != nil && !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "Failed to copy content into new volume mount %q", vol.Name) - } + ctrNamedVolumes = append(ctrNamedVolumes, newVol) } if ctr.config.LogPath == "" && ctr.config.LogDriver != JournaldLogging { @@ -291,6 +342,14 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir) } + // Lock all named volumes we are adding ourself to, to ensure we can't + // use a volume being removed. 
+ for _, namedVol := range ctrNamedVolumes { + toLock := namedVol + toLock.lock.Lock() + defer toLock.lock.Unlock() + } + // Add the container to the state // TODO: May be worth looking into recovering from name/ID collisions here if ctr.config.Pod != "" { @@ -373,7 +432,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } if !r.valid { - return config2.ErrRuntimeStopped + return define.ErrRuntimeStopped } // Update the container to get current state @@ -389,8 +448,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } - if c.state.State == config2.ContainerStatePaused { - if err := c.ociRuntime.killContainer(c, 9); err != nil { + if c.state.State == define.ContainerStatePaused { + if err := c.ociRuntime.KillContainer(c, 9, false); err != nil { return err } if err := c.unpause(); err != nil { @@ -403,16 +462,16 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } // Check that the container's in a good state to be removed - if c.state.State == config2.ContainerStateRunning { - if err := c.stop(c.StopTimeout()); err != nil { + if c.state.State == define.ContainerStateRunning { + if err := c.stop(c.StopTimeout(), true); err != nil { return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID()) } } // Check that all of our exec sessions have finished - if len(c.state.ExecSessions) != 0 { - if err := c.ociRuntime.execStopContainer(c, c.StopTimeout()); err != nil { - return err + for _, session := range c.state.ExecSessions { + if err := c.ociRuntime.ExecStopContainer(c, session.ID, c.StopTimeout()); err != nil { + return errors.Wrapf(err, "error stopping exec session %s of container %s", session.ID, c.ID()) } } @@ -426,7 +485,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } if len(deps) != 0 { depsStr := strings.Join(deps, ", ") - return errors.Wrapf(config2.ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr) + return errors.Wrapf(define.ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr) } } @@ -470,8 +529,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, // Delete the container. // Not needed in Configured and Exited states, where the container // doesn't exist in the runtime - if c.state.State != config2.ContainerStateConfigured && - c.state.State != config2.ContainerStateExited { + if c.state.State != define.ContainerStateConfigured && + c.state.State != define.ContainerStateExited { if err := c.delete(ctx); err != nil { if cleanupErr == nil { cleanupErr = err @@ -501,7 +560,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, if !volume.IsCtrSpecific() { continue } - if err := runtime.removeVolume(ctx, volume, false); err != nil && err != config2.ErrNoSuchVolume && err != config2.ErrVolumeBeingUsed { + if err := runtime.removeVolume(ctx, volume, false); err != nil && err != define.ErrNoSuchVolume && err != define.ErrVolumeBeingUsed { logrus.Errorf("cleanup volume (%s): %v", v, err) } } @@ -510,13 +569,151 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, return cleanupErr } +// EvictContainer removes the given container partial or full ID or name, and +// returns the full ID of the evicted container and any error encountered. 
+// It should be used to remove a container when obtaining a Container struct
+// pointer has failed.
+// A running container will not be stopped.
+// If removeVolume is specified, named volumes used by the container will
+// be removed also if and only if the container is the sole user.
+func (r *Runtime) EvictContainer(ctx context.Context, idOrName string, removeVolume bool) (string, error) {
+	r.lock.RLock()
+	defer r.lock.RUnlock()
+	return r.evictContainer(ctx, idOrName, removeVolume)
+}
+
+// evictContainer is the internal function to handle container eviction based
+// on its partial or full ID or name.
+// It returns the full ID of the evicted container and any error encountered.
+// This does not lock the runtime nor the container.
+// Normal removal is attempted first; eviction proper is the fallback for
+// containers whose state is too damaged for removeContainer to succeed
+// (for example, when their configuration can no longer be loaded).
+func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVolume bool) (string, error) {
+	var err error
+
+	if !r.valid {
+		return "", define.ErrRuntimeStopped
+	}
+
+	id, err := r.state.LookupContainerID(idOrName)
+	if err != nil {
+		return "", errors.Wrapf(err, "Failed to find container %q in state", idOrName)
+	}
+
+	// Begin by trying a normal removal. Valid containers will be removed normally.
+	tmpCtr, err := r.state.Container(id)
+	if err == nil {
+		logrus.Infof("Container %s successfully retrieved from state, attempting normal removal", id)
+		// Assume force = true for the evict case
+		err = r.removeContainer(ctx, tmpCtr, true, removeVolume, false)
+		if !tmpCtr.valid {
+			// If the container is marked invalid, remove succeeded
+			// in kicking it out of the state - no need to continue.
+			return id, err
+		}
+
+		if err == nil {
+			// Something has gone seriously wrong - no error but
+			// container was not removed.
+			logrus.Errorf("Container %s not removed with no error", id)
+		} else {
+			logrus.Warnf("Failed to remove container %s normally, proceeding with evict: %v", id, err)
+		}
+	}
+
+	// Error out if the container does not exist in libpod
+	exists, err := r.state.HasContainer(id)
+	if err != nil {
+		return id, err
+	}
+	if !exists {
+		return id, errors.Wrapf(define.ErrNoSuchCtr, "Failed to find container ID %q for eviction", id)
+	}
+
+	// Re-create a container struct for removal purposes
+	c := new(Container)
+	c.config, err = r.state.GetContainerConfig(id)
+	if err != nil {
+		return id, errors.Wrapf(err, "failed to retrieve config for ctr ID %q", id)
+	}
+	c.state = new(ContainerState)
+
+	// We need to lock the pod before we lock the container.
+	// To avoid races around removing a container and the pod it is in.
+	// Don't need to do this in pod removal case - we're evicting the entire
+	// pod.
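// Aside: the comment above encodes libpod's fixed lock order - pod lock
// before container lock. Any fixed global order prevents deadlock; a toy
// demonstration with plain mutexes (libpod itself uses SHM-backed file locks):

package main

import "sync"

var (
	podLock sync.Mutex
	ctrLock sync.Mutex
)

// Every goroutine that needs both locks takes them in the same order, so no
// cycle of "holds one, waits for the other" can ever form.
func withBothLocks(fn func()) {
	podLock.Lock()
	defer podLock.Unlock()
	ctrLock.Lock()
	defer ctrLock.Unlock()
	fn()
}

func main() {
	done := make(chan struct{})
	go withBothLocks(func() { close(done) })
	<-done
}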
+	var pod *Pod
+	if c.config.Pod != "" {
+		pod, err = r.state.Pod(c.config.Pod)
+		if err != nil {
+			return id, errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), c.config.Pod)
+		}
+
+		// Lock the pod while we're removing the container
+		pod.lock.Lock()
+		defer pod.lock.Unlock()
+		if err := pod.updatePod(); err != nil {
+			return id, err
+		}
+
+		infraID := pod.state.InfraContainerID
+		if c.ID() == infraID {
+			return id, errors.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
+		}
+	}
+
+	var cleanupErr error
+	// Remove the container from the state
+	if c.config.Pod != "" {
+		// If we're removing the pod, the container will be evicted
+		// from the state elsewhere
+		if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
+			cleanupErr = err
+		}
+	} else {
+		if err := r.state.RemoveContainer(c); err != nil {
+			cleanupErr = err
+		}
+	}
+
+	// Unmount container mount points
+	for _, mount := range c.config.Mounts {
+		Unmount(mount)
+	}
+
+	// Remove container from c/storage
+	if err := r.removeStorageContainer(id, true); err != nil {
+		if cleanupErr == nil {
+			cleanupErr = err
+		}
+	}
+
+	if !removeVolume {
+		return id, cleanupErr
+	}
+
+	for _, v := range c.config.NamedVolumes {
+		if volume, err := r.state.Volume(v.Name); err == nil {
+			if !volume.IsCtrSpecific() {
+				continue
+			}
+			if err := r.removeVolume(ctx, volume, false); err != nil && err != define.ErrNoSuchVolume && err != define.ErrVolumeBeingUsed {
+				logrus.Errorf("cleanup volume (%s): %v", v, err)
+			}
+		}
+	}
+
+	return id, cleanupErr
+}
+
 // GetContainer retrieves a container by its ID
 func (r *Runtime) GetContainer(id string) (*Container, error) {
 	r.lock.RLock()
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, config2.ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	return r.state.Container(id)
@@ -528,7 +725,7 @@ func (r *Runtime) HasContainer(id string) (bool, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return false, config2.ErrRuntimeStopped
+		return false, define.ErrRuntimeStopped
 	}
 
 	return r.state.HasContainer(id)
@@ -541,7 +738,7 @@ func (r *Runtime) LookupContainer(idOrName string) (*Container, error) {
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, config2.ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 	return r.state.LookupContainer(idOrName)
 }
@@ -555,7 +752,7 @@ func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*Container, error
 	defer r.lock.RUnlock()
 
 	if !r.valid {
-		return nil, config2.ErrRuntimeStopped
+		return nil, define.ErrRuntimeStopped
 	}
 
 	ctrs, err := r.state.AllContainers()
@@ -588,7 +785,7 @@ func (r *Runtime) GetAllContainers() ([]*Container, error) {
 func (r *Runtime) GetRunningContainers() ([]*Container, error) {
 	running := func(c *Container) bool {
 		state, _ := c.State()
-		return state == config2.ContainerStateRunning
+		return state == define.ContainerStateRunning
 	}
 	return r.GetContainers(running)
 }
@@ -616,7 +813,7 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
 		return nil, errors.Wrapf(err, "unable to find latest container")
 	}
 	if len(ctrs) == 0 {
-		return nil, config2.ErrNoSuchCtr
+		return nil, define.ErrNoSuchCtr
 	}
 	for containerIndex, ctr := range ctrs {
 		createdTime := ctr.config.CreatedTime
diff --git a/vendor/github.com/containers/libpod/libpod/runtime_img.go b/vendor/github.com/containers/libpod/libpod/runtime_img.go
index 20dee4080d..f2784c07d1 100644
--- a/vendor/github.com/containers/libpod/libpod/runtime_img.go
+++ 
b/vendor/github.com/containers/libpod/libpod/runtime_img.go @@ -17,9 +17,9 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/containers/image/directory" - dockerarchive "github.com/containers/image/docker/archive" - ociarchive "github.com/containers/image/oci/archive" + "github.com/containers/image/v5/directory" + dockerarchive "github.com/containers/image/v5/docker/archive" + ociarchive "github.com/containers/image/v5/oci/archive" "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -69,7 +69,7 @@ func (r *Runtime) RemoveImage(ctx context.Context, img *image.Image, force bool) // the image. we figure out which repotag the user is trying to refer // to and untag it. repoName, err := img.MatchRepoTag(img.InputName) - if hasChildren && err == image.ErrRepoTagNotFound { + if hasChildren && errors.Cause(err) == image.ErrRepoTagNotFound { return "", errors.Errorf("unable to delete %q (cannot be forced) - image has dependent child images", img.ID()) } if err != nil { diff --git a/vendor/github.com/containers/libpod/libpod/runtime_migrate.go b/vendor/github.com/containers/libpod/libpod/runtime_migrate.go index c363991e6a..d856522329 100644 --- a/vendor/github.com/containers/libpod/libpod/runtime_migrate.go +++ b/vendor/github.com/containers/libpod/libpod/runtime_migrate.go @@ -5,14 +5,15 @@ package libpod import ( "context" "fmt" - "github.com/containers/libpod/pkg/util" "io/ioutil" "os" "path/filepath" "strconv" "syscall" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/rootless" + "github.com/containers/libpod/pkg/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -63,11 +64,34 @@ func (r *Runtime) migrate(ctx context.Context) error { } } + // Did the user request a new runtime? 
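// The lookup just below leans on Go's comma-ok map form so that "no runtime
// change requested" (empty string) and "unknown runtime requested" stay
// distinguishable. A sketch of the same validation against a hypothetical
// runtime registry (names and paths are illustrative only):

package main

import "fmt"

var ociRuntimes = map[string]string{"runc": "/usr/bin/runc"}

func validateMigrateRuntime(requested string) error {
	_, exists := ociRuntimes[requested]
	if requested != "" && !exists {
		return fmt.Errorf("change to runtime %q requested but no such runtime is defined", requested)
	}
	return nil
}

func main() {
	fmt.Println(validateMigrateRuntime(""))     // <nil>: nothing requested
	fmt.Println(validateMigrateRuntime("crun")) // error: not in the registry
}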
+ runtimeChangeRequested := r.migrateRuntime != "" + requestedRuntime, runtimeExists := r.ociRuntimes[r.migrateRuntime] + if !runtimeExists && runtimeChangeRequested { + return errors.Wrapf(define.ErrInvalidArg, "change to runtime %q requested but no such runtime is defined", r.migrateRuntime) + } + for _, ctr := range allCtrs { + needsWrite := false + + // Reset pause process location oldLocation := filepath.Join(ctr.state.RunDir, "conmon.pid") if ctr.config.ConmonPidFile == oldLocation { logrus.Infof("changing conmon PID file for %s", ctr.ID()) ctr.config.ConmonPidFile = filepath.Join(ctr.config.StaticDir, "conmon.pid") + needsWrite = true + } + + // Reset runtime + if runtimeChangeRequested { + logrus.Infof("Resetting container %s runtime to runtime %s", ctr.ID(), r.migrateRuntime) + ctr.config.OCIRuntime = r.migrateRuntime + ctr.ociRuntime = requestedRuntime + + needsWrite = true + } + + if needsWrite { if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil { return errors.Wrapf(err, "error rewriting config for container %s", ctr.ID()) } diff --git a/vendor/github.com/containers/libpod/libpod/runtime_pod_infra_linux.go b/vendor/github.com/containers/libpod/libpod/runtime_pod_infra_linux.go index 24651099fb..6a27c2800c 100644 --- a/vendor/github.com/containers/libpod/libpod/runtime_pod_infra_linux.go +++ b/vendor/github.com/containers/libpod/libpod/runtime_pod_infra_linux.go @@ -9,6 +9,7 @@ import ( "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/pkg/rootless" + "github.com/containers/libpod/pkg/util" "github.com/opencontainers/image-spec/specs-go/v1" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" @@ -30,8 +31,8 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, imgID return nil, err } - // Set Pod hostname as Pod name - g.Config.Hostname = p.config.Name + // Set Pod hostname + g.Config.Hostname = p.config.Hostname isRootless := rootless.IsRootless() @@ -98,7 +99,9 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, imgID if isRootless { netmode = "slirp4netns" } - options = append(options, WithNetNS(p.config.InfraContainer.PortBindings, isRootless, netmode, networks)) + // PostConfigureNetNS should not be set since user namespace sharing is not implemented + // and rootless networking no longer supports post configuration setup + options = append(options, WithNetNS(p.config.InfraContainer.PortBindings, false, netmode, networks)) return r.newContainer(ctx, g.Config, options...) 
} @@ -111,7 +114,7 @@ func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container, return nil, define.ErrRuntimeStopped } - newImage, err := r.ImageRuntime().New(ctx, r.config.InfraImage, "", "", nil, nil, image.SigningOptions{}, false, nil) + newImage, err := r.ImageRuntime().New(ctx, r.config.InfraImage, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/libpod/libpod/runtime_pod_linux.go b/vendor/github.com/containers/libpod/libpod/runtime_pod_linux.go index f38e6e7c1f..704aaf9d0e 100644 --- a/vendor/github.com/containers/libpod/libpod/runtime_pod_linux.go +++ b/vendor/github.com/containers/libpod/libpod/runtime_pod_linux.go @@ -52,6 +52,10 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po pod.config.Name = name } + if pod.config.Hostname == "" { + pod.config.Hostname = pod.config.Name + } + // Allocate a lock for the pod lock, err := r.lockManager.AllocateLock() if err != nil { @@ -72,7 +76,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po // Check CGroup parent sanity, and set it if it was not set switch r.config.CgroupManager { - case CgroupfsCgroupsManager: + case define.CgroupfsCgroupsManager: if pod.config.CgroupParent == "" { pod.config.CgroupParent = CgroupfsDefaultCgroupParent } else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") { @@ -85,7 +89,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po if pod.config.UsePodCgroup { pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID()) } - case SystemdCgroupsManager: + case define.SystemdCgroupsManager: if pod.config.CgroupParent == "" { if rootless.IsRootless() { pod.config.CgroupParent = SystemdDefaultRootlessCgroupParent @@ -196,11 +200,11 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) // the pod and conmon CGroups with a PID limit to prevent them from // spawning any further processes (particularly cleanup processes) which // would prevent removing the CGroups. - if p.runtime.config.CgroupManager == CgroupfsCgroupsManager { + if p.runtime.config.CgroupManager == define.CgroupfsCgroupsManager { // Get the conmon CGroup conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon") conmonCgroup, err := cgroups.Load(conmonCgroupPath) - if err != nil && err != cgroups.ErrCgroupDeleted { + if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless { removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup %s", p.ID(), conmonCgroupPath) } @@ -247,7 +251,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath) switch p.runtime.config.CgroupManager { - case SystemdCgroupsManager: + case define.SystemdCgroupsManager: if err := deleteSystemdCgroup(p.state.CgroupPath); err != nil { if removalErr == nil { removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID()) @@ -255,14 +259,14 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) } } - case CgroupfsCgroupsManager: + case define.CgroupfsCgroupsManager: // Delete the cgroupfs cgroup // Make sure the conmon cgroup is deleted first // Since the pod is almost gone, don't bother failing // hard - instead, just log errors. 
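// That "keep the first error, log the rest" policy recurs throughout this
// removal path (removalErr below, cleanupErr in container removal): cleanup
// keeps going so a half-removed pod still gets as much teardown as possible.
// The pattern in isolation (a sketch, not the vendored code):

package main

import (
	"errors"
	"fmt"
)

func runCleanup(steps ...func() error) error {
	var firstErr error
	for i, step := range steps {
		if err := step(); err != nil {
			if firstErr == nil {
				firstErr = err
			} else {
				// Later failures must not mask the first one; just log them.
				fmt.Printf("additional cleanup error in step %d: %v\n", i, err)
			}
		}
	}
	return firstErr
}

func main() {
	boom := func() error { return errors.New("boom") }
	ok := func() error { return nil }
	fmt.Println(runCleanup(ok, boom, boom)) // boom
}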
conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon") conmonCgroup, err := cgroups.Load(conmonCgroupPath) - if err != nil && err != cgroups.ErrCgroupDeleted { + if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless { if removalErr == nil { removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup", p.ID()) } else { @@ -279,7 +283,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) } } cgroup, err := cgroups.Load(p.state.CgroupPath) - if err != nil && err != cgroups.ErrCgroupDeleted { + if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless { if removalErr == nil { removalErr = errors.Wrapf(err, "error retrieving pod %s cgroup", p.ID()) } else { diff --git a/vendor/github.com/containers/libpod/libpod/runtime_renumber.go b/vendor/github.com/containers/libpod/libpod/runtime_renumber.go index 735ffba34a..9de2556b24 100644 --- a/vendor/github.com/containers/libpod/libpod/runtime_renumber.go +++ b/vendor/github.com/containers/libpod/libpod/runtime_renumber.go @@ -53,6 +53,23 @@ func (r *Runtime) renumberLocks() error { return err } } + allVols, err := r.state.AllVolumes() + if err != nil { + return err + } + for _, vol := range allVols { + lock, err := r.lockManager.AllocateLock() + if err != nil { + return errors.Wrapf(err, "error allocating lock for volume %s", vol.Name()) + } + + vol.config.LockID = lock.ID() + + // Write the new lock ID + if err := r.state.RewriteVolumeConfig(vol, vol.config); err != nil { + return err + } + } r.newSystemEvent(events.Renumber) diff --git a/vendor/github.com/containers/libpod/libpod/runtime_volume.go b/vendor/github.com/containers/libpod/libpod/runtime_volume.go index d05db936b1..a6ab748e51 100644 --- a/vendor/github.com/containers/libpod/libpod/runtime_volume.go +++ b/vendor/github.com/containers/libpod/libpod/runtime_volume.go @@ -6,7 +6,6 @@ import ( "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // Contains the public Runtime API for volumes @@ -36,43 +35,32 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error return nil } } + + v.lock.Lock() + defer v.lock.Unlock() + return r.removeVolume(ctx, v, force) } -// RemoveVolumes removes a slice of volumes or all with a force bool -func (r *Runtime) RemoveVolumes(ctx context.Context, volumes []string, all, force bool) ([]string, error) { - var ( - vols []*Volume - err error - deletedVols []string - ) - if all { - vols, err = r.Volumes() - if err != nil { - return nil, errors.Wrapf(err, "unable to get all volumes") - } - } else { - for _, i := range volumes { - vol, err := r.GetVolume(i) - if err != nil { - return nil, err - } - vols = append(vols, vol) - } +// GetVolume retrieves a volume given its full name. +func (r *Runtime) GetVolume(name string) (*Volume, error) { + r.lock.RLock() + defer r.lock.RUnlock() + + if !r.valid { + return nil, define.ErrRuntimeStopped } - for _, vol := range vols { - if err := r.RemoveVolume(ctx, vol, force); err != nil { - return deletedVols, err - } - logrus.Debugf("removed volume %s", vol.Name()) - deletedVols = append(deletedVols, vol.Name()) + vol, err := r.state.Volume(name) + if err != nil { + return nil, err } - return deletedVols, nil + + return vol, nil } -// GetVolume retrieves a volume given its full name. 
-func (r *Runtime) GetVolume(name string) (*Volume, error) {
+// LookupVolume retrieves a volume by unambiguous partial name.
+func (r *Runtime) LookupVolume(name string) (*Volume, error) {
 	r.lock.RLock()
 	defer r.lock.RUnlock()
@@ -80,7 +68,7 @@ func (r *Runtime) GetVolume(name string) (*Volume, error) {
 		return nil, define.ErrRuntimeStopped
 	}
 
-	vol, err := r.state.Volume(name)
+	vol, err := r.state.LookupVolume(name)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/containers/libpod/libpod/runtime_volume_linux.go b/vendor/github.com/containers/libpod/libpod/runtime_volume_linux.go
index ac6fd02c3a..5b05acea42 100644
--- a/vendor/github.com/containers/libpod/libpod/runtime_volume_linux.go
+++ b/vendor/github.com/containers/libpod/libpod/runtime_volume_linux.go
@@ -7,6 +7,7 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+	"time"
 
 	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/libpod/events"
@@ -27,7 +28,7 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
 }
 
 // newVolume creates a new empty volume
-func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
+func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (_ *Volume, Err error) {
 	volume, err := newVolume(r)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error creating volume")
@@ -42,13 +43,31 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
 	if volume.config.Name == "" {
 		volume.config.Name = stringid.GenerateNonCryptoID()
 	}
-	// TODO: support for other volume drivers
 	if volume.config.Driver == "" {
-		volume.config.Driver = "local"
+		volume.config.Driver = define.VolumeDriverLocal
 	}
-	// TODO: determine when the scope is global and set it to that
-	if volume.config.Scope == "" {
-		volume.config.Scope = "local"
+	volume.config.CreatedTime = time.Now()
+
+	// Check if volume with given name exists.
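// The block below makes volume names unique at create time: check HasVolume
// first, then fail with ErrVolumeExists. The same guard over a plain
// map-backed store, with hypothetical types for illustration:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errVolumeExists = errors.New("volume already exists")

type store struct{ volumes map[string]struct{} }

func (s *store) create(name string) error {
	if _, ok := s.volumes[name]; ok {
		return errors.Wrapf(errVolumeExists, "volume with name %s already exists", name)
	}
	s.volumes[name] = struct{}{}
	return nil
}

func main() {
	s := &store{volumes: map[string]struct{}{}}
	fmt.Println(s.create("data")) // <nil>
	fmt.Println(s.create("data")) // volume with name data already exists: volume already exists
}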
+ exists, err := r.state.HasVolume(volume.config.Name) + if err != nil { + return nil, errors.Wrapf(err, "error checking if volume with name %s exists", volume.config.Name) + } + if exists { + return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name) + } + + if volume.config.Driver == define.VolumeDriverLocal { + logrus.Debugf("Validating options for local driver") + // Validate options + for key := range volume.config.Options { + switch key { + case "device", "o", "type": + // Do nothing, valid keys + default: + return nil, errors.Wrapf(define.ErrInvalidArg, "invalid mount option %s for driver 'local'", key) + } + } } // Create the mountpoint of this volume @@ -71,6 +90,21 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) } volume.config.MountPoint = fullVolPath + lock, err := r.lockManager.AllocateLock() + if err != nil { + return nil, errors.Wrapf(err, "error allocating lock for new volume") + } + volume.lock = lock + volume.config.LockID = volume.lock.ID() + + defer func() { + if Err != nil { + if err := volume.lock.Free(); err != nil { + logrus.Errorf("Error freeing volume lock after failed creation: %v", err) + } + } + }() + volume.valid = true // Add the volume to state @@ -90,6 +124,11 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error return define.ErrVolumeRemoved } + // Update volume status to pick up a potential removal from state + if err := v.update(); err != nil { + return err + } + deps, err := r.state.VolumeInUse(v) if err != nil { return err @@ -113,6 +152,8 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error return errors.Wrapf(err, "error removing container %s that depends on volume %s", dep, v.Name()) } + logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name()) + // TODO: do we want to set force here when removing // containers? // I'm inclined to say no, in case someone accidentally @@ -123,6 +164,18 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error } } + // If the volume is still mounted - force unmount it + if err := v.unmount(true); err != nil { + if force { + // If force is set, evict the volume, even if errors + // occur. Otherwise we'll never be able to get rid of + // them. 
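// The force path above presses on even when the unmount fails; the Unmount
// helper added in util_linux.go further down takes the same attitude, using a
// lazy unmount. A minimal sketch of that call (MNT_DETACH detaches the mount
// immediately and cleans it up once it is no longer busy; EINVAL just means
// "was not mounted"):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func lazyUnmount(target string) {
	if err := unix.Unmount(target, unix.MNT_DETACH); err != nil && err != unix.EINVAL {
		log.Printf("failed to unmount %s: %v", target, err)
	}
}

func main() {
	lazyUnmount("/tmp/not-a-mountpoint") // EINVAL is silently ignored
}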
+ logrus.Errorf("Error unmounting volume %s: %v", v.Name(), err) + } else { + return errors.Wrapf(err, "error unmounting volume %s", v.Name()) + } + } + // Set volume as invalid so it can no longer be used v.valid = false @@ -131,12 +184,24 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error return errors.Wrapf(err, "error removing volume %s", v.Name()) } - // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes + var removalErr error + + // Free the volume's lock + if err := v.lock.Free(); err != nil { + removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name()) + } + + // Delete the mountpoint path of the volume, that is delete the volume + // from /var/lib/containers/storage/volumes if err := v.teardownStorage(); err != nil { - return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name()) + if removalErr == nil { + removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name()) + } else { + logrus.Errorf("error cleaning up volume storage for volume %q: %v", v.Name(), err) + } } defer v.newVolumeEvent(events.Remove) logrus.Debugf("Removed volume %s", v.Name()) - return nil + return removalErr } diff --git a/vendor/github.com/containers/libpod/libpod/state.go b/vendor/github.com/containers/libpod/libpod/state.go index d0ad1a1f87..b246b5eacf 100644 --- a/vendor/github.com/containers/libpod/libpod/state.go +++ b/vendor/github.com/containers/libpod/libpod/state.go @@ -1,15 +1,6 @@ package libpod -// DBConfig is a set of Libpod runtime configuration settings that are saved -// in a State when it is first created, and can subsequently be retrieved. -type DBConfig struct { - LibpodRoot string - LibpodTmp string - StorageRoot string - StorageTmp string - GraphDriver string - VolumePath string -} +import "github.com/containers/libpod/libpod/config" // State is a storage backend for libpod's current state. // A State is only initialized once per instance of libpod. @@ -37,7 +28,7 @@ type State interface { // root and tmp dirs, and c/storage graph driver. // This is not implemented by the in-memory state, as it has no need to // validate runtime configuration. - GetDBConfig() (*DBConfig, error) + GetDBConfig() (*config.DBConfig, error) // ValidateDBConfig validates the config in the given Runtime struct // against paths stored in the configured database. @@ -58,6 +49,9 @@ type State interface { // If the container is not in the set namespace, an error will be // returned. Container(id string) (*Container, error) + // Return a container ID from the database by full or partial ID or full + // name. + LookupContainerID(idOrName string) (string, error) // Return a container from the database by full or partial ID or full // name. // Containers not in the set namespace will be ignored. @@ -98,6 +92,9 @@ type State interface { // returned. AllContainers() ([]*Container, error) + // Return a container config from the database by full ID + GetContainerConfig(id string) (*ContainerConfig, error) + // PLEASE READ FULL DESCRIPTION BEFORE USING. // Rewrite a container's configuration. // This function breaks libpod's normal prohibition on a read-only @@ -115,12 +112,20 @@ type State interface { // answer is this: use this only very sparingly, and only if you really // know what you're doing. RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error - // PLEASE READ THE ABOVE DESCRIPTION BEFORE USING. 
+ // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING. // This function is identical to RewriteContainerConfig, save for the // fact that it is used with pods instead. // It is subject to the same conditions as RewriteContainerConfig. // Please do not use this unless you know what you're doing. RewritePodConfig(pod *Pod, newCfg *PodConfig) error + // PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING. + // This function is identical to RewriteContainerConfig, save for the + // fact that it is used with volumes instead. + // It is subject to the same conditions as RewriteContainerConfig. + // The exception is that volumes do not have IDs, so only volume name + // cannot be altered. + // Please do not use this unless you know what you're doing. + RewriteVolumeConfig(volume *Volume, newCfg *VolumeConfig) error // Accepts full ID of pod. // If the pod given is not in the set namespace, an error will be @@ -182,6 +187,9 @@ type State interface { // Volume accepts full name of volume // If the volume doesn't exist, an error will be returned Volume(volName string) (*Volume, error) + // LookupVolume accepts an unambiguous partial name or full name of a + // volume. Ambiguous names will result in an error. + LookupVolume(name string) (*Volume, error) // HasVolume returns true if volName exists in the state, // otherwise it returns false HasVolume(volName string) (bool, error) @@ -195,6 +203,10 @@ type State interface { // RemoveVolume removes the specified volume. // Only volumes that have no container dependencies can be removed RemoveVolume(volume *Volume) error + // UpdateVolume updates the volume's state from the database. + UpdateVolume(volume *Volume) error + // SaveVolume saves a volume's state to the database. + SaveVolume(volume *Volume) error // AllVolumes returns all the volumes available in the state AllVolumes() ([]*Volume, error) } diff --git a/vendor/github.com/containers/libpod/libpod/stats.go b/vendor/github.com/containers/libpod/libpod/stats.go index 776870bd22..5513abce5d 100644 --- a/vendor/github.com/containers/libpod/libpod/stats.go +++ b/vendor/github.com/containers/libpod/libpod/stats.go @@ -19,6 +19,10 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container stats.ContainerID = c.ID() stats.Name = c.Name() + if c.config.NoCgroups { + return nil, errors.Wrapf(define.ErrNoCgroups, "cannot run top on container %s as it did not create a cgroup", c.ID()) + } + if !c.batched { c.lock.Lock() defer c.lock.Unlock() diff --git a/vendor/github.com/containers/libpod/libpod/storage.go b/vendor/github.com/containers/libpod/libpod/storage.go index 0814672be0..6375d031bc 100644 --- a/vendor/github.com/containers/libpod/libpod/storage.go +++ b/vendor/github.com/containers/libpod/libpod/storage.go @@ -4,8 +4,8 @@ import ( "context" "time" - istorage "github.com/containers/image/storage" - "github.com/containers/image/types" + istorage "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/types" "github.com/containers/libpod/libpod/define" "github.com/containers/storage" "github.com/opencontainers/image-spec/specs-go/v1" diff --git a/vendor/github.com/containers/libpod/libpod/util.go b/vendor/github.com/containers/libpod/libpod/util.go index b60575264a..7bd834e302 100644 --- a/vendor/github.com/containers/libpod/libpod/util.go +++ b/vendor/github.com/containers/libpod/libpod/util.go @@ -3,13 +3,16 @@ package libpod import ( "fmt" "os" + "os/exec" "path/filepath" "sort" "strconv" "strings" "time" + 
"github.com/containers/libpod/libpod/config" "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/utils" "github.com/fsnotify/fsnotify" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" @@ -17,9 +20,7 @@ import ( // Runtime API constants const ( - // DefaultTransport is a prefix that we apply to an image name - // to check docker hub first for the image - DefaultTransport = "docker://" + unknownPackage = "Unknown" ) // FuncTimer helps measure the execution time of a function @@ -69,7 +70,11 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e defer watcher.Close() } - timeoutChan := time.After(timeout) + var timeoutChan <-chan time.Time + + if timeout != 0 { + timeoutChan = time.After(timeout) + } for { select { @@ -148,3 +153,53 @@ func JSONDeepCopy(from, to interface{}) error { } return json.Unmarshal(tmp, to) } + +func dpkgVersion(path string) string { + output := unknownPackage + cmd := exec.Command("/usr/bin/dpkg", "-S", path) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + } + return strings.Trim(output, "\n") +} + +func rpmVersion(path string) string { + output := unknownPackage + cmd := exec.Command("/usr/bin/rpm", "-q", "-f", path) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + } + return strings.Trim(output, "\n") +} + +func packageVersion(program string) string { + if out := rpmVersion(program); out != unknownPackage { + return out + } + return dpkgVersion(program) +} + +func programVersion(mountProgram string) (string, error) { + output, err := utils.ExecCmd(mountProgram, "--version") + if err != nil { + return "", err + } + return strings.TrimSuffix(output, "\n"), nil +} + +func DefaultSeccompPath() (string, error) { + _, err := os.Stat(config.SeccompOverridePath) + if err == nil { + return config.SeccompOverridePath, nil + } + if !os.IsNotExist(err) { + return "", errors.Wrapf(err, "can't check if %q exists", config.SeccompOverridePath) + } + if _, err := os.Stat(config.SeccompDefaultPath); err != nil { + if !os.IsNotExist(err) { + return "", errors.Wrapf(err, "can't check if %q exists", config.SeccompDefaultPath) + } + return "", nil + } + return config.SeccompDefaultPath, nil +} diff --git a/vendor/github.com/containers/libpod/libpod/util_linux.go b/vendor/github.com/containers/libpod/libpod/util_linux.go index d5c113dafb..631f6836c6 100644 --- a/vendor/github.com/containers/libpod/libpod/util_linux.go +++ b/vendor/github.com/containers/libpod/libpod/util_linux.go @@ -5,6 +5,7 @@ package libpod import ( "fmt" "strings" + "syscall" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/cgroups" @@ -12,6 +13,7 @@ import ( "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) // systemdSliceFromPath makes a new systemd slice under the given parent with @@ -107,3 +109,14 @@ func LabelVolumePath(path string, shared bool) error { } return nil } + +// Unmount umounts a target directory +func Unmount(mount string) { + if err := unix.Unmount(mount, unix.MNT_DETACH); err != nil { + if err != syscall.EINVAL { + logrus.Warnf("failed to unmount %s : %v", mount, err) + } else { + logrus.Debugf("failed to unmount %s : %v", mount, err) + } + } +} diff --git a/vendor/github.com/containers/libpod/libpod/util_unsupported.go b/vendor/github.com/containers/libpod/libpod/util_unsupported.go index 58b0dfbcd3..9a9a6eeb6b 100644 --- 
a/vendor/github.com/containers/libpod/libpod/util_unsupported.go +++ b/vendor/github.com/containers/libpod/libpod/util_unsupported.go @@ -28,3 +28,7 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) { func LabelVolumePath(path string, shared bool) error { return define.ErrNotImplemented } + +func Unmount(mount string) error { + return define.ErrNotImplemented +} diff --git a/vendor/github.com/containers/libpod/libpod/volume.go b/vendor/github.com/containers/libpod/libpod/volume.go index 9ed2ff0873..c4771bbb81 100644 --- a/vendor/github.com/containers/libpod/libpod/volume.go +++ b/vendor/github.com/containers/libpod/libpod/volume.go @@ -1,27 +1,69 @@ package libpod -// Volume is the type used to create named volumes -// TODO: all volumes should be created using this and the Volume API +import ( + "time" + + "github.com/containers/libpod/libpod/lock" +) + +// Volume is a libpod named volume. +// Named volumes may be shared by multiple containers, and may be created using +// more complex options than normal bind mounts. They may be backed by a mounted +// filesystem on the host. type Volume struct { config *VolumeConfig + state *VolumeState valid bool runtime *Runtime + lock lock.Locker } -// VolumeConfig holds the volume's config information +// VolumeConfig holds the volume's immutable configuration. type VolumeConfig struct { - // Name of the volume + // Name of the volume. Name string `json:"name"` + // ID of the volume's lock. + LockID uint32 `json:"lockID"` + // Labels for the volume. + Labels map[string]string `json:"labels"` + // The volume driver. Empty string or local does not activate a volume + // driver, all other volumes will. + Driver string `json:"volumeDriver"` + // The location the volume is mounted at. + MountPoint string `json:"mountPoint"` + // Time the volume was created. + CreatedTime time.Time `json:"createdAt,omitempty"` + // Options to pass to the volume driver. For the local driver, this is + // a list of mount options. For other drivers, they are passed to the + // volume driver handling the volume. + Options map[string]string `json:"volumeOptions,omitempty"` + // Whether this volume was created for a specific container and will be + // removed with it. + IsCtrSpecific bool `json:"ctrSpecific"` + // UID the volume will be created as. + UID int `json:"uid"` + // GID the volume will be created as. + GID int `json:"gid"` +} - Labels map[string]string `json:"labels"` - MountPoint string `json:"mountPoint"` - Driver string `json:"driver"` - Options map[string]string `json:"options"` - Scope string `json:"scope"` - IsCtrSpecific bool `json:"ctrSpecific"` - UID int `json:"uid"` - GID int `json:"gid"` +// VolumeState holds the volume's mutable state. +// Volumes are not guaranteed to have a state. Only volumes using the Local +// driver that have mount options set will create a state. +type VolumeState struct { + // MountCount is the number of times this volume has been requested to + // be mounted. + // It is incremented on mount() and decremented on unmount(). + // On incrementing from 0, the volume will be mounted on the host. + // On decrementing to 0, the volume will be unmounted on the host. + MountCount uint `json:"mountCount"` + // NeedsCopyUp indicates that the next time the volume is mounted into + // a container, the container will "copy up" the contents of the + // mountpoint into the volume. + // This should only be done once. 
As such, this is set at container + // create time, then cleared after the copy up is done and never set + // again. + NeedsCopyUp bool `json:"notYetMounted,omitempty"` } // Name retrieves the volume's name @@ -29,6 +71,18 @@ func (v *Volume) Name() string { return v.config.Name } +// Driver retrieves the volume's driver. +func (v *Volume) Driver() string { + return v.config.Driver +} + +// Scope retrieves the volume's scope. +// Libpod does not implement volume scoping, and this is provided solely for +// Docker compatibility. It returns only "local". +func (v *Volume) Scope() string { + return "local" +} + // Labels returns the volume's labels func (v *Volume) Labels() map[string]string { labels := make(map[string]string) @@ -43,29 +97,34 @@ func (v *Volume) MountPoint() string { return v.config.MountPoint } -// Driver returns the volume's driver -func (v *Volume) Driver() string { - return v.config.Driver -} - // Options return the volume's options func (v *Volume) Options() map[string]string { options := make(map[string]string) - for key, value := range v.config.Options { - options[key] = value + for k, v := range v.config.Options { + options[k] = v } - return options } -// Scope returns the scope of the volume -func (v *Volume) Scope() string { - return v.config.Scope -} - // IsCtrSpecific returns whether this volume was created specifically for a // given container. Images with this set to true will be removed when the // container is removed with the Volumes parameter set to true. func (v *Volume) IsCtrSpecific() bool { return v.config.IsCtrSpecific } + +// UID returns the UID the volume will be created as. +func (v *Volume) UID() int { + return v.config.UID +} + +// GID returns the GID the volume will be created as. +func (v *Volume) GID() int { + return v.config.GID +} + +// CreatedTime returns the time the volume was created at. It was not tracked +// for some time, so older volumes may not contain one. +func (v *Volume) CreatedTime() time.Time { + return v.config.CreatedTime +} diff --git a/vendor/github.com/containers/libpod/libpod/volume_inspect.go b/vendor/github.com/containers/libpod/libpod/volume_inspect.go new file mode 100644 index 0000000000..c333b89618 --- /dev/null +++ b/vendor/github.com/containers/libpod/libpod/volume_inspect.go @@ -0,0 +1,73 @@ +package libpod + +import ( + "time" + + "github.com/containers/libpod/libpod/define" +) + +// InspectVolumeData is the output of Inspect() on a volume. It is matched to +// the format of 'docker volume inspect'. +type InspectVolumeData struct { + // Name is the name of the volume. + Name string `json:"Name"` + // Driver is the driver used to create the volume. + // This will be properly implemented in a future version. + Driver string `json:"Driver"` + // Mountpoint is the path on the host where the volume is mounted. + Mountpoint string `json:"Mountpoint"` + // CreatedAt is the date and time the volume was created at. This is not + // stored for older Libpod volumes; if so, it will be omitted. + CreatedAt time.Time `json:"CreatedAt,omitempty"` + // Status is presently unused and provided only for Docker compatibility. + // In the future it will be used to return information on the volume's + // current state. + Status map[string]string `json:"Status,omitempty"` + // Labels includes the volume's configured labels, key:value pairs that + // can be passed during volume creation to provide information for third + // party tools. 
+ Labels map[string]string `json:"Labels"` + // Scope is unused and provided solely for Docker compatibility. It is + // unconditionally set to "local". + Scope string `json:"Scope"` + // Options is a set of options that were used when creating the volume. + // It is presently not used. + Options map[string]string `json:"Options"` + // UID is the UID that the volume was created with. + UID int `json:"UID,omitempty"` + // GID is the GID that the volume was created with. + GID int `json:"GID,omitempty"` + // ContainerSpecific indicates that the volume was created as part of a + // specific container, and will be removed when that container is + // removed. + ContainerSpecific bool `json:"ContainerSpecific,omitempty"` +} + +// Inspect provides detailed information about the configuration of the given +// volume. +func (v *Volume) Inspect() (*InspectVolumeData, error) { + if !v.valid { + return nil, define.ErrVolumeRemoved + } + + data := new(InspectVolumeData) + + data.Name = v.config.Name + data.Driver = v.config.Driver + data.Mountpoint = v.config.MountPoint + data.CreatedAt = v.config.CreatedTime + data.Labels = make(map[string]string) + for k, v := range v.config.Labels { + data.Labels[k] = v + } + data.Scope = v.Scope() + data.Options = make(map[string]string) + for k, v := range v.config.Options { + data.Options[k] = v + } + data.UID = v.config.UID + data.GID = v.config.GID + data.ContainerSpecific = v.config.IsCtrSpecific + + return data, nil +} diff --git a/vendor/github.com/containers/libpod/libpod/volume_internal.go b/vendor/github.com/containers/libpod/libpod/volume_internal.go index 35f0ca19d6..42b935e7ca 100644 --- a/vendor/github.com/containers/libpod/libpod/volume_internal.go +++ b/vendor/github.com/containers/libpod/libpod/volume_internal.go @@ -3,15 +3,19 @@ package libpod import ( "os" "path/filepath" + + "github.com/containers/libpod/libpod/define" ) // Creates a new volume func newVolume(runtime *Runtime) (*Volume, error) { volume := new(Volume) volume.config = new(VolumeConfig) + volume.state = new(VolumeState) volume.runtime = runtime volume.config.Labels = make(map[string]string) volume.config.Options = make(map[string]string) + volume.state.NeedsCopyUp = true return volume, nil } @@ -20,3 +24,25 @@ func newVolume(runtime *Runtime) (*Volume, error) { func (v *Volume) teardownStorage() error { return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name())) } + +// Volumes with options set, or a filesystem type, or a device to mount need to +// be mounted and unmounted. +func (v *Volume) needsMount() bool { + return len(v.config.Options) > 0 && v.config.Driver == define.VolumeDriverLocal +} + +// update() updates the volume state from the DB. 
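// For locally-driven volumes with options, the mount() method in
// volume_internal_linux.go below shells out to mount(8) rather than calling
// unix.Mount directly. A sketch of the argv it assembles, using hypothetical
// values for a tmpfs-backed volume:

package main

import "fmt"

func mountArgs(device, fsType, options, target string) []string {
	args := []string{}
	if options != "" {
		args = append(args, "-o", options)
	}
	if fsType != "" {
		args = append(args, "-t", fsType)
	}
	// tmpfs has no real device; the kernel still wants a source string.
	if device == "" && fsType != "" {
		device = fsType
	}
	return append(args, device, target)
}

func main() {
	fmt.Println(mountArgs("", "tmpfs", "size=64m,uid=1000", "/var/lib/containers/storage/volumes/data/_data"))
	// [-o size=64m,uid=1000 -t tmpfs tmpfs /var/lib/containers/storage/volumes/data/_data]
}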
+func (v *Volume) update() error { + if err := v.runtime.state.UpdateVolume(v); err != nil { + return err + } + if !v.valid { + return define.ErrVolumeRemoved + } + return nil +} + +// save() saves the volume state to the DB +func (v *Volume) save() error { + return v.runtime.state.SaveVolume(v) +} diff --git a/vendor/github.com/containers/libpod/libpod/volume_internal_linux.go b/vendor/github.com/containers/libpod/libpod/volume_internal_linux.go new file mode 100644 index 0000000000..70eccbecbd --- /dev/null +++ b/vendor/github.com/containers/libpod/libpod/volume_internal_linux.go @@ -0,0 +1,147 @@ +// +build linux + +package libpod + +import ( + "os/exec" + "strings" + + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/pkg/rootless" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// mount mounts the volume if necessary. +// A mount is necessary if a volume has any options set. +// If a mount is necessary, v.state.MountCount will be incremented. +// If it was 0 when the increment occurred, the volume will be mounted on the +// host. Otherwise, we assume it is already mounted. +// Must be done while the volume is locked. +// Is a no-op on volumes that do not require a mount (as defined by +// volumeNeedsMount()) +func (v *Volume) mount() error { + if !v.needsMount() { + return nil + } + + // We cannot mount volumes as rootless. + if rootless.IsRootless() { + return errors.Wrapf(define.ErrRootless, "cannot mount volumes without root privileges") + } + + // Update the volume from the DB to get an accurate mount counter. + if err := v.update(); err != nil { + return err + } + + // If the count is non-zero, the volume is already mounted. + // Nothing to do. + if v.state.MountCount > 0 { + v.state.MountCount = v.state.MountCount + 1 + logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount) + return v.save() + } + + volDevice := v.config.Options["device"] + volType := v.config.Options["type"] + volOptions := v.config.Options["o"] + + // Some filesystems (tmpfs) don't have a device, but we still need to + // give the kernel something. + if volDevice == "" && volType != "" { + volDevice = volType + } + + // We need to use the actual mount command. + // Convincing unix.Mount to use the same semantics as the mount command + // itself seems prohibitively difficult. + // TODO: might want to cache this path in the runtime? + mountPath, err := exec.LookPath("mount") + if err != nil { + return errors.Wrapf(err, "error locating 'mount' binary") + } + mountArgs := []string{} + if volOptions != "" { + mountArgs = append(mountArgs, "-o", volOptions) + } + if volType != "" { + mountArgs = append(mountArgs, "-t", volType) + } + mountArgs = append(mountArgs, volDevice, v.config.MountPoint) + mountCmd := exec.Command(mountPath, mountArgs...) + + logrus.Debugf("Running mount command: %s %s", mountPath, strings.Join(mountArgs, " ")) + if output, err := mountCmd.CombinedOutput(); err != nil { + logrus.Debugf("Mount failed with %v", err) + return errors.Wrapf(errors.Errorf(string(output)), "error mounting volume %s", v.Name()) + } + + logrus.Debugf("Mounted volume %s", v.Name()) + + // Increment the mount counter + v.state.MountCount = v.state.MountCount + 1 + logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount) + return v.save() +} + +// unmount unmounts the volume if necessary. +// Unmounting a volume that is not mounted is a no-op. +// Unmounting a volume that does not require a mount is a no-op. 
+// The volume must be locked for this to occur. +// The mount counter will be decremented if non-zero. If the counter reaches 0, +// the volume will really be unmounted, as no further containers are using the +// volume. +// If force is set, the volume will be unmounted regardless of mount counter. +func (v *Volume) unmount(force bool) error { + if !v.needsMount() { + return nil + } + + // Update the volume from the DB to get an accurate mount counter. + if err := v.update(); err != nil { + return err + } + + if v.state.MountCount == 0 { + logrus.Debugf("Volume %s already unmounted", v.Name()) + return nil + } + + // We cannot unmount volumes as rootless. + if rootless.IsRootless() { + // If force is set, just clear the counter and bail without + // error, so we can remove volumes from the state if they are in + // an awkward configuration. + if force { + logrus.Errorf("Volume %s is mounted despite being rootless - state is not sane", v.Name()) + v.state.MountCount = 0 + return v.save() + } + + return errors.Wrapf(define.ErrRootless, "cannot mount or unmount volumes without root privileges") + } + + if !force { + v.state.MountCount = v.state.MountCount - 1 + } else { + v.state.MountCount = 0 + } + + logrus.Debugf("Volume %s mount count now at %d", v.Name(), v.state.MountCount) + + if v.state.MountCount == 0 { + // Unmount the volume + if err := unix.Unmount(v.config.MountPoint, unix.MNT_DETACH); err != nil { + if err == unix.EINVAL { + // Ignore EINVAL - the mount no longer exists. + return nil + } + return errors.Wrapf(err, "error unmounting volume %s", v.Name()) + } + logrus.Debugf("Unmounted volume %s", v.Name()) + } + + return v.save() +} diff --git a/vendor/github.com/containers/libpod/libpod/volume_internal_unsupported.go b/vendor/github.com/containers/libpod/libpod/volume_internal_unsupported.go new file mode 100644 index 0000000000..74e24cfdad --- /dev/null +++ b/vendor/github.com/containers/libpod/libpod/volume_internal_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package libpod + +import ( + "github.com/containers/libpod/libpod/define" +) + +func (v *Volume) mount() error { + return define.ErrNotImplemented +} + +func (v *Volume) unmount(force bool) error { + return define.ErrNotImplemented +} diff --git a/vendor/github.com/containers/libpod/pkg/annotations/annotations.go b/vendor/github.com/containers/libpod/pkg/annotations/annotations.go index fe2591a0c7..19b1029d1e 100644 --- a/vendor/github.com/containers/libpod/pkg/annotations/annotations.go +++ b/vendor/github.com/containers/libpod/pkg/annotations/annotations.go @@ -102,6 +102,10 @@ const ( // CNIResult is the JSON string representation of the Result from CNI CNIResult = "io.kubernetes.cri-o.CNIResult" + + // ContainerManager is the annotation key for indicating the creator and + // manager of the container + ContainerManager = "io.container.manager" ) // ContainerType values @@ -112,3 +116,7 @@ const ( // ContainerTypeContainer represents a container running within a pod ContainerTypeContainer = "container" ) + +// ContainerManagerLibpod indicates that libpod created and manages the +// container +const ContainerManagerLibpod = "libpod" diff --git a/vendor/github.com/containers/libpod/pkg/cgroups/cgroups.go b/vendor/github.com/containers/libpod/pkg/cgroups/cgroups.go index 0857188558..9711e81200 100644 --- a/vendor/github.com/containers/libpod/pkg/cgroups/cgroups.go +++ b/vendor/github.com/containers/libpod/pkg/cgroups/cgroups.go @@ -10,6 +10,7 @@ import ( "strconv" "strings" + 
"github.com/containers/libpod/pkg/rootless" systemdDbus "github.com/coreos/go-systemd/dbus" "github.com/godbus/dbus" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -19,7 +20,9 @@ import ( var ( // ErrCgroupDeleted means the cgroup was deleted - ErrCgroupDeleted = errors.New("cgroups: cgroup deleted") + ErrCgroupDeleted = errors.New("cgroup deleted") + // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environmen + ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments") ) // CgroupControl controls a cgroup hierarchy @@ -339,6 +342,9 @@ func Load(path string) (*CgroupControl, error) { p := control.getCgroupv1Path(name) if _, err := os.Stat(p); err != nil { if os.IsNotExist(err) { + if rootless.IsRootless() { + return nil, ErrCgroupV1Rootless + } // compatible with the error code // used by containerd/cgroups return nil, ErrCgroupDeleted diff --git a/vendor/github.com/containers/libpod/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/libpod/pkg/cgroups/cgroups_supported.go index fcd44dfc80..2a36777d43 100644 --- a/vendor/github.com/containers/libpod/pkg/cgroups/cgroups_supported.go +++ b/vendor/github.com/containers/libpod/pkg/cgroups/cgroups_supported.go @@ -3,8 +3,15 @@ package cgroups import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strings" "sync" "syscall" + + "github.com/pkg/errors" ) var ( @@ -25,3 +32,58 @@ func IsCgroup2UnifiedMode() (bool, error) { }) return isUnified, isUnifiedErr } + +// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the +// current cgroup. +func UserOwnsCurrentSystemdCgroup() (bool, error) { + uid := os.Geteuid() + + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return false, err + } + + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return false, errors.Wrapf(err, "open file /proc/self/cgroup") + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.SplitN(line, ":", 3) + + if len(parts) < 3 { + continue + } + + var cgroupPath string + + if cgroup2 { + cgroupPath = filepath.Join(cgroupRoot, parts[2]) + } else { + if parts[1] != "name=systemd" { + continue + } + cgroupPath = filepath.Join(cgroupRoot, "systemd", parts[2]) + } + + st, err := os.Stat(cgroupPath) + if err != nil { + return false, err + } + s := st.Sys() + if s == nil { + return false, fmt.Errorf("error stat cgroup path %s", cgroupPath) + } + + if int(s.(*syscall.Stat_t).Uid) != uid { + return false, nil + } + } + if err := scanner.Err(); err != nil { + return false, errors.Wrapf(err, "parsing file /proc/self/cgroup") + } + return true, nil +} diff --git a/vendor/github.com/containers/libpod/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/libpod/pkg/cgroups/cgroups_unsupported.go index 9dc196e42a..cd140fbf3c 100644 --- a/vendor/github.com/containers/libpod/pkg/cgroups/cgroups_unsupported.go +++ b/vendor/github.com/containers/libpod/pkg/cgroups/cgroups_unsupported.go @@ -6,3 +6,9 @@ package cgroups func IsCgroup2UnifiedMode() (bool, error) { return false, nil } + +// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the +// current cgroup. 
+func UserOwnsCurrentSystemdCgroup() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/containers/libpod/pkg/firewall/common.go b/vendor/github.com/containers/libpod/pkg/firewall/common.go deleted file mode 100644 index a65d4f03d5..0000000000 --- a/vendor/github.com/containers/libpod/pkg/firewall/common.go +++ /dev/null @@ -1,55 +0,0 @@ -package firewall - -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "net" - - "github.com/containernetworking/cni/pkg/types/current" -) - -// FirewallNetConf represents the firewall configuration. -// Nolint applied for firewall.Firewall... name duplication notice. -//nolint -type FirewallNetConf struct { - //types.NetConf - - // IptablesAdminChainName is an optional name to use instead of the default - // admin rules override chain name that includes the interface name. - IptablesAdminChainName string - - // FirewalldZone is an optional firewalld zone to place the interface into. If - // the firewalld backend is used but the zone is not given, it defaults - // to 'trusted' - FirewalldZone string - - PrevResult *current.Result -} - -// FirewallBackend is an interface to the system firewall, allowing addition and -// removal of firewall rules. -// Nolint applied for firewall.Firewall... name duplication notice. -//nolint -type FirewallBackend interface { - Add(*FirewallNetConf) error - Del(*FirewallNetConf) error -} - -func ipString(ip net.IPNet) string { - if ip.IP.To4() == nil { - return ip.IP.String() + "/128" - } - return ip.IP.String() + "/32" -} diff --git a/vendor/github.com/containers/libpod/pkg/firewall/firewall_linux.go b/vendor/github.com/containers/libpod/pkg/firewall/firewall_linux.go deleted file mode 100644 index 4ac45427ba..0000000000 --- a/vendor/github.com/containers/libpod/pkg/firewall/firewall_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build linux - -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package firewall - -import ( - "fmt" -) - -// GetBackend retrieves a firewall backend for adding or removing firewall rules -// on the system. -// Valid backend names are firewalld, iptables, and none. -// If the empty string is given, a firewalld backend will be returned if -// firewalld is running, and an iptables backend will be returned otherwise. 
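
Note: the UserOwnsCurrentSystemdCgroup helper added above works by walking /proc/self/cgroup, where each line has the form hierarchy-ID:controller-list:cgroup-path (a pure cgroup v2 host has a single 0::/path entry), and stat-ing the resulting directory under the cgroup root. A rough standalone sketch of that parsing, assuming the conventional /sys/fs/cgroup root is passed in; the names here are illustrative, not the vendored API:

package cgpath

import (
	"bufio"
	"os"
	"path/filepath"
	"strings"
)

// currentCgroupPaths parses /proc/self/cgroup. Each line is
// "hierarchy-ID:controller-list:cgroup-path"; on a pure cgroup v2 host
// there is a single "0::/path" entry with an empty controller list.
func currentCgroupPaths(root string) ([]string, error) {
	f, err := os.Open("/proc/self/cgroup")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var paths []string
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), ":", 3)
		if len(parts) < 3 {
			continue
		}
		// Join root + controller list + in-hierarchy path, e.g.
		// /sys/fs/cgroup/name=systemd/user.slice/... on cgroup v1.
		paths = append(paths, filepath.Join(root, parts[1], parts[2]))
	}
	return paths, scanner.Err()
}
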
-func GetBackend(backend string) (FirewallBackend, error) { - switch backend { - case "firewalld": - return newFirewalldBackend() - case "iptables": - return newIptablesBackend() - case "none": - return newNoneBackend() - case "": - // Default to firewalld if it's running - if isFirewalldRunning() { - return newFirewalldBackend() - } - - // Otherwise iptables - return newIptablesBackend() - default: - return nil, fmt.Errorf("unrecognized firewall backend %q", backend) - } -} diff --git a/vendor/github.com/containers/libpod/pkg/firewall/firewall_none.go b/vendor/github.com/containers/libpod/pkg/firewall/firewall_none.go deleted file mode 100644 index 9add24842c..0000000000 --- a/vendor/github.com/containers/libpod/pkg/firewall/firewall_none.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package firewall - -import ( - "fmt" -) - -// FirewallNone is a firewall backend for environments where manipulating the -// system firewall is unsupported (for example, when running without root). -// Nolint applied to avoid firewall.FirewallNone name duplication notes. -//nolint -type FirewallNone struct{} - -func newNoneBackend() (FirewallBackend, error) { - return &FirewallNone{}, nil -} - -// Add adds a rule to the system firewall. -// No action is taken and an error is unconditionally returned as this backend -// does not support manipulating the firewall. -func (f *FirewallNone) Add(conf *FirewallNetConf) error { - return fmt.Errorf("cannot modify system firewall rules") -} - -// Del deletes a rule from the system firewall. -// No action is taken and an error is unconditionally returned as this backend -// does not support manipulating the firewall. -func (f *FirewallNone) Del(conf *FirewallNetConf) error { - return fmt.Errorf("cannot modify system firewall rules") -} diff --git a/vendor/github.com/containers/libpod/pkg/firewall/firewall_unsupported.go b/vendor/github.com/containers/libpod/pkg/firewall/firewall_unsupported.go deleted file mode 100644 index 24c07a8a9c..0000000000 --- a/vendor/github.com/containers/libpod/pkg/firewall/firewall_unsupported.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !linux - -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package firewall - -import ( - "fmt" -) - -// GetBackend retrieves a firewall backend for adding or removing firewall rules -// on the system. 
-func GetBackend(backend string) (FirewallBackend, error) { - return nil, fmt.Errorf("firewall backends are not presently supported on this OS") -} diff --git a/vendor/github.com/containers/libpod/pkg/firewall/firewalld.go b/vendor/github.com/containers/libpod/pkg/firewall/firewalld.go deleted file mode 100644 index 15e845cb73..0000000000 --- a/vendor/github.com/containers/libpod/pkg/firewall/firewalld.go +++ /dev/null @@ -1,122 +0,0 @@ -// +build linux - -// Copyright 2018 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package firewall - -import ( - "fmt" - "github.com/sirupsen/logrus" - "strings" - - "github.com/godbus/dbus" -) - -const ( - dbusName = "org.freedesktop.DBus" - dbusPath = "/org/freedesktop/DBus" - dbusGetNameOwnerMethod = "GetNameOwner" - - firewalldName = "org.fedoraproject.FirewallD1" - firewalldPath = "/org/fedoraproject/FirewallD1" - firewalldZoneInterface = "org.fedoraproject.FirewallD1.zone" - firewalldAddSourceMethod = "addSource" - firewalldRemoveSourceMethod = "removeSource" - - errZoneAlreadySet = "ZONE_ALREADY_SET" -) - -// Only used for testcases to override the D-Bus connection -var testConn *dbus.Conn - -type fwdBackend struct { - conn *dbus.Conn -} - -// fwdBackend implements the FirewallBackend interface -var _ FirewallBackend = &fwdBackend{} - -func getConn() (*dbus.Conn, error) { - if testConn != nil { - return testConn, nil - } - return dbus.SystemBus() -} - -// isFirewalldRunning checks whether firewalld is running. 
-func isFirewalldRunning() bool { - conn, err := getConn() - if err != nil { - return false - } - - dbusObj := conn.Object(dbusName, dbusPath) - var res string - if err := dbusObj.Call(dbusName+"."+dbusGetNameOwnerMethod, 0, firewalldName).Store(&res); err != nil { - return false - } - - return true -} - -func newFirewalldBackend() (FirewallBackend, error) { - conn, err := getConn() - if err != nil { - return nil, err - } - - backend := &fwdBackend{ - conn: conn, - } - return backend, nil -} - -func getFirewalldZone(conf *FirewallNetConf) string { - if conf.FirewalldZone != "" { - return conf.FirewalldZone - } - - return "trusted" -} - -func (fb *fwdBackend) Add(conf *FirewallNetConf) error { - zone := getFirewalldZone(conf) - - for _, ip := range conf.PrevResult.IPs { - ipStr := ipString(ip.Address) - // Add a firewalld rule which assigns the given source IP to the given zone - firewalldObj := fb.conn.Object(firewalldName, firewalldPath) - var res string - if err := firewalldObj.Call(firewalldZoneInterface+"."+firewalldAddSourceMethod, 0, zone, ipStr).Store(&res); err != nil { - if !strings.Contains(err.Error(), errZoneAlreadySet) { - return fmt.Errorf("failed to add the address %v to %v zone: %v", ipStr, zone, err) - } - } - } - return nil -} - -func (fb *fwdBackend) Del(conf *FirewallNetConf) error { - for _, ip := range conf.PrevResult.IPs { - ipStr := ipString(ip.Address) - // Remove firewalld rules which assigned the given source IP to the given zone - firewalldObj := fb.conn.Object(firewalldName, firewalldPath) - var res string - if err := firewalldObj.Call(firewalldZoneInterface+"."+firewalldRemoveSourceMethod, 0, getFirewalldZone(conf), ipStr).Store(&res); err != nil { - logrus.Errorf("unable to store firewallobj") - } - } - return nil -} diff --git a/vendor/github.com/containers/libpod/pkg/firewall/iptables.go b/vendor/github.com/containers/libpod/pkg/firewall/iptables.go deleted file mode 100644 index 169ddc1d74..0000000000 --- a/vendor/github.com/containers/libpod/pkg/firewall/iptables.go +++ /dev/null @@ -1,195 +0,0 @@ -// +build linux - -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This is a "meta-plugin". It reads in its own netconf, it does not create -// any network interface but just changes the network sysctl. 
- -package firewall - -import ( - "fmt" - "github.com/sirupsen/logrus" - "net" - - "github.com/coreos/go-iptables/iptables" -) - -func getPrivChainRules(ip string) [][]string { - var rules [][]string - rules = append(rules, []string{"-d", ip, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}) - rules = append(rules, []string{"-s", ip, "-j", "ACCEPT"}) - return rules -} - -func ensureChain(ipt *iptables.IPTables, table, chain string) error { - chains, err := ipt.ListChains(table) - if err != nil { - return fmt.Errorf("failed to list iptables chains: %v", err) - } - for _, ch := range chains { - if ch == chain { - return nil - } - } - - return ipt.NewChain(table, chain) -} - -func generateFilterRule(privChainName string) []string { - return []string{"-m", "comment", "--comment", "CNI firewall plugin rules", "-j", privChainName} -} - -func cleanupRules(ipt *iptables.IPTables, privChainName string, rules [][]string) { - for _, rule := range rules { - if err := ipt.Delete("filter", privChainName, rule...); err != nil { - logrus.Errorf("failed to delete iptables rule %s", privChainName) - } - } -} - -func ensureFirstChainRule(ipt *iptables.IPTables, chain string, rule []string) error { - exists, err := ipt.Exists("filter", chain, rule...) - if !exists && err == nil { - err = ipt.Insert("filter", chain, 1, rule...) - } - return err -} - -func (ib *iptablesBackend) setupChains(ipt *iptables.IPTables) error { - privRule := generateFilterRule(ib.privChainName) - adminRule := generateFilterRule(ib.adminChainName) - - // Ensure our private chains exist - if err := ensureChain(ipt, "filter", ib.privChainName); err != nil { - return err - } - if err := ensureChain(ipt, "filter", ib.adminChainName); err != nil { - return err - } - - // Ensure our filter rule exists in the forward chain - if err := ensureFirstChainRule(ipt, "FORWARD", privRule); err != nil { - return err - } - - // Ensure our admin override chain rule exists in our private chain - if err := ensureFirstChainRule(ipt, ib.privChainName, adminRule); err != nil { - return err - } - - return nil -} - -func protoForIP(ip net.IPNet) iptables.Protocol { - if ip.IP.To4() != nil { - return iptables.ProtocolIPv4 - } - return iptables.ProtocolIPv6 -} - -func (ib *iptablesBackend) addRules(conf *FirewallNetConf, ipt *iptables.IPTables, proto iptables.Protocol) error { - rules := make([][]string, 0) - for _, ip := range conf.PrevResult.IPs { - if protoForIP(ip.Address) == proto { - rules = append(rules, getPrivChainRules(ipString(ip.Address))...) - } - } - - if len(rules) > 0 { - if err := ib.setupChains(ipt); err != nil { - return err - } - - // Clean up on any errors - var err error - defer func() { - if err != nil { - cleanupRules(ipt, ib.privChainName, rules) - } - }() - - for _, rule := range rules { - err = ipt.AppendUnique("filter", ib.privChainName, rule...) - if err != nil { - return err - } - } - } - - return nil -} - -func (ib *iptablesBackend) delRules(conf *FirewallNetConf, ipt *iptables.IPTables, proto iptables.Protocol) error { - rules := make([][]string, 0) - for _, ip := range conf.PrevResult.IPs { - if protoForIP(ip.Address) == proto { - rules = append(rules, getPrivChainRules(ipString(ip.Address))...) 
- } - } - - if len(rules) > 0 { - cleanupRules(ipt, ib.privChainName, rules) - } - - return nil -} - -type iptablesBackend struct { - protos map[iptables.Protocol]*iptables.IPTables - privChainName string - adminChainName string -} - -// iptablesBackend implements the FirewallBackend interface -var _ FirewallBackend = &iptablesBackend{} - -func newIptablesBackend() (FirewallBackend, error) { - adminChainName := "CNI-ADMIN" - - backend := &iptablesBackend{ - privChainName: "CNI-FORWARD", - adminChainName: adminChainName, - protos: make(map[iptables.Protocol]*iptables.IPTables), - } - - for _, proto := range []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6} { - ipt, err := iptables.NewWithProtocol(proto) - if err != nil { - return nil, fmt.Errorf("could not initialize iptables protocol %v: %v", proto, err) - } - backend.protos[proto] = ipt - } - - return backend, nil -} - -func (ib *iptablesBackend) Add(conf *FirewallNetConf) error { - for proto, ipt := range ib.protos { - if err := ib.addRules(conf, ipt, proto); err != nil { - return err - } - } - return nil -} - -func (ib *iptablesBackend) Del(conf *FirewallNetConf) error { - for proto, ipt := range ib.protos { - if err := ib.delRules(conf, ipt, proto); err != nil { - logrus.Errorf("failed to delete iptables backend rule %s", conf.IptablesAdminChainName) - } - } - return nil -} diff --git a/vendor/github.com/containers/libpod/pkg/lookup/lookup.go b/vendor/github.com/containers/libpod/pkg/lookup/lookup.go index 70b97144f3..a249dd753c 100644 --- a/vendor/github.com/containers/libpod/pkg/lookup/lookup.go +++ b/vendor/github.com/containers/libpod/pkg/lookup/lookup.go @@ -29,17 +29,30 @@ func GetUserGroupInfo(containerMount, containerUser string, override *Overrides) defaultExecUser *user.ExecUser err error ) - passwdPath := etcpasswd - groupPath := etcgroup if override != nil { // Check for an override /etc/passwd path if override.ContainerEtcPasswdPath != "" { - passwdPath = override.ContainerEtcPasswdPath + passwdDest = override.ContainerEtcPasswdPath } // Check for an override for /etc/group path if override.ContainerEtcGroupPath != "" { - groupPath = override.ContainerEtcGroupPath + groupDest = override.ContainerEtcGroupPath + } + } + + if passwdDest == "" { + // Make sure the /etc/passwd destination is not a symlink to something naughty + if passwdDest, err = securejoin.SecureJoin(containerMount, etcpasswd); err != nil { + logrus.Debug(err) + return nil, err + } + } + if groupDest == "" { + // Make sure the /etc/group destination is not a symlink to something naughty + if groupDest, err = securejoin.SecureJoin(containerMount, etcgroup); err != nil { + logrus.Debug(err) + return nil, err } } @@ -56,15 +69,6 @@ func GetUserGroupInfo(containerMount, containerUser string, override *Overrides) } - // Make sure the /etc/group and /etc/passwd destinations are not a symlink to something naughty - if passwdDest, err = securejoin.SecureJoin(containerMount, passwdPath); err != nil { - logrus.Debug(err) - return nil, err - } - if groupDest, err = securejoin.SecureJoin(containerMount, groupPath); err != nil { - logrus.Debug(err) - return nil, err - } return user.GetExecUserPath(containerUser, defaultExecUser, passwdDest, groupDest) } diff --git a/vendor/github.com/containers/libpod/pkg/netns/netns_linux.go b/vendor/github.com/containers/libpod/pkg/netns/netns_linux.go index 1d6fb873c5..e765bd46fd 100644 --- a/vendor/github.com/containers/libpod/pkg/netns/netns_linux.go +++ b/vendor/github.com/containers/libpod/pkg/netns/netns_linux.go 
@@ -23,23 +23,42 @@ import ( "fmt" "os" "path" + "path/filepath" "runtime" "strings" "sync" "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/libpod/pkg/rootless" + "github.com/containers/libpod/pkg/util" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) -const nsRunDir = "/var/run/netns" +// get NSRunDir returns the dir of where to create the netNS. When running +// rootless, it needs to be at a location writable by user. +func getNSRunDir() (string, error) { + if rootless.IsRootless() { + rootlessDir, err := util.GetRuntimeDir() + if err != nil { + return "", err + } + return filepath.Join(rootlessDir, "netns"), nil + } + return "/var/run/netns", nil +} // NewNS creates a new persistent (bind-mounted) network namespace and returns // an object representing that namespace, without switching to it. func NewNS() (ns.NetNS, error) { + nsRunDir, err := getNSRunDir() + if err != nil { + return nil, err + } + b := make([]byte, 16) - _, err := rand.Reader.Read(b) + _, err = rand.Reader.Read(b) if err != nil { return nil, fmt.Errorf("failed to generate random netns name: %v", err) } @@ -107,9 +126,12 @@ func NewNS() (ns.NetNS, error) { // Don't unlock. By not unlocking, golang will kill the OS thread when the // goroutine is done (for go1.10+) + threadNsPath := getCurrentThreadNetNSPath() + var origNS ns.NetNS - origNS, err = ns.GetNS(getCurrentThreadNetNSPath()) + origNS, err = ns.GetNS(threadNsPath) if err != nil { + logrus.Warnf("cannot open current network namespace %s: %q", threadNsPath, err) return } defer func() { @@ -121,20 +143,27 @@ func NewNS() (ns.NetNS, error) { // create a new netns on the current thread err = unix.Unshare(unix.CLONE_NEWNET) if err != nil { + logrus.Warnf("cannot create a new network namespace: %q", err) return } // Put this thread back to the orig ns, since it might get reused (pre go1.10) defer func() { if err := origNS.Set(); err != nil { - logrus.Errorf("unable to set namespace: %q", err) + if rootless.IsRootless() && strings.Contains(err.Error(), "operation not permitted") { + // When running in rootless mode it will fail to re-join + // the network namespace owned by root on the host. + return + } + logrus.Warnf("unable to reset namespace: %q", err) } }() // bind mount the netns from the current thread (from /proc) onto the // mount point. This causes the namespace to persist, even when there - // are no threads in the ns. - err = unix.Mount(getCurrentThreadNetNSPath(), nsPath, "none", unix.MS_BIND, "") + // are no threads in the ns. Make this a shared mount; it needs to be + // back-propogated to the host + err = unix.Mount(threadNsPath, nsPath, "none", unix.MS_BIND|unix.MS_SHARED|unix.MS_REC, "") if err != nil { err = fmt.Errorf("failed to bind mount ns at %s: %v", nsPath, err) } @@ -150,6 +179,11 @@ func NewNS() (ns.NetNS, error) { // UnmountNS unmounts the NS held by the netns object func UnmountNS(ns ns.NetNS) error { + nsRunDir, err := getNSRunDir() + if err != nil { + return err + } + nsPath := ns.Path() // Only unmount if it's been bind-mounted (don't touch namespaces in /proc...) 
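
Note: getNSRunDir above is what makes persistent network namespaces usable rootless; root keeps using /var/run/netns, while an unprivileged user gets a directory under its runtime dir. A simplified sketch of the same selection, assuming XDG_RUNTIME_DIR is the rootless runtime dir (the vendored util.GetRuntimeDir does more validation than this):

package netnsdir

import (
	"fmt"
	"os"
	"path/filepath"
)

// runDir mimics the selection above: root uses /var/run/netns, while a
// rootless process needs a user-writable location under the runtime dir.
func runDir() (string, error) {
	if os.Geteuid() == 0 {
		return "/var/run/netns", nil
	}
	rt := os.Getenv("XDG_RUNTIME_DIR")
	if rt == "" {
		return "", fmt.Errorf("XDG_RUNTIME_DIR is not set")
	}
	return filepath.Join(rt, "netns"), nil
}
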
if strings.HasPrefix(nsPath, nsRunDir) { diff --git a/vendor/github.com/containers/libpod/pkg/registries/registries.go b/vendor/github.com/containers/libpod/pkg/registries/registries.go index de63dcbf11..9643c947fb 100644 --- a/vendor/github.com/containers/libpod/pkg/registries/registries.go +++ b/vendor/github.com/containers/libpod/pkg/registries/registries.go @@ -5,8 +5,8 @@ import ( "path/filepath" "strings" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/types" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" "github.com/containers/libpod/pkg/rootless" "github.com/docker/distribution/reference" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/libpod/pkg/rootless/rootless.go b/vendor/github.com/containers/libpod/pkg/rootless/rootless.go new file mode 100644 index 0000000000..7e9fe9db6c --- /dev/null +++ b/vendor/github.com/containers/libpod/pkg/rootless/rootless.go @@ -0,0 +1,45 @@ +package rootless + +import ( + "os" + + "github.com/containers/storage" + "github.com/pkg/errors" +) + +func TryJoinPauseProcess(pausePidPath string) (bool, int, error) { + if _, err := os.Stat(pausePidPath); err != nil { + return false, -1, nil + } + + became, ret, err := TryJoinFromFilePaths("", false, []string{pausePidPath}) + if err == nil { + return became, ret, err + } + + // It could not join the pause process, let's lock the file before trying to delete it. + pidFileLock, err := storage.GetLockfile(pausePidPath) + if err != nil { + // The file was deleted by another process. + if os.IsNotExist(err) { + return false, -1, nil + } + return false, -1, errors.Wrapf(err, "error acquiring lock on %s", pausePidPath) + } + + pidFileLock.Lock() + defer func() { + if pidFileLock.Locked() { + pidFileLock.Unlock() + } + }() + + // Now the pause PID file is locked. Try to join once again in case it changed while it was not locked. + became, ret, err = TryJoinFromFilePaths("", false, []string{pausePidPath}) + if err != nil { + // It is still failing. We can safely remove it. 
+ os.Remove(pausePidPath) + return false, -1, nil + } + return became, ret, err +} diff --git a/vendor/github.com/containers/libpod/pkg/rootless/rootless_linux.go b/vendor/github.com/containers/libpod/pkg/rootless/rootless_linux.go index 6e48988c50..94c42f7d04 100644 --- a/vendor/github.com/containers/libpod/pkg/rootless/rootless_linux.go +++ b/vendor/github.com/containers/libpod/pkg/rootless/rootless_linux.go @@ -3,26 +3,24 @@ package rootless import ( + "bufio" "fmt" + "io" "io/ioutil" "os" "os/exec" gosignal "os/signal" "os/user" - "path/filepath" "runtime" "strconv" - "strings" "sync" - "syscall" "unsafe" "github.com/containers/libpod/pkg/errorhandling" "github.com/containers/storage/pkg/idtools" - "github.com/docker/docker/pkg/signal" - "github.com/godbus/dbus" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) /* @@ -106,7 +104,7 @@ func tryMappingTool(tool string, pid int, hostID int, mappings []idtools.IDMap) } appendTriplet := func(l []string, a, b, c int) []string { - return append(l, fmt.Sprintf("%d", a), fmt.Sprintf("%d", b), fmt.Sprintf("%d", c)) + return append(l, strconv.Itoa(a), strconv.Itoa(b), strconv.Itoa(c)) } args := []string{path, fmt.Sprintf("%d", pid)} @@ -128,7 +126,7 @@ func tryMappingTool(tool string, pid int, hostID int, mappings []idtools.IDMap) func readUserNs(path string) (string, error) { b := make([]byte, 256) - _, err := syscall.Readlink(path, b) + _, err := unix.Readlink(path, b) if err != nil { return "", err } @@ -141,7 +139,7 @@ func readUserNsFd(fd uintptr) (string, error) { func getParentUserNs(fd uintptr) (uintptr, error) { const nsGetParent = 0xb702 - ret, _, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(nsGetParent), 0) + ret, _, errno := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(nsGetParent), 0) if errno != 0 { return 0, errno } @@ -177,7 +175,7 @@ func getUserNSFirstChild(fd uintptr) (*os.File, error) { for { nextFd, err := getParentUserNs(fd) if err != nil { - if err == syscall.ENOTTY { + if err == unix.ENOTTY { return os.NewFile(fd, "userns child"), nil } return nil, errors.Wrapf(err, "cannot get parent user namespace") @@ -189,14 +187,14 @@ func getUserNSFirstChild(fd uintptr) (*os.File, error) { } if ns == currentNS { - if err := syscall.Close(int(nextFd)); err != nil { + if err := unix.Close(int(nextFd)); err != nil { return nil, err } // Drop O_CLOEXEC for the fd. - _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_SETFD, 0) + _, _, errno := unix.Syscall(unix.SYS_FCNTL, fd, unix.F_SETFD, 0) if errno != 0 { - if err := syscall.Close(int(fd)); err != nil { + if err := unix.Close(int(fd)); err != nil { logrus.Errorf("failed to close file descriptor %d", fd) } return nil, errno @@ -204,99 +202,13 @@ func getUserNSFirstChild(fd uintptr) (*os.File, error) { return os.NewFile(fd, "userns child"), nil } - if err := syscall.Close(int(fd)); err != nil { + if err := unix.Close(int(fd)); err != nil { return nil, err } fd = nextFd } } -// EnableLinger configures the system to not kill the user processes once the session -// terminates -func EnableLinger() (string, error) { - uid := fmt.Sprintf("%d", GetRootlessUID()) - - conn, err := dbus.SystemBus() - if err == nil { - defer func() { - if err := conn.Close(); err != nil { - logrus.Errorf("unable to close dbus connection: %q", err) - } - }() - } - - lingerEnabled := false - - // If we have a D-BUS connection, attempt to read the LINGER property from it. 
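
Note: TryJoinPauseProcess above retries the join while holding a file lock so that two concurrent processes cannot both decide to delete the pause PID file. The same lock-then-retry shape, sketched with a plain flock(2) instead of the containers/storage lockfile used here; withFileLock is a hypothetical helper, not the vendored API:

package pauselock

import (
	"os"

	"golang.org/x/sys/unix"
)

// withFileLock runs fn while holding an exclusive flock on path. Callers
// would re-check their precondition inside fn (as the code above re-attempts
// the join) before taking any destructive action such as deleting the file.
func withFileLock(path string, fn func() error) error {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return err
	}
	defer f.Close()

	if err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {
		return err
	}
	defer unix.Flock(int(f.Fd()), unix.LOCK_UN)

	return fn()
}
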
- if conn != nil { - path := dbus.ObjectPath(fmt.Sprintf("/org/freedesktop/login1/user/_%s", uid)) - ret, err := conn.Object("org.freedesktop.login1", path).GetProperty("org.freedesktop.login1.User.Linger") - if err == nil && ret.Value().(bool) { - lingerEnabled = true - } - } - - xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR") - lingerFile := "" - if xdgRuntimeDir != "" && !lingerEnabled { - lingerFile = filepath.Join(xdgRuntimeDir, "libpod/linger") - _, err := os.Stat(lingerFile) - if err == nil { - lingerEnabled = true - } - } - - if !lingerEnabled { - // First attempt with D-BUS, if it fails, then attempt with "loginctl enable-linger" - if conn != nil { - o := conn.Object("org.freedesktop.login1", "/org/freedesktop/login1") - ret := o.Call("org.freedesktop.login1.Manager.SetUserLinger", 0, uint32(GetRootlessUID()), true, true) - if ret.Err == nil { - lingerEnabled = true - } - } - if !lingerEnabled { - err := exec.Command("loginctl", "enable-linger", uid).Run() - if err == nil { - lingerEnabled = true - } else { - logrus.Debugf("cannot run `loginctl enable-linger` for the current user: %v", err) - } - } - if lingerEnabled && lingerFile != "" { - f, err := os.Create(lingerFile) - if err == nil { - if err := f.Close(); err != nil { - logrus.Errorf("failed to close %s", f.Name()) - } - } else { - logrus.Debugf("could not create linger file: %v", err) - } - } - } - - if !lingerEnabled { - return "", nil - } - - // If we have a D-BUS connection, attempt to read the RUNTIME PATH from it. - if conn != nil { - path := dbus.ObjectPath(fmt.Sprintf("/org/freedesktop/login1/user/_%s", uid)) - ret, err := conn.Object("org.freedesktop.login1", path).GetProperty("org.freedesktop.login1.User.RuntimePath") - if err == nil { - return strings.Trim(ret.String(), "\"\n"), nil - } - } - - // If XDG_RUNTIME_DIR is not set and the D-BUS call didn't work, try to get the runtime path with "loginctl" - output, err := exec.Command("loginctl", "-pRuntimePath", "show-user", uid).Output() - if err != nil { - logrus.Debugf("could not get RuntimePath using loginctl: %v", err) - return "", nil - } - return strings.Trim(strings.Replace(string(output), "RuntimePath=", "", -1), "\"\n"), nil -} - // joinUserAndMountNS re-exec podman in a new userNS and join the user and mount // namespace of the specified PID without looking up its parent. Useful to join directly // the conmon process. @@ -345,6 +257,32 @@ func joinUserAndMountNS(pid uint, pausePid string) (bool, int, error) { return true, int(ret), nil } +// GetConfiguredMappings returns the additional IDs configured for the current user. 
+func GetConfiguredMappings() ([]idtools.IDMap, []idtools.IDMap, error) { + var uids, gids []idtools.IDMap + username := os.Getenv("USER") + if username == "" { + var id string + if os.Geteuid() == 0 { + id = strconv.Itoa(GetRootlessUID()) + } else { + id = strconv.Itoa(os.Geteuid()) + } + userID, err := user.LookupId(id) + if err == nil { + username = userID.Username + } + } + mappings, err := idtools.NewIDMappings(username, username) + if err != nil { + logrus.Errorf("cannot find mappings for user %s: %v", username, err) + } else { + uids = mappings.UIDs() + gids = mappings.GIDs() + } + return uids, gids, nil +} + func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (bool, int, error) { if os.Geteuid() == 0 || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" { if os.Getenv("_CONTAINERS_USERNS_CONFIGURED") == "init" { @@ -366,7 +304,7 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (bool, runtime.LockOSThread() defer runtime.UnlockOSThread() - fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_DGRAM, 0) + fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_DGRAM, 0) if err != nil { return false, -1, err } @@ -386,25 +324,14 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (bool, return false, -1, errors.Errorf("cannot re-exec process") } - var uids, gids []idtools.IDMap - username := os.Getenv("USER") - if username == "" { - userID, err := user.LookupId(fmt.Sprintf("%d", os.Getuid())) - if err == nil { - username = userID.Username - } - } - mappings, err := idtools.NewIDMappings(username, username) + uids, gids, err := GetConfiguredMappings() if err != nil { - logrus.Warnf("cannot find mappings for user %s: %v", username, err) - } else { - uids = mappings.UIDs() - gids = mappings.GIDs() + return false, -1, err } uidsMapped := false - if mappings != nil && uids != nil { - err := tryMappingTool("newuidmap", pid, os.Getuid(), uids) + if uids != nil { + err := tryMappingTool("newuidmap", pid, os.Geteuid(), uids) uidsMapped = err == nil } if !uidsMapped { @@ -414,22 +341,24 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (bool, if err != nil { return false, -1, errors.Wrapf(err, "cannot write setgroups file") } + logrus.Debugf("write setgroups file exited with 0") uidMap := fmt.Sprintf("/proc/%d/uid_map", pid) - err = ioutil.WriteFile(uidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Getuid())), 0666) + err = ioutil.WriteFile(uidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Geteuid())), 0666) if err != nil { return false, -1, errors.Wrapf(err, "cannot write uid_map") } + logrus.Debugf("write uid_map exited with 0") } gidsMapped := false - if mappings != nil && gids != nil { - err := tryMappingTool("newgidmap", pid, os.Getgid(), gids) + if gids != nil { + err := tryMappingTool("newgidmap", pid, os.Getegid(), gids) gidsMapped = err == nil } if !gidsMapped { gidMap := fmt.Sprintf("/proc/%d/gid_map", pid) - err = ioutil.WriteFile(gidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Getgid())), 0666) + err = ioutil.WriteFile(gidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Getegid())), 0666) if err != nil { return false, -1, errors.Wrapf(err, "cannot write gid_map") } @@ -472,21 +401,21 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (bool, signals := []os.Signal{} for sig := 0; sig < numSig; sig++ { - if sig == int(syscall.SIGTSTP) { + if sig == int(unix.SIGTSTP) { continue } - signals = append(signals, syscall.Signal(sig)) + signals = append(signals, unix.Signal(sig)) } 
gosignal.Notify(c, signals...) defer gosignal.Reset() go func() { for s := range c { - if s == signal.SIGCHLD || s == signal.SIGPIPE { + if s == unix.SIGCHLD || s == unix.SIGPIPE { continue } - if err := syscall.Kill(int(pidC), s.(syscall.Signal)); err != nil { + if err := unix.Kill(int(pidC), s.(unix.Signal)); err != nil { logrus.Errorf("failed to kill %d", int(pidC)) } } @@ -541,7 +470,7 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st lastErr = nil break } else { - fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_DGRAM, 0) + fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_DGRAM, 0) if err != nil { lastErr = err continue @@ -549,10 +478,10 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st r, w := os.NewFile(uintptr(fds[0]), "read file"), os.NewFile(uintptr(fds[1]), "write file") - defer errorhandling.CloseQuiet(w) defer errorhandling.CloseQuiet(r) if _, _, err := becomeRootInUserNS("", path, w); err != nil { + w.Close() lastErr = err continue } @@ -561,7 +490,6 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st return false, 0, err } defer func() { - errorhandling.CloseQuiet(r) C.reexec_in_user_namespace_wait(-1, 0) }() @@ -586,3 +514,85 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st return joinUserAndMountNS(uint(pausePid), pausePidPath) } +func ReadMappingsProc(path string) ([]idtools.IDMap, error) { + file, err := os.Open(path) + if err != nil { + return nil, errors.Wrapf(err, "cannot open %s", path) + } + defer file.Close() + + mappings := []idtools.IDMap{} + + buf := bufio.NewReader(file) + for { + line, _, err := buf.ReadLine() + if err != nil { + if err == io.EOF { + return mappings, nil + } + return nil, errors.Wrapf(err, "cannot read line from %s", path) + } + if line == nil { + return mappings, nil + } + + containerID, hostID, size := 0, 0, 0 + if _, err := fmt.Sscanf(string(line), "%d %d %d", &containerID, &hostID, &size); err != nil { + return nil, errors.Wrapf(err, "cannot parse %s", string(line)) + } + mappings = append(mappings, idtools.IDMap{ContainerID: containerID, HostID: hostID, Size: size}) + } +} + +func matches(id int, configuredIDs []idtools.IDMap, currentIDs []idtools.IDMap) bool { + // The first mapping is the host user, handle it separately. + if currentIDs[0].HostID != id || currentIDs[0].Size != 1 { + return false + } + + currentIDs = currentIDs[1:] + if len(currentIDs) != len(configuredIDs) { + return false + } + + // It is fine to iterate sequentially as both slices are sorted. + for i := range currentIDs { + if currentIDs[i].HostID != configuredIDs[i].HostID { + return false + } + if currentIDs[i].Size != configuredIDs[i].Size { + return false + } + } + + return true +} + +// ConfigurationMatches checks whether the additional uids/gids configured for the user +// match the current user namespace. 
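
Note: ReadMappingsProc above parses /proc/&lt;pid&gt;/uid_map and gid_map, where every line is three integers: the ID inside the namespace, the ID outside, and the range length. A self-contained sketch of that parsing (readIDMap is illustrative, not the vendored helper, and returns bare triples instead of idtools.IDMap):

package idmaps

import (
	"bufio"
	"fmt"
	"os"
)

// readIDMap parses a /proc/<pid>/uid_map or gid_map file; each line is
// "container-ID host-ID size".
func readIDMap(path string) ([][3]int, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var out [][3]int
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		var ctr, host, size int
		if _, err := fmt.Sscanf(sc.Text(), "%d %d %d", &ctr, &host, &size); err != nil {
			return nil, fmt.Errorf("cannot parse %q: %v", sc.Text(), err)
		}
		out = append(out, [3]int{ctr, host, size})
	}
	return out, sc.Err()
}
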
+func ConfigurationMatches() (bool, error) { + if !IsRootless() || os.Geteuid() != 0 { + return true, nil + } + + uids, gids, err := GetConfiguredMappings() + if err != nil { + return false, err + } + + currentUIDs, err := ReadMappingsProc("/proc/self/uid_map") + if err != nil { + return false, err + } + + if !matches(GetRootlessUID(), uids, currentUIDs) { + return false, err + } + + currentGIDs, err := ReadMappingsProc("/proc/self/gid_map") + if err != nil { + return false, err + } + + return matches(GetRootlessGID(), gids, currentGIDs), nil +} diff --git a/vendor/github.com/containers/libpod/pkg/rootless/rootless_unsupported.go b/vendor/github.com/containers/libpod/pkg/rootless/rootless_unsupported.go index a8485c083d..1499b737f1 100644 --- a/vendor/github.com/containers/libpod/pkg/rootless/rootless_unsupported.go +++ b/vendor/github.com/containers/libpod/pkg/rootless/rootless_unsupported.go @@ -5,6 +5,7 @@ package rootless import ( "os" + "github.com/containers/storage/pkg/idtools" "github.com/pkg/errors" ) @@ -36,12 +37,6 @@ func GetRootlessGID() int { return -1 } -// EnableLinger configures the system to not kill the user processes once the session -// terminates -func EnableLinger() (string, error) { - return "", nil -} - // TryJoinFromFilePaths attempts to join the namespaces of the pid files in paths. // This is useful when there are already running containers and we // don't have a pause process yet. We can use the paths to the conmon @@ -53,3 +48,19 @@ func EnableLinger() (string, error) { func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []string) (bool, int, error) { return false, -1, errors.New("this function is not supported on this os") } + +// ConfigurationMatches checks whether the additional uids/gids configured for the user +// match the current user namespace. +func ConfigurationMatches() (bool, error) { + return true, nil +} + +// GetConfiguredMappings returns the additional IDs configured for the current user. 
+func GetConfiguredMappings() ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, errors.New("this function is not supported on this os") +} + +// ReadMappingsProc returns the uid_map and gid_map +func ReadMappingsProc(path string) ([]idtools.IDMap, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/libpod/pkg/spec/config_linux.go b/vendor/github.com/containers/libpod/pkg/spec/config_linux.go index 60d31d78ea..32d8cb4deb 100644 --- a/vendor/github.com/containers/libpod/pkg/spec/config_linux.go +++ b/vendor/github.com/containers/libpod/pkg/spec/config_linux.go @@ -4,6 +4,7 @@ package createconfig import ( "fmt" + "io/ioutil" "os" "path/filepath" "strings" @@ -98,6 +99,26 @@ func addDevice(g *generate.Generator, device string) error { if err != nil { return errors.Wrapf(err, "%s is not a valid device", src) } + if rootless.IsRootless() { + if _, err := os.Stat(src); err != nil { + if os.IsNotExist(err) { + return errors.Wrapf(err, "the specified device %s doesn't exist", src) + } + return errors.Wrapf(err, "stat device %s exist", src) + } + perm := "ro" + if strings.Contains(permissions, "w") { + perm = "rw" + } + devMnt := spec.Mount{ + Destination: dst, + Type: TypeBind, + Source: src, + Options: []string{"slave", "nosuid", "noexec", perm, "rbind"}, + } + g.Config.Mounts = append(g.Config.Mounts, devMnt) + return nil + } dev.Path = dst linuxdev := spec.LinuxDevice{ Path: dev.Path, @@ -113,8 +134,53 @@ func addDevice(g *generate.Generator, device string) error { return nil } +// based on getDevices from runc (libcontainer/devices/devices.go) +func getDevices(path string) ([]*configs.Device, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + if rootless.IsRootless() && os.IsPermission(err) { + return nil, nil + } + return nil, err + } + out := []*configs.Device{} + for _, f := range files { + switch { + case f.IsDir(): + switch f.Name() { + // ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825 + case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts": + continue + default: + sub, err := getDevices(filepath.Join(path, f.Name())) + if err != nil { + return nil, err + } + if sub != nil { + out = append(out, sub...) + } + continue + } + case f.Name() == "console": + continue + } + device, err := devices.DeviceFromPath(filepath.Join(path, f.Name()), "rwm") + if err != nil { + if err == devices.ErrNotADevice { + continue + } + if os.IsNotExist(err) { + continue + } + return nil, err + } + out = append(out, device) + } + return out, nil +} + func (c *CreateConfig) addPrivilegedDevices(g *generate.Generator) error { - hostDevices, err := devices.HostDevices() + hostDevices, err := getDevices("/dev") if err != nil { return err } @@ -153,15 +219,16 @@ func (c *CreateConfig) addPrivilegedDevices(g *generate.Generator) error { newMounts = append(newMounts, devMnt) } g.Config.Mounts = append(newMounts, g.Config.Mounts...) + g.Config.Linux.Resources.Devices = nil } else { for _, d := range hostDevices { g.AddDevice(Device(d)) } + // Add resources device - need to clear the existing one first. + g.Config.Linux.Resources.Devices = nil + g.AddLinuxResourcesDevice(true, "", nil, nil, "rwm") } - // Add resources device - need to clear the existing one first. 
- g.Config.Linux.Resources.Devices = nil - g.AddLinuxResourcesDevice(true, "", nil, nil, "rwm") return nil } diff --git a/vendor/github.com/containers/libpod/pkg/spec/config_linux_cgo.go b/vendor/github.com/containers/libpod/pkg/spec/config_linux_cgo.go index e6e92a7ccd..a1527752ae 100644 --- a/vendor/github.com/containers/libpod/pkg/spec/config_linux_cgo.go +++ b/vendor/github.com/containers/libpod/pkg/spec/config_linux_cgo.go @@ -5,9 +5,9 @@ package createconfig import ( "io/ioutil" - "github.com/docker/docker/profiles/seccomp" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" + seccomp "github.com/seccomp/containers-golang" ) func getSeccompConfig(config *CreateConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) { diff --git a/vendor/github.com/containers/libpod/pkg/spec/createconfig.go b/vendor/github.com/containers/libpod/pkg/spec/createconfig.go index 3f70e59355..2a8fe7332f 100644 --- a/vendor/github.com/containers/libpod/pkg/spec/createconfig.go +++ b/vendor/github.com/containers/libpod/pkg/spec/createconfig.go @@ -7,7 +7,7 @@ import ( "strings" "syscall" - "github.com/containers/image/manifest" + "github.com/containers/image/v5/manifest" "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/namespaces" @@ -64,6 +64,7 @@ type CreateConfig struct { CidFile string ConmonPidFile string Cgroupns string + Cgroups string CgroupParent string // cgroup-parent Command []string // Full command that will be used UserCommand []string // User-entered command (or image CMD) @@ -103,7 +104,8 @@ type CreateConfig struct { NetworkAlias []string //network-alias PidMode namespaces.PidMode //pid Pod string //pod - CgroupMode namespaces.CgroupMode //cgroup + PodmanPath string + CgroupMode namespaces.CgroupMode //cgroup PortBindings nat.PortMap Privileged bool //privileged Publish []string //publish @@ -152,7 +154,16 @@ func (c *CreateConfig) createExitCommand(runtime *libpod.Runtime) ([]string, err return nil, err } - cmd, _ := os.Executable() + // We need a cleanup process for containers in the current model. + // But we can't assume that the caller is Podman - it could be another + // user of the API. + // As such, provide a way to specify a path to Podman, so we can + // still invoke a cleanup process. 
+ cmd := c.PodmanPath + if cmd == "" { + cmd, _ = os.Executable() + } + command := []string{cmd, "--root", config.StorageConfig.GraphRoot, "--runroot", config.StorageConfig.RunRoot, @@ -194,8 +205,7 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l if c.Interactive { options = append(options, libpod.WithStdin()) } - if c.Systemd && (strings.HasSuffix(c.Command[0], "init") || - strings.HasSuffix(c.Command[0], "systemd")) { + if c.Systemd { options = append(options, libpod.WithSystemd()) } if c.Name != "" { @@ -206,6 +216,9 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l logrus.Debugf("adding container to pod %s", c.Pod) options = append(options, runtime.WithPod(pod)) } + if c.Cgroups == "disabled" { + options = append(options, libpod.WithNoCgroups()) + } if len(c.PortBindings) > 0 { portBindings, err = c.CreatePortBindings() if err != nil { @@ -271,7 +284,7 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l options = append(options, libpod.WithNetNSFrom(connectedCtr)) } else if !c.NetMode.IsHost() && !c.NetMode.IsNone() { hasUserns := c.UsernsMode.IsContainer() || c.UsernsMode.IsNS() || len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0 - postConfigureNetNS := c.NetMode.IsSlirp4netns() || (hasUserns && !c.UsernsMode.IsHost()) + postConfigureNetNS := hasUserns && !c.UsernsMode.IsHost() options = append(options, libpod.WithNetNS(portBindings, postConfigureNetNS, string(c.NetMode), networks)) } diff --git a/vendor/github.com/containers/libpod/pkg/spec/spec.go b/vendor/github.com/containers/libpod/pkg/spec/spec.go index 156d6849df..86d701f7eb 100644 --- a/vendor/github.com/containers/libpod/pkg/spec/spec.go +++ b/vendor/github.com/containers/libpod/pkg/spec/spec.go @@ -2,13 +2,14 @@ package createconfig import ( "os" - "path/filepath" "strings" "github.com/containers/libpod/libpod" + libpodconfig "github.com/containers/libpod/libpod/config" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" - pmount "github.com/containers/storage/pkg/mount" + "github.com/containers/libpod/pkg/sysinfo" "github.com/docker/docker/oci/caps" "github.com/docker/go-units" "github.com/opencontainers/runc/libcontainer/user" @@ -301,10 +302,35 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM blockAccessToKernelFilesystems(config, &g) + var runtimeConfig *libpodconfig.Config + + if runtime != nil { + runtimeConfig, err = runtime.GetConfig() + if err != nil { + return nil, err + } + } + // RESOURCES - PIDS - if config.Resources.PidsLimit != 0 { - g.SetLinuxResourcesPidsLimit(config.Resources.PidsLimit) - addedResources = true + if config.Resources.PidsLimit > 0 { + // if running on rootless on a cgroupv1 machine or using the cgroupfs manager, pids + // limit is not supported. If the value is still the default + // then ignore the settings. If the caller asked for a + // non-default, then try to use it. 
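
Note: the pids-limit logic that follows leans on cgroups.IsCgroup2UnifiedMode, added earlier in this diff, which boils down to a statfs on /sys/fs/cgroup. A minimal sketch of that check, minus the sync.Once caching the vendored version uses:

package cgver

import "golang.org/x/sys/unix"

// isCgroup2Unified reports whether /sys/fs/cgroup is a pure cgroup v2
// (unified) mount, by comparing the filesystem magic number.
func isCgroup2Unified() (bool, error) {
	var st unix.Statfs_t
	if err := unix.Statfs("/sys/fs/cgroup", &st); err != nil {
		return false, err
	}
	return st.Type == unix.CGROUP2_SUPER_MAGIC, nil
}
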
+ setPidLimit := true + if rootless.IsRootless() { + cgroup2, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return nil, err + } + if (!cgroup2 || (runtimeConfig != nil && runtimeConfig.CgroupManager != define.SystemdCgroupsManager)) && config.Resources.PidsLimit == sysinfo.GetDefaultPidsLimit() { + setPidLimit = false + } + } + if setPidLimit { + g.SetLinuxResourcesPidsLimit(config.Resources.PidsLimit) + addedResources = true + } } for name, val := range config.Env { @@ -368,7 +394,11 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM // BIND MOUNTS configSpec.Mounts = supercedeUserMounts(userMounts, configSpec.Mounts) // Process mounts to ensure correct options - configSpec.Mounts = initFSMounts(configSpec.Mounts) + finalMounts, err := initFSMounts(configSpec.Mounts) + if err != nil { + return nil, err + } + configSpec.Mounts = finalMounts // BLOCK IO blkio, err := config.CreateBlockIO() @@ -385,50 +415,31 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM if err != nil { return nil, err } - if addedResources && !cgroup2 { - return nil, errors.New("invalid configuration, cannot set resources with rootless containers not using cgroups v2 unified mode") + if !addedResources { + configSpec.Linux.Resources = &spec.LinuxResources{} } - if !cgroup2 { + + canUseResources := cgroup2 && runtimeConfig != nil && (runtimeConfig.CgroupManager == define.SystemdCgroupsManager) + + if addedResources && !canUseResources { + return nil, errors.New("invalid configuration, cannot specify resource limits without cgroups v2 and --cgroup-manager=systemd") + } + if !canUseResources { // Force the resources block to be empty instead of having default values. configSpec.Linux.Resources = &spec.LinuxResources{} } } - // Make sure that the bind mounts keep options like nosuid, noexec, nodev. 
-	mounts, err := pmount.GetMounts()
-	if err != nil {
-		return nil, err
-	}
-	for i := range configSpec.Mounts {
-		m := &configSpec.Mounts[i]
-		isBind := false
-		for _, o := range m.Options {
-			if o == "bind" || o == "rbind" {
-				isBind = true
-				break
-			}
-		}
-		if !isBind {
-			continue
-		}
-		mount, err := findMount(m.Source, mounts)
-		if err != nil {
-			return nil, err
-		}
-		if mount == nil {
-			continue
-		}
-	next_option:
-		for _, o := range strings.Split(mount.Opts, ",") {
-			if o == "nosuid" || o == "noexec" || o == "nodev" {
-				for _, e := range m.Options {
-					if e == o {
-						continue next_option
-					}
-				}
-				m.Options = append(m.Options, o)
-			}
+	switch config.Cgroups {
+	case "disabled":
+		if addedResources {
+			return nil, errors.New("cannot specify resource limits when cgroups are disabled")
+		}
+		configSpec.Linux.Resources = &spec.LinuxResources{}
+	case "enabled", "":
+		// Do nothing
+	default:
+		return nil, errors.New("unrecognized option for cgroups; supported are 'enabled' and 'disabled'")
 	}
 
 	// Add annotations
@@ -490,25 +501,6 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
 	return configSpec, nil
 }
 
-func findMount(target string, mounts []*pmount.Info) (*pmount.Info, error) {
-	var err error
-	target, err = filepath.Abs(target)
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot resolve %s", target)
-	}
-	var bestSoFar *pmount.Info
-	for _, i := range mounts {
-		if bestSoFar != nil && len(bestSoFar.Mountpoint) > len(i.Mountpoint) {
-			// Won't be better than what we have already found
-			continue
-		}
-		if strings.HasPrefix(target, i.Mountpoint) {
-			bestSoFar = i
-		}
-	}
-	return bestSoFar, nil
-}
-
 func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator) {
 	if !config.Privileged {
 		for _, mp := range []string{
diff --git a/vendor/github.com/containers/libpod/pkg/spec/storage.go b/vendor/github.com/containers/libpod/pkg/spec/storage.go
index a8dc7f4a87..0955345896 100644
--- a/vendor/github.com/containers/libpod/pkg/spec/storage.go
+++ b/vendor/github.com/containers/libpod/pkg/spec/storage.go
@@ -10,7 +10,7 @@ import (
 	"github.com/containers/buildah/pkg/parse"
 	"github.com/containers/libpod/libpod"
 	"github.com/containers/libpod/pkg/util"
-	"github.com/containers/storage/pkg/stringid"
+	pmount "github.com/containers/storage/pkg/mount"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -160,22 +160,21 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount,
 	}
 
 	// If requested, add tmpfs filesystems for read-only containers.
-	// Need to keep track of which we created, so we don't modify options
-	// for them later...
- readonlyTmpfs := map[string]bool{ - "/tmp": false, - "/var/tmp": false, - "/run": false, - } if config.ReadOnlyRootfs && config.ReadOnlyTmpfs { - options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup", "size=65536k"} - for dest := range readonlyTmpfs { + readonlyTmpfs := []string{"/tmp", "/var/tmp", "/run"} + options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"} + for _, dest := range readonlyTmpfs { if _, ok := baseMounts[dest]; ok { continue } + if _, ok := baseVolumes[dest]; ok { + continue + } localOpts := options if dest == "/run" { - localOpts = append(localOpts, "noexec") + localOpts = append(localOpts, "noexec", "size=65536k") + } else { + localOpts = append(localOpts, "exec") } baseMounts[dest] = spec.Mount{ Destination: dest, @@ -183,7 +182,6 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount, Source: "tmpfs", Options: localOpts, } - readonlyTmpfs[dest] = true } } @@ -202,16 +200,6 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount, // Final step: maps to arrays finalMounts := make([]spec.Mount, 0, len(baseMounts)) for _, mount := range baseMounts { - // All user-added tmpfs mounts need their options processed. - // Exception: mounts added by the ReadOnlyTmpfs option, which - // contain several exceptions to normal options rules. - if mount.Type == TypeTmpfs && !readonlyTmpfs[mount.Destination] { - opts, err := util.ProcessTmpfsOptions(mount.Options) - if err != nil { - return nil, nil, err - } - mount.Options = opts - } if mount.Type == TypeBind { absSrc, err := filepath.Abs(mount.Source) if err != nil { @@ -226,9 +214,6 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount, finalVolumes = append(finalVolumes, volume) } - logrus.Debugf("Got mounts: %v", finalMounts) - logrus.Debugf("Got volumes: %v", finalVolumes) - return finalMounts, finalVolumes, nil } @@ -237,6 +222,7 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount, // volumes, and return a list of them. // Conflicts are resolved simply - the last container specified wins. // Container names may be suffixed by mount options after a colon. +// TODO: We should clean these paths if possible func (config *CreateConfig) getVolumesFrom(runtime *libpod.Runtime) (map[string]spec.Mount, map[string]*libpod.ContainerNamedVolume, error) { // Both of these are maps of mount destination to mount type. // We ensure that each destination is only mounted to once in this way. 
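The rework above drops the tracking map in favor of a plain slice and bakes the per-destination differences into the loop: /run becomes noexec with a fixed size, while /tmp and /var/tmp get exec. A self-contained sketch of the resulting defaults (assuming only the runtime-spec types; the output is my reading of the hunk):

```go
package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// readOnlyTmpfsMounts rebuilds the defaults from the hunk above.
func readOnlyTmpfsMounts() []spec.Mount {
	base := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"}
	var mounts []spec.Mount
	for _, dest := range []string{"/tmp", "/var/tmp", "/run"} {
		opts := append([]string{}, base...)
		if dest == "/run" {
			opts = append(opts, "noexec", "size=65536k")
		} else {
			opts = append(opts, "exec")
		}
		mounts = append(mounts, spec.Mount{Destination: dest, Type: "tmpfs", Source: "tmpfs", Options: opts})
	}
	return mounts
}

func main() {
	for _, m := range readOnlyTmpfsMounts() {
		fmt.Println(m.Destination, m.Options)
	}
}
```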
@@ -250,14 +236,17 @@ func (config *CreateConfig) getVolumesFrom(runtime *libpod.Runtime) (map[string] splitVol = strings.SplitN(vol, ":", 2) ) if len(splitVol) == 2 { - if strings.Contains(splitVol[1], "Z") || - strings.Contains(splitVol[1], "private") || - strings.Contains(splitVol[1], "slave") || - strings.Contains(splitVol[1], "shared") { - return nil, nil, errors.Errorf("invalid options %q, can only specify 'ro', 'rw', and 'z", splitVol[1]) + splitOpts := strings.Split(splitVol[1], ",") + for _, checkOpt := range splitOpts { + switch checkOpt { + case "z", "ro", "rw": + // Do nothing, these are valid options + default: + return nil, nil, errors.Errorf("invalid options %q, can only specify 'ro', 'rw', and 'z'", splitVol[1]) + } } - if options, err = parse.ValidateVolumeOpts(strings.Split(splitVol[1], ",")); err != nil { + if options, err = parse.ValidateVolumeOpts(splitOpts); err != nil { return nil, nil, err } } @@ -403,9 +392,7 @@ func getBindMount(args []string) (spec.Mount, error) { Type: TypeBind, } - setSource := false - setDest := false - setRORW := false + var setSource, setDest, setRORW, setSuid, setDev, setExec, setRelabel bool for _, val := range args { kv := strings.Split(val, "=") @@ -440,9 +427,23 @@ func getBindMount(args []string) (spec.Mount, error) { } else { return newMount, errors.Wrapf(optionArgError, "badly formatted option %q", val) } - case "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options. - // (Is this necessary?) + case "nosuid", "suid": + if setSuid { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once") + } + setSuid = true + newMount.Options = append(newMount.Options, kv[0]) + case "nodev", "dev": + if setDev { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once") + } + setDev = true + newMount.Options = append(newMount.Options, kv[0]) + case "noexec", "exec": + if setExec { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once") + } + setExec = true newMount.Options = append(newMount.Options, kv[0]) case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z": newMount.Options = append(newMount.Options, kv[0]) @@ -467,8 +468,24 @@ func getBindMount(args []string) (spec.Mount, error) { if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { return newMount, err } - newMount.Destination = kv[1] + newMount.Destination = filepath.Clean(kv[1]) setDest = true + case "relabel": + if setRelabel { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'relabel' option more than once") + } + setRelabel = true + if len(kv) != 2 { + return newMount, errors.Wrapf(util.ErrBadMntOption, "%s mount option must be 'private' or 'shared'", kv[0]) + } + switch kv[1] { + case "private": + newMount.Options = append(newMount.Options, "z") + case "shared": + newMount.Options = append(newMount.Options, "Z") + default: + return newMount, errors.Wrapf(util.ErrBadMntOption, "%s mount option must be 'private' or 'shared'", kv[0]) + } default: return newMount, errors.Wrapf(util.ErrBadMntOption, kv[0]) } @@ -497,12 +514,34 @@ func getTmpfsMount(args []string) (spec.Mount, error) { Source: TypeTmpfs, } - setDest := false + var setDest, setRORW, setSuid, setDev, setExec bool for _, val := range args { kv := strings.Split(val, "=") switch kv[0] { - case "ro", "nosuid", "nodev", "noexec": + case "ro", "rw": + if setRORW { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'ro' and 
'rw' options more than once") + } + setRORW = true + newMount.Options = append(newMount.Options, kv[0]) + case "nosuid", "suid": + if setSuid { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once") + } + setSuid = true + newMount.Options = append(newMount.Options, kv[0]) + case "nodev", "dev": + if setDev { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once") + } + setDev = true + newMount.Options = append(newMount.Options, kv[0]) + case "noexec", "exec": + if setExec { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once") + } + setExec = true newMount.Options = append(newMount.Options, kv[0]) case "tmpfs-mode": if len(kv) == 1 { @@ -523,7 +562,7 @@ func getTmpfsMount(args []string) (spec.Mount, error) { if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { return newMount, err } - newMount.Destination = kv[1] + newMount.Destination = filepath.Clean(kv[1]) setDest = true default: return newMount, errors.Wrapf(util.ErrBadMntOption, kv[0]) @@ -543,14 +582,34 @@ func getTmpfsMount(args []string) (spec.Mount, error) { func getNamedVolume(args []string) (*libpod.ContainerNamedVolume, error) { newVolume := new(libpod.ContainerNamedVolume) - setSource := false - setDest := false + var setSource, setDest, setRORW, setSuid, setDev, setExec bool for _, val := range args { kv := strings.Split(val, "=") switch kv[0] { - case "ro", "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options + case "ro", "rw": + if setRORW { + return nil, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once") + } + setRORW = true + newVolume.Options = append(newVolume.Options, kv[0]) + case "nosuid", "suid": + if setSuid { + return nil, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once") + } + setSuid = true + newVolume.Options = append(newVolume.Options, kv[0]) + case "nodev", "dev": + if setDev { + return nil, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once") + } + setDev = true + newVolume.Options = append(newVolume.Options, kv[0]) + case "noexec", "exec": + if setExec { + return nil, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once") + } + setExec = true newVolume.Options = append(newVolume.Options, kv[0]) case "volume-label": return nil, errors.Errorf("the --volume-label option is not presently implemented") @@ -567,7 +626,7 @@ func getNamedVolume(args []string) (*libpod.ContainerNamedVolume, error) { if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { return nil, err } - newVolume.Dest = kv[1] + newVolume.Dest = filepath.Clean(kv[1]) setDest = true default: return nil, errors.Wrapf(util.ErrBadMntOption, kv[0]) @@ -588,7 +647,7 @@ func (config *CreateConfig) getVolumeMounts() (map[string]spec.Mount, map[string mounts := make(map[string]spec.Mount) volumes := make(map[string]*libpod.ContainerNamedVolume) - volumeFormatErr := errors.Errorf("incorrect volume format, should be host-dir:ctr-dir[:option]") + volumeFormatErr := errors.Errorf("incorrect volume format, should be [host-dir:]ctr-dir[:option]") for _, vol := range config.Volumes { var ( @@ -605,7 +664,11 @@ func (config *CreateConfig) getVolumeMounts() (map[string]spec.Mount, map[string src = splitVol[0] if len(splitVol) == 1 { - dest = src + // This is an anonymous named volume. Only thing given + // is destination. 
+ // Name/source will be blank, and populated by libpod. + src = "" + dest = splitVol[0] } else if len(splitVol) > 1 { dest = splitVol[1] } @@ -615,17 +678,22 @@ func (config *CreateConfig) getVolumeMounts() (map[string]spec.Mount, map[string } } - if err := parse.ValidateVolumeHostDir(src); err != nil { - return nil, nil, err + // Do not check source dir for anonymous volumes + if len(splitVol) > 1 { + if err := parse.ValidateVolumeHostDir(src); err != nil { + return nil, nil, err + } } if err := parse.ValidateVolumeCtrDir(dest); err != nil { return nil, nil, err } + cleanDest := filepath.Clean(dest) + if strings.HasPrefix(src, "/") || strings.HasPrefix(src, ".") { // This is not a named volume newMount := spec.Mount{ - Destination: dest, + Destination: cleanDest, Type: string(TypeBind), Source: src, Options: options, @@ -638,7 +706,7 @@ func (config *CreateConfig) getVolumeMounts() (map[string]spec.Mount, map[string // This is a named volume newNamedVol := new(libpod.ContainerNamedVolume) newNamedVol.Name = src - newNamedVol.Dest = dest + newNamedVol.Dest = cleanDest newNamedVol.Options = options if _, ok := volumes[newNamedVol.Dest]; ok { @@ -663,20 +731,21 @@ func (config *CreateConfig) getImageVolumes() (map[string]spec.Mount, map[string } for vol := range config.BuiltinImgVolumes { + cleanDest := filepath.Clean(vol) if config.ImageVolumeType == "tmpfs" { // Tmpfs image volumes are handled as mounts mount := spec.Mount{ - Destination: vol, + Destination: cleanDest, Source: TypeTmpfs, Type: TypeTmpfs, - Options: []string{"rprivate", "rw", "nodev"}, + Options: []string{"rprivate", "rw", "nodev", "exec"}, } mounts[vol] = mount } else { + // Anonymous volumes have no name. namedVolume := new(libpod.ContainerNamedVolume) - namedVolume.Name = stringid.GenerateNonCryptoID() - namedVolume.Options = []string{"rprivate", "rw", "nodev"} - namedVolume.Dest = vol + namedVolume.Options = []string{"rprivate", "rw", "nodev", "exec"} + namedVolume.Dest = cleanDest volumes[vol] = namedVolume } } @@ -692,6 +761,9 @@ func (config *CreateConfig) getTmpfsMounts() (map[string]spec.Mount, error) { var options []string spliti := strings.Split(i, ":") destPath := spliti[0] + if err := parse.ValidateVolumeCtrDir(spliti[0]); err != nil { + return nil, err + } if len(spliti) > 1 { options = strings.Split(spliti[1], ",") } @@ -701,7 +773,7 @@ func (config *CreateConfig) getTmpfsMounts() (map[string]spec.Mount, error) { } mount := spec.Mount{ - Destination: destPath, + Destination: filepath.Clean(destPath), Type: string(TypeTmpfs), Options: options, Source: string(TypeTmpfs), @@ -775,16 +847,75 @@ func supercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.M } // Ensure mount options on all mounts are correct -func initFSMounts(inputMounts []spec.Mount) []spec.Mount { +func initFSMounts(inputMounts []spec.Mount) ([]spec.Mount, error) { + // We need to look up mounts so we can figure out the proper mount flags + // to apply. 
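A pattern repeated throughout this file is wrapping every user-supplied destination in filepath.Clean before storing it or using it as a map key, so equivalent spellings of the same path collide as intended. For illustration:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// All of these normalize to /var/tmp, so duplicate-destination
	// checks against maps keyed by destination now behave consistently.
	fmt.Println(filepath.Clean("/var/tmp/"))
	fmt.Println(filepath.Clean("/var/./tmp"))
	fmt.Println(filepath.Clean("/var/x/../tmp"))
}
```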
+ systemMounts, err := pmount.GetMounts() + if err != nil { + return nil, errors.Wrapf(err, "error retrieving system mounts to look up mount options") + } + + // TODO: We probably don't need to re-build the mounts array var mounts []spec.Mount for _, m := range inputMounts { if m.Type == TypeBind { - m.Options = util.ProcessOptions(m.Options) + baseMnt, err := findMount(m.Destination, systemMounts) + if err != nil { + return nil, errors.Wrapf(err, "error looking up mountpoint for mount %s", m.Destination) + } + var noexec, nosuid, nodev bool + for _, baseOpt := range strings.Split(baseMnt.Opts, ",") { + switch baseOpt { + case "noexec": + noexec = true + case "nosuid": + nosuid = true + case "nodev": + nodev = true + } + } + + defaultMountOpts := new(util.DefaultMountOptions) + defaultMountOpts.Noexec = noexec + defaultMountOpts.Nosuid = nosuid + defaultMountOpts.Nodev = nodev + + opts, err := util.ProcessOptions(m.Options, false, defaultMountOpts) + if err != nil { + return nil, err + } + m.Options = opts } if m.Type == TypeTmpfs && filepath.Clean(m.Destination) != "/dev" { - m.Options = append(m.Options, "tmpcopyup") + opts, err := util.ProcessOptions(m.Options, true, nil) + if err != nil { + return nil, err + } + m.Options = opts } + mounts = append(mounts, m) } - return mounts + return mounts, nil +} + +// TODO: We could make this a bit faster by building a tree of the mountpoints +// and traversing it to identify the correct mount. +func findMount(target string, mounts []*pmount.Info) (*pmount.Info, error) { + var err error + target, err = filepath.Abs(target) + if err != nil { + return nil, errors.Wrapf(err, "cannot resolve %s", target) + } + var bestSoFar *pmount.Info + for _, i := range mounts { + if bestSoFar != nil && len(bestSoFar.Mountpoint) > len(i.Mountpoint) { + // Won't be better than what we have already found + continue + } + if strings.HasPrefix(target, i.Mountpoint) { + bestSoFar = i + } + } + return bestSoFar, nil } diff --git a/vendor/github.com/containers/libpod/pkg/sysinfo/README.md b/vendor/github.com/containers/libpod/pkg/sysinfo/README.md new file mode 100644 index 0000000000..c1530cef0d --- /dev/null +++ b/vendor/github.com/containers/libpod/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. diff --git a/vendor/github.com/containers/libpod/pkg/sysinfo/numcpu.go b/vendor/github.com/containers/libpod/pkg/sysinfo/numcpu.go new file mode 100644 index 0000000000..aeb1a3a804 --- /dev/null +++ b/vendor/github.com/containers/libpod/pkg/sysinfo/numcpu.go @@ -0,0 +1,12 @@ +// +build !linux,!windows + +package sysinfo + +import ( + "runtime" +) + +// NumCPU returns the number of CPUs +func NumCPU() int { + return runtime.NumCPU() +} diff --git a/vendor/github.com/containers/libpod/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/containers/libpod/pkg/sysinfo/numcpu_linux.go new file mode 100644 index 0000000000..f1d2d9db30 --- /dev/null +++ b/vendor/github.com/containers/libpod/pkg/sysinfo/numcpu_linux.go @@ -0,0 +1,44 @@ +// +build linux + +package sysinfo + +import ( + "runtime" + "unsafe" + + "golang.org/x/sys/unix" +) + +// numCPU queries the system for the count of threads available +// for use to this process. +// +// Issues two syscalls. +// Returns 0 on errors. Use |runtime.NumCPU| in that case. +func numCPU() int { + // Gets the affinity mask for a process: The very one invoking this function. 
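findMount, relocated here from createconfig.go, picks the mount whose mountpoint is the longest prefix of the target; initFSMounts uses it so a bind mount inherits noexec/nosuid/nodev from the filesystem backing it. A hedged standalone use of the same containers/storage API (the /var/tmp target is arbitrary):

```go
package main

import (
	"fmt"
	"strings"

	pmount "github.com/containers/storage/pkg/mount"
)

func main() {
	mounts, err := pmount.GetMounts()
	if err != nil {
		panic(err)
	}
	// Longest-prefix match over mountpoints, as in findMount above.
	var best *pmount.Info
	for _, m := range mounts {
		if best != nil && len(best.Mountpoint) > len(m.Mountpoint) {
			continue
		}
		if strings.HasPrefix("/var/tmp", m.Mountpoint) {
			best = m
		}
	}
	if best != nil {
		fmt.Printf("/var/tmp lives on %s (opts: %s)\n", best.Mountpoint, best.Opts)
	}
}
```

Note that a raw strings.HasPrefix match can conflate siblings such as /foo and /foobar, a caveat worth keeping in mind alongside the speed-oriented TODO above.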
+ pid, _, _ := unix.RawSyscall(unix.SYS_GETPID, 0, 0, 0) + + var mask [1024 / 64]uintptr + _, _, err := unix.RawSyscall(unix.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0]))) + if err != 0 { + return 0 + } + + // For every available thread a bit is set in the mask. + ncpu := 0 + for _, e := range mask { + if e == 0 { + continue + } + ncpu += int(popcnt(uint64(e))) + } + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/vendor/github.com/containers/libpod/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/containers/libpod/pkg/sysinfo/numcpu_windows.go new file mode 100644 index 0000000000..1d89dd5503 --- /dev/null +++ b/vendor/github.com/containers/libpod/pkg/sysinfo/numcpu_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package sysinfo + +import ( + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + kernel32 = windows.NewLazySystemDLL("kernel32.dll") + getCurrentProcess = kernel32.NewProc("GetCurrentProcess") + getProcessAffinityMask = kernel32.NewProc("GetProcessAffinityMask") +) + +func numCPU() int { + // Gets the affinity mask for a process + var mask, sysmask uintptr + currentProcess, _, _ := getCurrentProcess.Call() + ret, _, _ := getProcessAffinityMask.Call(currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask))) + if ret == 0 { + return 0 + } + // For every available thread a bit is set in the mask. + ncpu := int(popcnt(uint64(mask))) + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo.go b/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo.go new file mode 100644 index 0000000000..686f66ce52 --- /dev/null +++ b/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo.go @@ -0,0 +1,153 @@ +package sysinfo + +import "github.com/docker/docker/pkg/parsers" + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities. 
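The raw-syscall implementation above works on any Go version this vendoring targets; for comparison, golang.org/x/sys/unix also ships a typed wrapper that makes the manual mask walk and popcount unnecessary (a sketch, not what the vendored code does):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var set unix.CPUSet
	// pid 0 means the calling thread, matching the getpid-based lookup
	// in numcpu_linux.go above.
	if err := unix.SchedGetaffinity(0, &set); err != nil {
		panic(err)
	}
	fmt.Println("CPUs available to this process:", set.Count())
}
```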
+type SysInfo struct { + // Whether the kernel supports AppArmor or not + AppArmor bool + // Whether the kernel supports Seccomp or not + Seccomp bool + + cgroupMemInfo + cgroupCPUInfo + cgroupBlkioInfo + cgroupCpusetInfo + cgroupPids + + // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work + IPv4ForwardingDisabled bool + + // Whether bridge-nf-call-iptables is supported or not + BridgeNFCallIPTablesDisabled bool + + // Whether bridge-nf-call-ip6tables is supported or not + BridgeNFCallIP6TablesDisabled bool + + // Whether the cgroup has the mountpoint of "devices" or not + CgroupDevicesEnabled bool +} + +type cgroupMemInfo struct { + // Whether memory limit is supported or not + MemoryLimit bool + + // Whether swap limit is supported or not + SwapLimit bool + + // Whether soft limit is supported or not + MemoryReservation bool + + // Whether OOM killer disable is supported or not + OomKillDisable bool + + // Whether memory swappiness is supported or not + MemorySwappiness bool + + // Whether kernel memory limit is supported or not + KernelMemory bool +} + +type cgroupCPUInfo struct { + // Whether CPU shares is supported or not + CPUShares bool + + // Whether CPU CFS(Completely Fair Scheduler) period is supported or not + CPUCfsPeriod bool + + // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not + CPUCfsQuota bool + + // Whether CPU real-time period is supported or not + CPURealtimePeriod bool + + // Whether CPU real-time runtime is supported or not + CPURealtimeRuntime bool +} + +type cgroupBlkioInfo struct { + // Whether Block IO weight is supported or not + BlkioWeight bool + + // Whether Block IO weight_device is supported or not + BlkioWeightDevice bool + + // Whether Block IO read limit in bytes per second is supported or not + BlkioReadBpsDevice bool + + // Whether Block IO write limit in bytes per second is supported or not + BlkioWriteBpsDevice bool + + // Whether Block IO read limit in IO per second is supported or not + BlkioReadIOpsDevice bool + + // Whether Block IO write limit in IO per second is supported or not + BlkioWriteIOpsDevice bool +} + +type cgroupCpusetInfo struct { + // Whether Cpuset is supported or not + Cpuset bool + + // Available Cpuset's cpus + Cpus string + + // Available Cpuset's memory nodes + Mems string +} + +type cgroupPids struct { + // Whether Pids Limit is supported or not + PidsLimit bool +} + +// IsCpusetCpusAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.cpus set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Cpus) +} + +// IsCpusetMemsAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.mems set, `false` otherwise. +// If error is not nil a parsing error occurred. 
+func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedProvided, err := parsers.ParseUintList(provided) + if err != nil { + return false, err + } + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} + +// Returns bit count of 1, used by NumCPU +func popcnt(x uint64) (n byte) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return byte(x >> 56) +} + +// GetDefaultPidsLimit returns the default pids limit to run containers with +func GetDefaultPidsLimit() int64 { + sysInfo := New(true) + if !sysInfo.PidsLimit { + return 0 + } + return 4096 +} diff --git a/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 0000000000..76bda23c6b --- /dev/null +++ b/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,261 @@ +package sysinfo + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + cg "github.com/containers/libpod/pkg/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts(false) + if err != nil { + return nil, fmt.Errorf("failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false` warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warnf("Failed to parse cgroup information: %v", err) + } else { + sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) + sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) + sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) + sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) + sysInfo.cgroupPids = checkCgroupPids(quiet) + } + + _, ok := cgMounts["devices"] + sysInfo.CgroupDevicesEnabled = ok + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + sysInfo.AppArmor = true + } + + // Check if Seccomp is supported, via CONFIG_SECCOMP. + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { + sysInfo.Seccomp = true + } + } + + return sysInfo +} + +// checkCgroupMem reads the memory information from the memory cgroup mount point. 
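popcnt above is a hand-rolled SWAR population count; since Go 1.9 the standard library offers the same operation, which a quick check can confirm:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint64(0xB5)                // binary 10110101, five set bits
	fmt.Println(bits.OnesCount64(x)) // 5, matching popcnt(x) above
}
```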
+func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { + mountPoint, ok := cgMounts["memory"] + if !ok { + if !quiet { + logrus.Warn("Your kernel does not support cgroup memory limit") + } + return cgroupMemInfo{} + } + + swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !quiet && !swapLimit { + logrus.Warn("Your kernel does not support swap memory limit") + } + memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !quiet && !memoryReservation { + logrus.Warn("Your kernel does not support memory reservation") + } + oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") + if !quiet && !oomKillDisable { + logrus.Warn("Your kernel does not support oom control") + } + memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") + if !quiet && !memorySwappiness { + logrus.Warn("Your kernel does not support memory swappiness") + } + kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !quiet && !kernelMemory { + logrus.Warn("Your kernel does not support kernel memory limit") + } + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: swapLimit, + MemoryReservation: memoryReservation, + OomKillDisable: oomKillDisable, + MemorySwappiness: memorySwappiness, + KernelMemory: kernelMemory, + } +} + +// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. +func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { + mountPoint, ok := cgMounts["cpu"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpu cgroup in mounts") + } + return cgroupCPUInfo{} + } + + cpuShares := cgroupEnabled(mountPoint, "cpu.shares") + if !quiet && !cpuShares { + logrus.Warn("Your kernel does not support cgroup cpu shares") + } + + cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !quiet && !cpuCfsPeriod { + logrus.Warn("Your kernel does not support cgroup cfs period") + } + + cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !quiet && !cpuCfsQuota { + logrus.Warn("Your kernel does not support cgroup cfs quotas") + } + + cpuRealtimePeriod := cgroupEnabled(mountPoint, "cpu.rt_period_us") + if !quiet && !cpuRealtimePeriod { + logrus.Warn("Your kernel does not support cgroup rt period") + } + + cpuRealtimeRuntime := cgroupEnabled(mountPoint, "cpu.rt_runtime_us") + if !quiet && !cpuRealtimeRuntime { + logrus.Warn("Your kernel does not support cgroup rt runtime") + } + + return cgroupCPUInfo{ + CPUShares: cpuShares, + CPUCfsPeriod: cpuCfsPeriod, + CPUCfsQuota: cpuCfsQuota, + CPURealtimePeriod: cpuRealtimePeriod, + CPURealtimeRuntime: cpuRealtimeRuntime, + } +} + +// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. 
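Each check* function above boils down to the same probe: a feature counts as supported when its control file exists under the controller's mountpoint (cgroupEnabled, defined at the bottom of this file). A minimal standalone equivalent, with an assumed cgroup v1 mountpoint (the real code discovers it via findCgroupMountpoints):

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// supported mirrors cgroupEnabled in sysinfo_linux.go: the feature
// exists if and only if its control file can be stat'ed.
func supported(mountPoint, file string) bool {
	_, err := os.Stat(path.Join(mountPoint, file))
	return err == nil
}

func main() {
	// "/sys/fs/cgroup/memory" is an assumption for illustration.
	fmt.Println("swap limit:", supported("/sys/fs/cgroup/memory", "memory.memsw.limit_in_bytes"))
}
```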
+func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { + mountPoint, ok := cgMounts["blkio"] + if !ok { + if !quiet { + logrus.Warn("Unable to find blkio cgroup in mounts") + } + return cgroupBlkioInfo{} + } + + weight := cgroupEnabled(mountPoint, "blkio.weight") + if !quiet && !weight { + logrus.Warn("Your kernel does not support cgroup blkio weight") + } + + weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") + if !quiet && !weightDevice { + logrus.Warn("Your kernel does not support cgroup blkio weight_device") + } + + readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !quiet && !readBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !quiet && !writeBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") + } + readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !quiet && !readIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !quiet && !writeIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") + } + return cgroupBlkioInfo{ + BlkioWeight: weight, + BlkioWeightDevice: weightDevice, + BlkioReadBpsDevice: readBpsDevice, + BlkioWriteBpsDevice: writeBpsDevice, + BlkioReadIOpsDevice: readIOpsDevice, + BlkioWriteIOpsDevice: writeIOpsDevice, + } +} + +// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. +func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { + mountPoint, ok := cgMounts["cpuset"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpuset cgroup in mounts") + } + return cgroupCpusetInfo{} + } + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return cgroupCpusetInfo{} + } + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return cgroupCpusetInfo{} + } + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: strings.TrimSpace(string(cpus)), + Mems: strings.TrimSpace(string(mems)), + } +} + +// checkCgroupPids reads the pids information from the pids cgroup mount point. 
+func checkCgroupPids(quiet bool) cgroupPids { + cgroup2, err := cg.IsCgroup2UnifiedMode() + if err != nil { + logrus.Errorf("Failed to check cgroups version: %v", err) + } + if !cgroup2 { + _, err := cgroups.FindCgroupMountpoint("", "pids") + if err != nil { + if !quiet { + logrus.Warn(err) + } + return cgroupPids{} + } + } + + return cgroupPids{ + PidsLimit: true, + } +} + +func cgroupEnabled(mountPoint, name string) bool { + _, err := os.Stat(path.Join(mountPoint, name)) + return err == nil +} + +func readProcBool(path string) bool { + val, err := ioutil.ReadFile(path) + if err != nil { + return false + } + return strings.TrimSpace(string(val)) == "1" +} diff --git a/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_solaris.go b/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_solaris.go new file mode 100644 index 0000000000..7463cdd8f0 --- /dev/null +++ b/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_solaris.go @@ -0,0 +1,122 @@ +// +build solaris,cgo + +package sysinfo + +import ( + "bytes" + "os/exec" + "strconv" + "strings" +) + +/* +#cgo LDFLAGS: -llgrp +#cgo CFLAGS: -Wall -Werror +#include +#include +#include +int getLgrpCount() { + lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE; + uint_t nlgrps; + + if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) { + return -1; + } + nlgrps = lgrp_nlgrps(lgrpcookie); + return nlgrps; +} +*/ +import "C" + +// IsCPUSharesAvailable returns whether CPUShares setting is supported. +// We need FSS to be set as default scheduling class to support CPU Shares +func IsCPUSharesAvailable() bool { + cmd := exec.Command("/usr/sbin/dispadmin", "-d") + outBuf := new(bytes.Buffer) + errBuf := new(bytes.Buffer) + cmd.Stderr = errBuf + cmd.Stdout = outBuf + + if err := cmd.Run(); err != nil { + return false + } + return (strings.Contains(outBuf.String(), "FSS")) +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. +//NOTE Solaris: If we change the below capabilities be sure +// to update verifyPlatformContainerSettings() in daemon_solaris.go +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + sysInfo.cgroupMemInfo = setCgroupMem(quiet) + sysInfo.cgroupCPUInfo = setCgroupCPU(quiet) + sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet) + sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet) + + sysInfo.IPv4ForwardingDisabled = false + + sysInfo.AppArmor = false + + return sysInfo +} + +// setCgroupMem reads the memory information for Solaris. +func setCgroupMem(quiet bool) cgroupMemInfo { + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: true, + MemoryReservation: false, + OomKillDisable: false, + MemorySwappiness: false, + KernelMemory: false, + } +} + +// setCgroupCPU reads the cpu information for Solaris. +func setCgroupCPU(quiet bool) cgroupCPUInfo { + + return cgroupCPUInfo{ + CPUShares: true, + CPUCfsPeriod: false, + CPUCfsQuota: true, + CPURealtimePeriod: false, + CPURealtimeRuntime: false, + } +} + +// blkio switches are not supported in Solaris. +func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { + + return cgroupBlkioInfo{ + BlkioWeight: false, + BlkioWeightDevice: false, + } +} + +// setCgroupCPUsetInfo reads the cpuset information for Solaris. 
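The networking flags set in New above come from readProcBool, which treats a /proc toggle as enabled only when the file reads "1". A hedged illustration of the same pattern:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// procBool mirrors readProcBool above: a missing or unreadable file
// counts as disabled.
func procBool(p string) bool {
	val, err := ioutil.ReadFile(p)
	return err == nil && strings.TrimSpace(string(val)) == "1"
}

func main() {
	fmt.Println("ipv4 forwarding:", procBool("/proc/sys/net/ipv4/ip_forward"))
}
```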
+func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo { + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: getCPUCount(), + Mems: getLgrpCount(), + } +} + +func getCPUCount() string { + ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) + if ncpus <= 0 { + return "" + } + return strconv.FormatInt(int64(ncpus), 16) +} + +func getLgrpCount() string { + nlgrps := C.getLgrpCount() + if nlgrps <= 0 { + return "" + } + return strconv.FormatInt(int64(nlgrps), 16) +} diff --git a/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_unix.go new file mode 100644 index 0000000000..45f3ef1c65 --- /dev/null +++ b/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_unix.go @@ -0,0 +1,9 @@ +// +build !linux,!solaris,!windows + +package sysinfo + +// New returns an empty SysInfo for non linux nor solaris for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_windows.go new file mode 100644 index 0000000000..4e6255bc59 --- /dev/null +++ b/vendor/github.com/containers/libpod/pkg/sysinfo/sysinfo_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package sysinfo + +// New returns an empty SysInfo for windows for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/containers/libpod/pkg/util/mountOpts.go b/vendor/github.com/containers/libpod/pkg/util/mountOpts.go index 40c99384d5..670daeaf93 100644 --- a/vendor/github.com/containers/libpod/pkg/util/mountOpts.go +++ b/vendor/github.com/containers/libpod/pkg/util/mountOpts.go @@ -10,94 +10,120 @@ var ( // ErrBadMntOption indicates that an invalid mount option was passed. ErrBadMntOption = errors.Errorf("invalid mount option") // ErrDupeMntOption indicates that a duplicate mount option was passed. - ErrDupeMntOption = errors.Errorf("duplicate option passed") + ErrDupeMntOption = errors.Errorf("duplicate mount option passed") ) -// ProcessOptions parses the options for a bind mount and ensures that they are -// sensible and follow convention. -func ProcessOptions(options []string) []string { - var ( - foundbind, foundrw, foundro bool - rootProp string - ) - - for _, opt := range options { - switch opt { - case "bind", "rbind": - foundbind = true - case "ro": - foundro = true - case "rw": - foundrw = true - case "private", "rprivate", "slave", "rslave", "shared", "rshared": - rootProp = opt - } - } - if !foundbind { - options = append(options, "rbind") - } - if !foundrw && !foundro { - options = append(options, "rw") - } - if rootProp == "" { - options = append(options, "rprivate") - } - return options +// DefaultMountOptions sets default mount options for ProcessOptions. +type DefaultMountOptions struct { + Noexec bool + Nosuid bool + Nodev bool } -// ProcessTmpfsOptions parses the options for a tmpfs mountpoint and ensures -// that they are sensible and follow convention. -func ProcessTmpfsOptions(options []string) ([]string, error) { +// ProcessOptions parses the options for a bind or tmpfs mount and ensures that +// they are sensible and follow convention. The isTmpfs variable controls +// whether extra, tmpfs-specific options will be allowed. +// The defaults variable controls default mount options that will be set. If it +// is not included, they will be set unconditionally. 
+func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOptions) ([]string, error) { var ( - foundWrite, foundSize, foundProp, foundMode bool + foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ bool ) - baseOpts := []string{"noexec", "nosuid", "nodev"} for _, opt := range options { // Some options have parameters - size, mode splitOpt := strings.SplitN(opt, "=", 2) switch splitOpt[0] { + case "exec", "noexec": + if foundExec { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'noexec' and 'exec' can be used") + } + foundExec = true + case "suid", "nosuid": + if foundSuid { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nosuid' and 'suid' can be used") + } + foundSuid = true + case "nodev", "dev": + if foundDev { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nodev' and 'dev' can be used") + } + foundDev = true case "rw", "ro": if foundWrite { - return nil, errors.Wrapf(ErrDupeMntOption, "only one of rw and ro can be used") + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rw' and 'ro' can be used") } foundWrite = true - baseOpts = append(baseOpts, opt) case "private", "rprivate", "slave", "rslave", "shared", "rshared": if foundProp { return nil, errors.Wrapf(ErrDupeMntOption, "only one root propagation mode can be used") } foundProp = true - baseOpts = append(baseOpts, opt) case "size": + if !isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'size' option is only allowed with tmpfs mounts") + } if foundSize { return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs size can be specified") } foundSize = true - baseOpts = append(baseOpts, opt) case "mode": + if !isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'mode' option is only allowed with tmpfs mounts") + } if foundMode { return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs mode can be specified") } foundMode = true - baseOpts = append(baseOpts, opt) - case "noexec", "nodev", "nosuid": - // Do nothing. We always include these even if they are - // not explicitly requested. 
+ case "tmpcopyup": + if !isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'tmpcopyup' option is only allowed with tmpfs mounts") + } + if foundCopyUp { + return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' option can only be set once") + } + foundCopyUp = true + case "bind", "rbind": + if isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'bind' and 'rbind' options are not allowed with tmpfs mounts") + } + if foundBind { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rbind' and 'bind' can be used") + } + foundBind = true + case "z", "Z": + if isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'z' and 'Z' options are not allowed with tmpfs mounts") + } + if foundZ { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'z' and 'Z' can be used") + } default: - return nil, errors.Wrapf(ErrBadMntOption, "unknown tmpfs option %q", opt) + return nil, errors.Wrapf(ErrBadMntOption, "unknown mount option %q", opt) } } if !foundWrite { - baseOpts = append(baseOpts, "rw") - } - if !foundSize { - baseOpts = append(baseOpts, "size=65536k") + options = append(options, "rw") } if !foundProp { - baseOpts = append(baseOpts, "rprivate") + options = append(options, "rprivate") + } + if !foundExec && (defaults == nil || defaults.Noexec) { + options = append(options, "noexec") + } + if !foundSuid && (defaults == nil || defaults.Nosuid) { + options = append(options, "nosuid") + } + if !foundDev && (defaults == nil || defaults.Nodev) { + options = append(options, "nodev") + } + if isTmpfs && !foundCopyUp { + options = append(options, "tmpcopyup") + } + if !isTmpfs && !foundBind { + options = append(options, "rbind") } - return baseOpts, nil + return options, nil } diff --git a/vendor/github.com/containers/libpod/pkg/util/utils.go b/vendor/github.com/containers/libpod/pkg/util/utils.go index 520e41438a..633d8a1246 100644 --- a/vendor/github.com/containers/libpod/pkg/util/utils.go +++ b/vendor/github.com/containers/libpod/pkg/util/utils.go @@ -3,21 +3,22 @@ package util import ( "fmt" "os" - ouser "os/user" + "os/user" "path/filepath" + "regexp" "strings" "sync" "time" "github.com/BurntSushi/toml" - "github.com/containers/image/types" + "github.com/containers/image/v5/types" "github.com/containers/libpod/cmd/podman/cliconfig" "github.com/containers/libpod/pkg/errorhandling" "github.com/containers/libpod/pkg/namespaces" "github.com/containers/libpod/pkg/rootless" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" @@ -70,6 +71,50 @@ func StringInSlice(s string, sl []string) bool { return false } +// ParseChanges returns key, value(s) pair for given option. +func ParseChanges(option string) (key string, vals []string, err error) { + // Supported format as below + // 1. key=value + // 2. key value + // 3. key ["value","value1"] + if strings.Contains(option, " ") { + // This handles 2 & 3 conditions. + var val string + tokens := strings.SplitAfterN(option, " ", 2) + if len(tokens) < 2 { + return "", []string{}, fmt.Errorf("invalid key value %s", option) + } + key = strings.Trim(tokens[0], " ") // Need to trim whitespace part of delimeter. + val = tokens[1] + if strings.Contains(tokens[1], "[") && strings.Contains(tokens[1], "]") { + //Trim '[',']' if exist. 
+ val = strings.TrimLeft(strings.TrimRight(tokens[1], "]"), "[") + } + vals = strings.Split(val, ",") + } else if strings.Contains(option, "=") { + // This handles condition 1. + tokens := strings.Split(option, "=") + key = tokens[0] + vals = tokens[1:] + } else { + // either ` ` or `=` must be provided after the command + return "", []string{}, fmt.Errorf("invalid format %s", option) + } + + if len(vals) == 0 { + return "", []string{}, errors.Errorf("no value given for instruction %q", key) + } + + for _, v := range vals { + // Each value must not contain whitespace, '[' or ']', and must not be empty. + whitespaces := regexp.MustCompile(`[\[\s\]]`) + if whitespaces.MatchString(v) || len(v) == 0 { + return "", []string{}, fmt.Errorf("invalid value %s", v) + } + } + return key, vals, nil +} + // GetImageConfig converts the --change flag values in the format "CMD=/bin/bash USER=example" // to a type v1.ImageConfig func GetImageConfig(changes []string) (v1.ImageConfig, error) { @@ -88,40 +133,42 @@ func GetImageConfig(changes []string) (v1.ImageConfig, error) { exposedPorts := make(map[string]struct{}) volumes := make(map[string]struct{}) labels := make(map[string]string) - for _, ch := range changes { - pair := strings.Split(ch, "=") - if len(pair) == 1 { - return v1.ImageConfig{}, errors.Errorf("no value given for instruction %q", ch) + key, vals, err := ParseChanges(ch) + if err != nil { + return v1.ImageConfig{}, err } - switch pair[0] { + + switch key { case "USER": - user = pair[1] + user = vals[0] case "EXPOSE": var st struct{} - exposedPorts[pair[1]] = st + exposedPorts[vals[0]] = st case "ENV": - if len(pair) < 3 { - return v1.ImageConfig{}, errors.Errorf("no value given for environment variable %q", pair[1]) + if len(vals) < 2 { + return v1.ImageConfig{}, errors.Errorf("no value given for environment variable %q", vals[0]) } - env = append(env, strings.Join(pair[1:], "=")) + env = append(env, strings.Join(vals[0:], "=")) case "ENTRYPOINT": - entrypoint = append(entrypoint, pair[1]) + // ENTRYPOINT and CMD can take an array of strings + entrypoint = append(entrypoint, vals...) case "CMD": - cmd = append(cmd, pair[1]) + // ENTRYPOINT and CMD can take an array of strings + cmd = append(cmd, vals...)
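ParseChanges above accepts three spellings of an image change. A hedged sketch of what each form yields (again, outputs are inferred from the code):

```go
package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/util"
)

func main() {
	for _, change := range []string{
		"USER=example",         // form 1: key=value
		"EXPOSE 8080",          // form 2: key value
		`CMD ["/bin/sh","-c"]`, // form 3: key ["value","value1"]
	} {
		key, vals, err := util.ParseChanges(change)
		fmt.Printf("%s -> %v (err: %v)\n", key, vals, err)
	}
}
```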
case "VOLUME": var st struct{} - volumes[pair[1]] = st + volumes[vals[0]] = st case "WORKDIR": - workingDir = pair[1] + workingDir = vals[0] case "LABEL": - if len(pair) == 3 { - labels[pair[1]] = pair[2] + if len(vals) == 2 { + labels[vals[0]] = vals[1] } else { - labels[pair[1]] = "" + labels[vals[0]] = "" } case "STOPSIGNAL": - stopSignal = pair[1] + stopSignal = vals[0] } } @@ -156,22 +203,15 @@ func ParseIDMapping(mode namespaces.UsernsMode, UIDMapSlice, GIDMapSlice []strin uid := rootless.GetRootlessUID() gid := rootless.GetRootlessGID() - username := os.Getenv("USER") - if username == "" { - user, err := ouser.LookupId(fmt.Sprintf("%d", uid)) - if err == nil { - username = user.Username - } - } - mappings, err := idtools.NewIDMappings(username, username) + uids, gids, err := rootless.GetConfiguredMappings() if err != nil { - return nil, errors.Wrapf(err, "cannot find mappings for user %s", username) + return nil, errors.Wrapf(err, "cannot read mappings") } maxUID, maxGID := 0, 0 - for _, u := range mappings.UIDs() { + for _, u := range uids { maxUID += u.Size } - for _, g := range mappings.GIDs() { + for _, g := range gids { maxGID += g.Size } @@ -279,7 +319,7 @@ func WriteStorageConfigFile(storageOpts *storage.StoreOptions, storageConf strin if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil { return err } - storageFile, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + storageFile, err := os.OpenFile(storageConf, os.O_RDWR|os.O_TRUNC, 0600) if err != nil { return errors.Wrapf(err, "cannot open %s", storageConf) } @@ -356,3 +396,61 @@ func OpenExclusiveFile(path string) (*os.File, error) { } return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) } + +// PullType whether to pull new image +type PullType int + +const ( + // PullImageAlways always try to pull new image when create or run + PullImageAlways PullType = iota + // PullImageMissing pulls image if it is not locally + PullImageMissing + // PullImageNever will never pull new image + PullImageNever +) + +// ValidatePullType check if the pullType from CLI is valid and returns the valid enum type +// if the value from CLI is invalid returns the error +func ValidatePullType(pullType string) (PullType, error) { + switch pullType { + case "always": + return PullImageAlways, nil + case "missing": + return PullImageMissing, nil + case "never": + return PullImageNever, nil + case "": + return PullImageMissing, nil + default: + return PullImageMissing, errors.Errorf("invalid pull type %q", pullType) + } +} + +// ExitCode reads the error message when failing to executing container process +// and then returns 0 if no error, 126 if command does not exist, or 127 for +// all other errors +func ExitCode(err error) int { + if err == nil { + return 0 + } + e := strings.ToLower(err.Error()) + if strings.Contains(e, "file not found") || + strings.Contains(e, "no such file or directory") { + return 127 + } + + return 126 +} + +// HomeDir returns the home directory for the current user. 
+func HomeDir() (string, error) { + home := os.Getenv("HOME") + if home == "" { + usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID())) + if err != nil { + return "", errors.Wrapf(err, "unable to resolve HOME directory") + } + home = usr.HomeDir + } + return home, nil +} diff --git a/vendor/github.com/containers/libpod/pkg/util/utils_supported.go b/vendor/github.com/containers/libpod/pkg/util/utils_supported.go index c7c8787a0a..253460686b 100644 --- a/vendor/github.com/containers/libpod/pkg/util/utils_supported.go +++ b/vendor/github.com/containers/libpod/pkg/util/utils_supported.go @@ -16,8 +16,8 @@ import ( "github.com/sirupsen/logrus" ) -// GetRootlessRuntimeDir returns the runtime directory when running as non root -func GetRootlessRuntimeDir() (string, error) { +// GetRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { var rootlessRuntimeDirError error rootlessRuntimeDirOnce.Do(func() { @@ -83,7 +83,7 @@ func GetRootlessConfigHomeDir() (string, error) { logrus.Errorf("unable to make temp dir %s", tmpDir) } st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0755 { + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() >= 0700 { cfgHomeDir = tmpDir } } @@ -100,7 +100,7 @@ func GetRootlessConfigHomeDir() (string, error) { // GetRootlessPauseProcessPidPath returns the path to the file that holds the pid for // the pause process func GetRootlessPauseProcessPidPath() (string, error) { - runtimeDir, err := GetRootlessRuntimeDir() + runtimeDir, err := GetRuntimeDir() if err != nil { return "", err } diff --git a/vendor/github.com/containers/libpod/pkg/util/utils_windows.go b/vendor/github.com/containers/libpod/pkg/util/utils_windows.go index e781e6717e..9bba2d1ee7 100644 --- a/vendor/github.com/containers/libpod/pkg/util/utils_windows.go +++ b/vendor/github.com/containers/libpod/pkg/util/utils_windows.go @@ -25,12 +25,12 @@ func GetRootlessPauseProcessPidPath() (string, error) { return "", errors.Wrap(errNotImplemented, "GetRootlessPauseProcessPidPath") } -// GetRootlessRuntimeDir returns the runtime directory when running as non root -func GetRootlessRuntimeDir() (string, error) { - return "", errors.Wrap(errNotImplemented, "GetRootlessRuntimeDir") +// GetRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { + return "", errors.New("this function is not implemented for windows") } // GetRootlessConfigHomeDir returns the config home directory when running as non root func GetRootlessConfigHomeDir() (string, error) { - return "", errors.Wrap(errNotImplemented, "GetRootlessConfigHomeDir") + return "", errors.New("this function is not implemented for windows") } diff --git a/vendor/github.com/containers/libpod/utils/utils_supported.go b/vendor/github.com/containers/libpod/utils/utils_supported.go index 8b0ba44384..8bc232179d 100644 --- a/vendor/github.com/containers/libpod/utils/utils_supported.go +++ b/vendor/github.com/containers/libpod/utils/utils_supported.go @@ -3,6 +3,8 @@ package utils import ( + "github.com/containers/libpod/pkg/cgroups" + "github.com/containers/libpod/pkg/rootless" systemdDbus "github.com/coreos/go-systemd/dbus" "github.com/godbus/dbus" ) @@ -10,9 +12,19 @@ import ( // RunUnderSystemdScope adds the specified pid to a systemd scope func RunUnderSystemdScope(pid int, slice string, unitName string) error { var properties []systemdDbus.Property - conn, err := systemdDbus.New() - if err != nil { - 
return err + var conn *systemdDbus.Conn + var err error + + if rootless.IsRootless() { + conn, err = cgroups.GetUserConnection(rootless.GetRootlessUID()) + if err != nil { + return err + } + } else { + conn, err = systemdDbus.New() + if err != nil { + return err + } } properties = append(properties, systemdDbus.PropSlice(slice)) properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) diff --git a/vendor/github.com/containers/libpod/version/version.go b/vendor/github.com/containers/libpod/version/version.go index 118b096c52..c0dbeadfe2 100644 --- a/vendor/github.com/containers/libpod/version/version.go +++ b/vendor/github.com/containers/libpod/version/version.go @@ -4,7 +4,7 @@ package version // NOTE: remember to bump the version at the top // of the top-level README.md file when this is // bumped. -const Version = "1.5.1" +const Version = "1.6.3-dev" // RemoteAPIVersion is the version for the remote // client API. It is used to determine compatibility diff --git a/vendor/github.com/containers/libtrust/CONTRIBUTING.md b/vendor/github.com/containers/libtrust/CONTRIBUTING.md new file mode 100644 index 0000000000..05be0f8ab3 --- /dev/null +++ b/vendor/github.com/containers/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/vendor/github.com/containers/libtrust/LICENSE b/vendor/github.com/containers/libtrust/LICENSE new file mode 100644 index 0000000000..27448585ad --- /dev/null +++ b/vendor/github.com/containers/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/libtrust/MAINTAINERS b/vendor/github.com/containers/libtrust/MAINTAINERS new file mode 100644 index 0000000000..9768175feb --- /dev/null +++ b/vendor/github.com/containers/libtrust/MAINTAINERS @@ -0,0 +1,3 @@ +Solomon Hykes +Josh Hawn (github: jlhawn) +Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/containers/libtrust/README.md b/vendor/github.com/containers/libtrust/README.md new file mode 100644 index 0000000000..dcffb31ae4 --- /dev/null +++ b/vendor/github.com/containers/libtrust/README.md @@ -0,0 +1,22 @@ +# libtrust + +> **WARNING** this library is no longer actively developed, and will be integrated +> into the [docker/distribution](https://www.github.com/docker/distribution) +> repository in the future. + +Libtrust is a library for managing authentication and authorization using public key cryptography. + +Authentication is handled using the identity attached to the public key. +Libtrust provides multiple methods to prove possession of the private key associated with an identity. + - TLS x509 certificates + - Signature verification + - Key Challenge + +Authorization and access control is managed through a distributed trust graph. +Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. + +## Copyright and license + +Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license. +Docs released under Creative Commons.
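To make the README's description concrete, here is a minimal sign-and-verify sketch. It is illustrative only, not part of the vendored sources; it uses only functions that are defined in the files added later in this diff (GenerateECP256PrivateKey, Sign, Verify, KeyID), and the payload is made up:

package main

import (
	"bytes"
	"crypto"
	"fmt"
	"log"

	"github.com/containers/libtrust"
)

func main() {
	// Generate an elliptic-curve identity key (P-256).
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Sign some data. For EC keys the hash argument is only a hint;
	// the implementation picks the hash that matches the curve.
	data := []byte("hello")
	sig, alg, err := key.Sign(bytes.NewReader(data), crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}

	// Verify with the corresponding public key.
	if err := key.PublicKey().Verify(bytes.NewReader(data), alg, sig); err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature verified; key ID:", key.KeyID())
}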
+ diff --git a/vendor/github.com/containers/libtrust/certificates.go b/vendor/github.com/containers/libtrust/certificates.go new file mode 100644 index 0000000000..3dcca33cb1 --- /dev/null +++ b/vendor/github.com/containers/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTempInfo object. + var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. 
Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/containers/libtrust/doc.go b/vendor/github.com/containers/libtrust/doc.go new file mode 100644 index 0000000000..ec5d2159c1 --- /dev/null +++ b/vendor/github.com/containers/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/vendor/github.com/containers/libtrust/ec_key.go b/vendor/github.com/containers/libtrust/ec_key.go new file mode 100644 index 0000000000..0ee1b9110a --- /dev/null +++ b/vendor/github.com/containers/libtrust/ec_key.go @@ -0,0 +1,422 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. 
+type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *ecPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *ecPublicKey) String() string { + return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this +// PublicKey. The alg parameter should identify the digital signature +// algorithm which was used to produce the signature and should be supported +// by this public key. Returns a nil error if the signature is valid. +func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // For EC keys there is only one supported signature algorithm depending + // on the curve parameters. + if k.signatureAlgorithm.HeaderParam() != alg { + return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) + } + + // signature is the concatenation of (r, s), base64Url encoded. + sigLength := len(signature) + expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) + if sigLength != expectedOctetLength { + return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) + } + + rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] + r := new(big.Int).SetBytes(rBytes) + s := new(big.Int).SetBytes(sBytes) + + hasher := k.signatureAlgorithm.HashID().New() + _, err := io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + if !ecdsa.Verify(k.PublicKey, hash, r, s) { + return errors.New("invalid signature") + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) 
+ yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. 
+func (k *ecPrivateKey) PublicKey() PublicKey { + return &k.ecPublicKey +} + +func (k *ecPrivateKey) String() string { + return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the elliptic curve private key. If the specified hashing algorithm is +// supported by this key, that hash function is used to generate the signature +// otherwise the default hashing algorithm for this key is used. Returns +// the signature and the name of the JWK signature algorithm used, e.g., +// "ES256", "ES384", "ES512". +func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. + // The given hashID is only a suggestion, and since EC keys only support + // one signature/hash algorithm given the curve name, we disregard it for + // the elliptic curve JWK signature implementation. + r, s, err := k.sign(data, hashID) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + + rBytes, sBytes := r.Bytes(), s.Bytes() + octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output + rBuf := make([]byte, octetLength-len(rBytes), octetLength) + sBuf := make([]byte, octetLength-len(sBytes), octetLength) + + rBuf = append(rBuf, rBytes...) + sBuf = append(sBuf, sBytes...) + + signature = append(rBuf, sBuf...) + alg = k.signatureAlgorithm.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PrivateKey for use with other standard library operations. The type +// is either *rsa.PrivateKey or *ecdsa.PrivateKey +func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *ecPrivateKey) toMap() map[string]interface{} { + jwk := k.ecPublicKey.toMap() + + dBytes := k.D.Bytes() + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := k.ecPublicKey.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + // Create a buffer with the necessary zero-padding. + dBuf := make([]byte, octetLength-len(dBytes), octetLength) + dBuf = append(dBuf, dBytes...) + + jwk["d"] = joseBase64UrlEncode(dBuf) + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded SEC1 format. +func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded SEC1 format: %s", err) + } + k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) +} + +func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { + dB64Url, err := stringFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key: %s", err) + } + + // JWK key type (kty) has already been determined to be "EC". + // Need to extract the public key information, then extract the private + // key value 'd'. + publicKey, err := ecPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + d, err := parseECPrivateParam(dB64Url, publicKey.Curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) + } + + key := &ecPrivateKey{ + ecPublicKey: *publicKey, + PrivateKey: &ecdsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: d, + }, + } + + return key, nil +} + +/* + * Key Generation Functions. + */ + +func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { + k = new(ecPrivateKey) + k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, err + } + + k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. +func GenerateECP256PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P256()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-256 key: %s", err) + } + + k.curveName = "P-256" + k.signatureAlgorithm = es256 + + return k, nil +} + +// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. +func GenerateECP384PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P384()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-384 key: %s", err) + } + + k.curveName = "P-384" + k.signatureAlgorithm = es384 + + return k, nil +} + +// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P521()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-521 key: %s", err) + } + + k.curveName = "P-521" + k.signatureAlgorithm = es512 + + return k, nil +} diff --git a/vendor/github.com/containers/libtrust/ec_key_no_openssl.go b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go new file mode 100644 index 0000000000..d6cdaca3f8 --- /dev/null +++ b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go @@ -0,0 +1,23 @@ +// +build !libtrust_openssl + +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) { + hasher := k.signatureAlgorithm.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return nil, nil, fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + return ecdsa.Sign(rand.Reader, k.PrivateKey, hash) +} diff --git a/vendor/github.com/containers/libtrust/ec_key_openssl.go b/vendor/github.com/containers/libtrust/ec_key_openssl.go new file mode 100644 index 0000000000..4137511f11 --- /dev/null +++ b/vendor/github.com/containers/libtrust/ec_key_openssl.go @@ -0,0 +1,24 @@ +// +build libtrust_openssl + +package libtrust + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) { + hId := k.signatureAlgorithm.HashID() + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(data) + if err != nil { + return nil, nil, fmt.Errorf("error reading data: %s", err) + } + + return ecdsa.HashSign(rand.Reader, k.PrivateKey, buf.Bytes(), hId) +} diff --git a/vendor/github.com/containers/libtrust/filter.go b/vendor/github.com/containers/libtrust/filter.go new file mode 100644 index 0000000000..5b2b4fca6f --- /dev/null +++ b/vendor/github.com/containers/libtrust/filter.go @@ -0,0 +1,50 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned. 
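Before the FilterByHosts implementation that follows, a hedged usage sketch: the "hosts" restriction lives in a key's extended fields, so a caller annotates keys first and then filters. The pattern and host names here are illustrative, and the sketch assumes "github.com/containers/libtrust" is imported:

func keysFor(host string) ([]libtrust.PublicKey, error) {
	// Hypothetical identity key, annotated with the hosts it may serve.
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	pub := key.PublicKey()
	pub.AddExtendedField("hosts", []string{"*.example.com"}) // filepath.Match pattern

	// includeEmpty=true also keeps keys that carry no "hosts" restriction.
	return libtrust.FilterByHosts([]libtrust.PublicKey{pub}, host, true)
}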
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { + filtered := make([]PublicKey, 0, len(keys)) + + for _, pubKey := range keys { + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } + + if len(hosts) == 0 { + if includeEmpty { + filtered = append(filtered, pubKey) + } + continue + } + + // Check if any hosts match pattern + for _, hostPattern := range hosts { + match, err := filepath.Match(hostPattern, host) + if err != nil { + return nil, err + } + + if match { + filtered = append(filtered, pubKey) + continue + } + } + } + + return filtered, nil +} diff --git a/vendor/github.com/containers/libtrust/hash.go b/vendor/github.com/containers/libtrust/hash.go new file mode 100644 index 0000000000..a2df787dd9 --- /dev/null +++ b/vendor/github.com/containers/libtrust/hash.go @@ -0,0 +1,56 @@ +package libtrust + +import ( + "crypto" + _ "crypto/sha256" // Register SHA224 and SHA256 + _ "crypto/sha512" // Register SHA384 and SHA512 + "fmt" +) + +type signatureAlgorithm struct { + algHeaderParam string + hashID crypto.Hash +} + +func (h *signatureAlgorithm) HeaderParam() string { + return h.algHeaderParam +} + +func (h *signatureAlgorithm) HashID() crypto.Hash { + return h.hashID +} + +var ( + rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} + rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} + rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} + es256 = &signatureAlgorithm{"ES256", crypto.SHA256} + es384 = &signatureAlgorithm{"ES384", crypto.SHA384} + es512 = &signatureAlgorithm{"ES512", crypto.SHA512} +) + +func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { + switch { + case alg == "RS256": + return rs256, nil + case alg == "RS384": + return rs384, nil + case alg == "RS512": + return rs512, nil + default: + return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) + } +} + +func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { + switch { + case hashID == crypto.SHA512: + return rs512 + case hashID == crypto.SHA384: + return rs384 + case hashID == crypto.SHA256: + fallthrough + default: + return rs256 + } +} diff --git a/vendor/github.com/containers/libtrust/jsonsign.go b/vendor/github.com/containers/libtrust/jsonsign.go new file mode 100644 index 0000000000..cb2ca9a769 --- /dev/null +++ b/vendor/github.com/containers/libtrust/jsonsign.go @@ -0,0 +1,657 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "sort" + "time" + "unicode" +) + +var ( + // ErrInvalidSignContent is used when the content to be signed is invalid. + ErrInvalidSignContent = errors.New("invalid sign content") + + // ErrInvalidJSONContent is used when invalid json is encountered. + ErrInvalidJSONContent = errors.New("invalid json content") + + // ErrMissingSignatureKey is used when the specified signature key + // does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. +func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the callers responsibility to ensure uniqueness of the +// provided signatures. 
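The NewJSONSignature doc comment above describes a store-and-reassemble flow; here is a hedged sketch of it, assuming "github.com/containers/libtrust" is imported, key is a libtrust.PrivateKey, and content is some canonical JSON payload (all names illustrative):

func signAndReassemble(key libtrust.PrivateKey, content []byte) (*libtrust.JSONSignature, error) {
	js, err := libtrust.NewJSONSignature(content)
	if err != nil {
		return nil, err
	}
	if err := js.Sign(key); err != nil {
		return nil, err
	}
	// Opaque signature blobs, sorted by key ID, that can be stored
	// alongside the payload.
	blobs, err := js.Signatures()
	if err != nil {
		return nil, err
	}
	// Later: rebuild a fully signed object from payload + stored blobs.
	return libtrust.NewJSONSignature(content, blobs...)
}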
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
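The two pretty-signature functions around this point are inverses of each other; a hedged round-trip sketch (the "signatures" key name is illustrative, and js is assumed to be an already-signed *libtrust.JSONSignature):

func prettyRoundTrip(js *libtrust.JSONSignature) ([]libtrust.PublicKey, error) {
	// Embed the signatures into the payload under a "signatures" key...
	pretty, err := js.PrettySignature("signatures")
	if err != nil {
		return nil, err
	}
	// ...then parse them back out of the combined document.
	parsed, err := libtrust.ParsePrettySignature(pretty, "signatures")
	if err != nil {
		return nil, err
	}
	// Returns the public keys whose signatures verified.
	return parsed.Verify()
}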
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("no signatures") + } + payload, err := joseBase64UrlDecode(js.payload) + if err != nil { + return nil, err + } + payload = payload[:js.formatLength] + + sort.Sort(jsSignaturesSorted(js.signatures)) + + var marshalled []byte + var marshallErr error + if js.indent != "" { + marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) + } else { + marshalled, marshallErr = json.Marshal(js.signatures) + } + if marshallErr != nil { + return nil, marshallErr + } + + buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) + buf.Write(payload) + buf.WriteByte(',') + if js.indent != "" { + buf.WriteByte('\n') + buf.WriteString(js.indent) + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\": ") + buf.Write(marshalled) + buf.WriteByte('\n') + } else { + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\":") + buf.Write(marshalled) + } + buf.WriteByte('}') + + return buf.Bytes(), nil +} + +// Signatures provides the signatures on this JWS as opaque blobs, sorted by +// keyID. These blobs can be stored and reassembled with payloads. Internally, +// they are simply marshaled json web signatures but implementations should +// not rely on this. +func (js *JSONSignature) Signatures() ([][]byte, error) { + sort.Sort(jsSignaturesSorted(js.signatures)) + + var sb [][]byte + for _, jsig := range js.signatures { + p, err := json.Marshal(jsig) + if err != nil { + return nil, err + } + + sb = append(sb, p) + } + + return sb, nil +} + +// Merge combines the signatures from one or more other signatures into the +// method receiver. If the payloads differ for any argument, an error will be +// returned and the receiver will not be modified. +func (js *JSONSignature) Merge(others ...*JSONSignature) error { + merged := js.signatures + for _, other := range others { + if js.payload != other.payload { + return fmt.Errorf("payloads differ from merge target") + } + merged = append(merged, other.signatures...) + } + + js.signatures = merged + return nil +} diff --git a/vendor/github.com/containers/libtrust/key.go b/vendor/github.com/containers/libtrust/key.go new file mode 100644 index 0000000000..73642db2a8 --- /dev/null +++ b/vendor/github.com/containers/libtrust/key.go @@ -0,0 +1,253 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" +) + +// PublicKey is a generic interface for a Public Key. +type PublicKey interface { + // KeyType returns the key type for this key. For elliptic curve keys, + // this value should be "EC". For RSA keys, this value should be "RSA". + KeyType() string + // KeyID returns a distinct identifier which is unique to this Public Key. + // The format generated by this library is a base32 encoding of a 240 bit + // hash of the public key data divided into 12 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + KeyID() string + // Verify verifies the signature of the data in the io.Reader using this + // Public Key. The alg parameter should identify the digital signature + // algorithm which was used to produce the signature and should be + // supported by this public key. Returns a nil error if the signature + // is valid.
+ Verify(data io.Reader, alg string, signature []byte) error + // CryptoPublicKey returns the internal object which can be used as a + // crypto.PublicKey for use with other standard library operations. The type + // is either *rsa.PublicKey or *ecdsa.PublicKey + CryptoPublicKey() crypto.PublicKey + // These public keys can be serialized to the standard JSON encoding for + // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web + // Algorithms. + MarshalJSON() ([]byte, error) + // These keys can also be serialized to the standard PEM encoding. + PEMBlock() (*pem.Block, error) + // The string representation of a key is its key type and ID. + String() string + AddExtendedField(string, interface{}) + GetExtendedField(string) interface{} +} + +// PrivateKey is a generic interface for a Private Key. +type PrivateKey interface { + // A PrivateKey contains all fields and methods of a PublicKey of the + // same type. The MarshalJSON method also outputs the private key as a + // JSON Web Key, and the PEMBlock method outputs the private key as a + // PEM block. + PublicKey + // PublicKey returns the PublicKey associated with this PrivateKey. + PublicKey() PublicKey + // Sign signs the data read from the io.Reader using a signature algorithm + // supported by the private key. If the specified hashing algorithm is + // supported by this key, that hash function is used to generate the + // signature otherwise the default hashing algorithm for this key is + // used. Returns the signature and identifier of the algorithm used. + Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) + // CryptoPrivateKey returns the internal object which can be used as a + // crypto.PrivateKey for use with other standard library operations. The + // type is either *rsa.PrivateKey or *ecdsa.PrivateKey + CryptoPrivateKey() crypto.PrivateKey +} + +// FromCryptoPublicKey returns a libtrust PublicKey representation of the given +// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given +// key is of an unsupported type. +func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { + switch cryptoPublicKey := cryptoPublicKey.(type) { + case *ecdsa.PublicKey: + return fromECPublicKey(cryptoPublicKey) + case *rsa.PublicKey: + return fromRSAPublicKey(cryptoPublicKey), nil + default: + return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) + } +} + +// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given +// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given +// key is of an unsupported type. +func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { + switch cryptoPrivateKey := cryptoPrivateKey.(type) { + case *ecdsa.PrivateKey: + return fromECPrivateKey(cryptoPrivateKey) + case *rsa.PrivateKey: + return fromRSAPrivateKey(cryptoPrivateKey), nil + default: + return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) + } +} + +// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust +// PublicKey or an error if there is a problem with the encoding.
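A hedged sketch of the PEM round trip described above: PEMBlock (on the PublicKey interface) produces a *pem.Block that UnmarshalPublicKeyPEM, defined just below, can parse back. Assumes "encoding/pem" and "github.com/containers/libtrust" are imported:

func pemRoundTrip(key libtrust.PrivateKey) (libtrust.PublicKey, error) {
	block, err := key.PublicKey().PEMBlock()
	if err != nil {
		return nil, err
	}
	// Encode the block to PEM text, then parse it back into a PublicKey.
	return libtrust.UnmarshalPublicKeyPEM(pem.EncodeToMemory(block))
}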
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/vendor/github.com/containers/libtrust/key_files.go b/vendor/github.com/containers/libtrust/key_files.go new file mode 100644 index 0000000000..c526de5455 --- /dev/null +++ b/vendor/github.com/containers/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
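The file helpers in key_files.go dispatch on the file extension. A hedged persistence sketch (the file names are illustrative; assumes "github.com/containers/libtrust" is imported):

func persistIdentity(key libtrust.PrivateKey) error {
	// A ".json"/".jwk" suffix selects JWK encoding; anything else is PEM.
	// SaveKey writes the private key with file mode 0600.
	if err := libtrust.SaveKey("id.jwk", key); err != nil {
		return err
	}
	// Append the public half to a shared key set; read it back later with
	// LoadKeySetFile, and the private key with LoadKeyFile.
	return libtrust.AddKeySetFile("trusted_keys.pem", key.PublicKey())
}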
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
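Worth noting for reviewers (illustration only): SaveKey and LoadKeyFile choose the encoding purely from the file extension, where .json/.jwk selects JWK and everything else falls back to PEM. A minimal round-trip sketch with a hypothetical path:

```go
package main

import (
	"fmt"

	"github.com/containers/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	// A ".json" suffix selects JWK encoding; "key.pem" would write a PEM block instead.
	if err := libtrust.SaveKey("/tmp/trust-key.json", key); err != nil {
		panic(err)
	}
	loaded, err := libtrust.LoadKeyFile("/tmp/trust-key.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(loaded.KeyID() == key.KeyID()) // expected: true
}
```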
+ return []json.RawMessage{}, nil + } + + keySet := jwkSet{} + + err := json.Unmarshal(data, &keySet) + if err != nil { + return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) + } + + return keySet.Keys, nil +} + +func loadJSONKeySetFile(filename string) ([]PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyJWKSet(contents) +} + +func loadPEMKeySetFile(filename string) ([]PublicKey, error) { + data, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyPEMBundle(data) +} + +// AddKeySetFile adds a key to a key set +func AddKeySetFile(filename string, key PublicKey) error { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return addKeySetJSONFile(filename, key) + } + + // Must be a PEM format file + return addKeySetPEMFile(filename, key) +} + +func addKeySetJSONFile(filename string, key PublicKey) error { + encodedKey, err := json.Marshal(key) + if err != nil { + return fmt.Errorf("unable to encode trusted client key: %s", err) + } + + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return err + } + + rawEntries, err := loadJSONKeySetRaw(contents) + if err != nil { + return err + } + + rawEntries = append(rawEntries, json.RawMessage(encodedKey)) + entriesWrapper := jwkSet{Keys: rawEntries} + + encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") + if err != nil { + return fmt.Errorf("unable to encode trusted client keys: %s", err) + } + + err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) + } + + return nil +} + +func addKeySetPEMFile(filename string, key PublicKey) error { + // Encode to PEM, open file for appending, write PEM. + file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) + } + defer file.Close() + + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encoded trusted key: %s", err) + } + + _, err = file.Write(pem.EncodeToMemory(pemBlock)) + if err != nil { + return fmt.Errorf("unable to write trusted keys file: %s", err) + } + + return nil +} diff --git a/vendor/github.com/containers/libtrust/key_manager.go b/vendor/github.com/containers/libtrust/key_manager.go new file mode 100644 index 0000000000..9a98ae3574 --- /dev/null +++ b/vendor/github.com/containers/libtrust/key_manager.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "sync" +) + +// ClientKeyManager manages client keys on the filesystem +type ClientKeyManager struct { + key PrivateKey + clientFile string + clientDir string + + clientLock sync.RWMutex + clients []PublicKey + + configLock sync.Mutex + configs []*tls.Config +} + +// NewClientKeyManager loads a new manager from a set of key files +// and managed by the given private key. 
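Likewise for the key-set helpers just above: AddKeySetFile appends a PEM block for PEM paths but re-marshals the whole set for .json/.jwk paths. A sketch, again with a hypothetical path:

```go
package main

import (
	"fmt"

	"github.com/containers/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		panic(err)
	}
	// Append the public half to a JWK set file, then read the set back.
	if err := libtrust.AddKeySetFile("/tmp/known-keys.json", key.PublicKey()); err != nil {
		panic(err)
	}
	keys, err := libtrust.LoadKeySetFile("/tmp/known-keys.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(keys)) // grows by one per run
}
```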
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+	if caPath != "" {
+		certPool := x509.NewCertPool()
+		file, err := ioutil.ReadFile(caPath)
+		if err != nil {
+			return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+		}
+		certPool.AppendCertsFromPEM(file)
+
+		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+		tlsConfig.ClientCAs = certPool
+	}
+
+	return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+	return &tls.Config{
+		NextProtos: []string{"http/1.1"},
+		// Avoid fallback on insecure SSL protocols
+		MinVersion: tls.VersionTLS10,
+	}
+}
+
+// parseAddr parses an address into an array of IPs and domains
+func parseAddr(addr string) ([]net.IP, []string, error) {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, nil, err
+	}
+	var domains []string
+	var ips []net.IP
+	ip := net.ParseIP(host)
+	if ip != nil {
+		ips = []net.IP{ip}
+	} else {
+		domains = []string{host}
+	}
+	return ips, domains, nil
+}
diff --git a/vendor/github.com/containers/libtrust/rsa_key.go b/vendor/github.com/containers/libtrust/rsa_key.go
new file mode 100644
index 0000000000..dac4cacf20
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+/*
+ * RSA PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+	*rsa.PublicKey
+	extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+	return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+	return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+	return keyIDFromCryptoKey(k)
+}
+
+func (k *rsaPublicKey) String() string {
+	return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// Verify the signature of the given data, returning a non-nil error if invalid.
+	sigAlg, err := rsaSignatureAlgorithmByName(alg)
+	if err != nil {
+		return fmt.Errorf("unable to verify Signature: %s", err)
+	}
+
+	hasher := sigAlg.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+	if err != nil {
+		return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations.
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. 
+// If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature, otherwise
+// the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+	hasher := sigAlg.HashID().New()
+
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+
+	alg = sigAlg.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+	k.Precompute() // Make sure the precomputed values are stored.
+	jwk := k.rsaPublicKey.toMap()
+
+	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+	otherPrimes := k.Primes[2:]
+
+	if len(otherPrimes) > 0 {
+		otherPrimesInfo := make([]interface{}, len(otherPrimes))
+		for i, r := range otherPrimes {
+			otherPrimeInfo := make(map[string]string, 3)
+			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+			crtVal := k.Precomputed.CRTValues[i]
+			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+			otherPrimesInfo[i] = otherPrimeInfo
+		}
+		jwk["oth"] = otherPrimesInfo
+	}
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+	// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+	// only the private key exponent 'd' is REQUIRED, the others are just for
+	// signature/decryption optimizations and SHOULD be included when the JWK
+	// is produced. We MAY choose to accept a JWK which only includes 'd', but
+	// we're going to go ahead and not choose to accept it without the extra
+	// fields. Only the 'oth' field will be optional (for multi-prime keys).
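A quick illustration of the Sign/Verify pair defined above (sketch only, not vendored code): Sign hashes the reader with the function matching the requested crypto.Hash and returns the JWA algorithm name, which Verify then expects back.

```go
package main

import (
	"bytes"
	"crypto"
	"fmt"

	"github.com/containers/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		panic(err)
	}
	msg := []byte("payload to sign")
	// crypto.SHA256 maps to the "RS256" JWA algorithm for RSA keys.
	sig, alg, err := key.Sign(bytes.NewReader(msg), crypto.SHA256)
	if err != nil {
		panic(err)
	}
	err = key.PublicKey().Verify(bytes.NewReader(msg), alg, sig)
	fmt.Println(alg, err == nil) // expected: RS256 true
}
```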
+ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) + } + firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + var oth interface{} + if _, ok := jwk["oth"]; ok { + oth = jwk["oth"] + delete(jwk, "oth") + } + + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract the public key information, then extract the private + // key values. + publicKey, err := rsaPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + privateKey := &rsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: privateExponent, + Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, + Precomputed: rsa.PrecomputedValues{ + Dp: firstFactorCRT, + Dq: secondFactorCRT, + Qinv: crtCoeff, + }, + } + + if oth != nil { + // Should be an array of more JSON objects. + otherPrimesInfo, ok := oth.([]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") + } + numOtherPrimeFactors := len(otherPrimesInfo) + if numOtherPrimeFactors == 0 { + return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty") + } + otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) + productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) + crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) + + for i, val := range otherPrimesInfo { + otherPrimeinfo, ok := val.(map[string]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") + } + + otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + crtValue := crtValues[i] + crtValue.Exp = otherFactorCRT + crtValue.Coeff = otherCrtCoeff + crtValue.R = productOfPrimes + otherPrimeFactors[i] = otherPrimeFactor + productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) + } + + privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) + privateKey.Precomputed.CRTValues = crtValues + } + + key := &rsaPrivateKey{ + rsaPublicKey: *publicKey, + PrivateKey: privateKey, + } + + return key, nil +} + +/* + * Key Generation Functions. 
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/vendor/github.com/containers/libtrust/util.go b/vendor/github.com/containers/libtrust/util.go new file mode 100644 index 0000000000..a5a101d3f1 --- /dev/null +++ b/vendor/github.com/containers/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
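For context (illustration only): LoadOrCreateTrustKey below is the usual entry point. The first call generates and persists the key plus a "public-" companion file; later calls just reload it. A sketch with a hypothetical path:

```go
package main

import (
	"fmt"

	"github.com/containers/libtrust"
)

func main() {
	// First run writes /tmp/trust/key.json and /tmp/trust/public-key.json;
	// subsequent runs load the same key and print a stable key ID.
	key, err := libtrust.LoadOrCreateTrustKey("/tmp/trust/key.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(key.KeyID())
}
```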
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
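The jose helpers here are unexported; the following standalone equivalents (a sketch, not vendored code) show the unpadded base64url behavior, using the canonical RSA exponent 65537 whose JWK form is "AQAB":

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// joseEncode mirrors joseBase64UrlEncode: standard base64url with trailing '=' stripped.
func joseEncode(b []byte) string {
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

// joseDecode mirrors joseBase64UrlDecode: re-add padding before decoding.
// A length of 4n+1 is invalid and is rejected by DecodeString itself.
func joseDecode(s string) ([]byte, error) {
	switch len(s) % 4 {
	case 2:
		s += "=="
	case 3:
		s += "="
	}
	return base64.URLEncoding.DecodeString(s)
}

func main() {
	enc := joseEncode([]byte{0x01, 0x00, 0x01}) // 65537 big-endian, minimal octets
	dec, err := joseDecode(enc)
	fmt.Println(enc, dec, err) // expected: AQAB [1 0 1] <nil>
}
```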
+	// E is supposed to be 65537 for performance and security reasons
+	// and is what golang's rsa package generates, but it might be
+	// different if imported from some other generator.
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(e))
+	var i int
+	for i = 0; i < 4; i++ {
+		if buf[i] != 0 {
+			break
+		}
+	}
+	return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+	eBytes, err := joseBase64UrlDecode(eB64Url)
+	if err != nil {
+		return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	// Only the minimum number of bytes were used to represent E, but
+	// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
+	byteLen := len(eBytes)
+	buf := make([]byte, 4-byteLen, 4)
+	eBytes = append(buf, eBytes...)
+
+	return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+	b64Url, err := stringFromMap(m, key)
+	if err != nil {
+		return nil, err
+	}
+
+	paramBytes, err := joseBase64UrlDecode(b64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+	for k, v := range headers {
+		switch val := v.(type) {
+		case string:
+			pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				// Return error, non-encodable type
+			}
+		default:
+			// Return error, non-encodable type
+		}
+	}
+
+	return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+	cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+	}
+
+	pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addPEMHeadersToKey(pemBlock, pubKey)
+
+	return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+	for key, value := range pemBlock.Headers {
+		var safeVal interface{}
+		if key == "hosts" {
+			safeVal = strings.Split(value, ",")
+		} else {
+			safeVal = value
+		}
+		pubKey.AddExtendedField(key, safeVal)
+	}
+}
diff --git a/vendor/github.com/containers/psgo/.travis.yml b/vendor/github.com/containers/psgo/.travis.yml
index 1399d24d3c..c07bb140bf 100644
--- a/vendor/github.com/containers/psgo/.travis.yml
+++ b/vendor/github.com/containers/psgo/.travis.yml
@@ -14,6 +14,6 @@ before_install:
   - sudo apt-get install -qq bats
 
 script:
-  - make validate || travis_terminate 1
-  - make build || travis_terminate 1
-  - make test || travis_terminate 1
+  - make validate
+  - make build
+  - make test
diff --git a/vendor/github.com/containers/psgo/Makefile b/vendor/github.com/containers/psgo/Makefile
index 6050b9d5b8..3618207846 100644
--- a/vendor/github.com/containers/psgo/Makefile
+++ b/vendor/github.com/containers/psgo/Makefile
@@ -1,4 +1,5 @@
 export GO111MODULE=off
+export GOPROXY=https://proxy.golang.org
 
 SHELL= /bin/bash
 GO ?= go
@@ -9,11 +10,17 @@ PROJECT := github.com/containers/psgo
 BATS_TESTS := *.bats
 GO_SRC=$(shell find . 
-name \*.go) +GO_BUILD=$(GO) build +# Go module support: set `-mod=vendor` to use the vendored sources +ifeq ($(shell go help mod >/dev/null 2>&1 && echo true), true) + GO_BUILD=GO111MODULE=on $(GO) build -mod=vendor +endif + all: validate build .PHONY: build build: $(GO_SRC) - $(GO) build -buildmode=pie -o $(BUILD_DIR)/$(NAME) $(PROJECT)/sample + $(GO_BUILD) -buildmode=pie -o $(BUILD_DIR)/$(NAME) $(PROJECT)/sample .PHONY: clean clean: diff --git a/vendor/github.com/containers/psgo/README.md b/vendor/github.com/containers/psgo/README.md index 55cb73a713..fed42c6834 100644 --- a/vendor/github.com/containers/psgo/README.md +++ b/vendor/github.com/containers/psgo/README.md @@ -85,6 +85,8 @@ The ps library is compatible with all AIX format descriptors of the ps command-l - Seccomp mode of the process (i.e., disabled, strict or filter). See seccomp(2) for more information. - **state** - Process state codes (e.g, **R** for *running*, **S** for *sleeping*). See proc(5) for more information. +- **stime** + - Process start time (e.g, "2019-12-09 10:50:36 +0100 CET). We can try out different format descriptors with the psgo binary: diff --git a/vendor/github.com/containers/psgo/go.mod b/vendor/github.com/containers/psgo/go.mod index a194ec196a..d9d54c5f77 100644 --- a/vendor/github.com/containers/psgo/go.mod +++ b/vendor/github.com/containers/psgo/go.mod @@ -6,6 +6,6 @@ require ( github.com/opencontainers/runc v0.0.0-20190425234816-dae70e8efea4 github.com/pkg/errors v0.0.0-20190227000051-27936f6d90f9 github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe - github.com/stretchr/testify v1.3.0 + github.com/stretchr/testify v1.4.0 golang.org/x/sys v0.0.0-20190425145619-16072639606e ) diff --git a/vendor/github.com/containers/psgo/go.sum b/vendor/github.com/containers/psgo/go.sum index da6c750db7..bbdd99730d 100644 --- a/vendor/github.com/containers/psgo/go.sum +++ b/vendor/github.com/containers/psgo/go.sum @@ -16,8 +16,12 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190425145619-16072639606e h1:4ktJgTV34+N3qOZUc5fAaG3Pb11qzMm3PkAoTAgUZ2I= golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/containers/psgo/internal/cgroups/cgroups.go b/vendor/github.com/containers/psgo/internal/cgroups/cgroups.go new file mode 100644 index 0000000000..eecaf87cb8 --- /dev/null +++ 
b/vendor/github.com/containers/psgo/internal/cgroups/cgroups.go
@@ -0,0 +1,44 @@
+// Copyright 2019 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cgroups
+
+import (
+	"sync"
+	"syscall"
+)
+
+const (
+	CgroupRoot        = "/sys/fs/cgroup"
+	cgroup2SuperMagic = 0x63677270
+)
+
+var (
+	isUnifiedOnce sync.Once
+	isUnified     bool
+	isUnifiedErr  error
+)
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v1 or cgroup v2 mode.
+func IsCgroup2UnifiedMode() (bool, error) {
+	isUnifiedOnce.Do(func() {
+		var st syscall.Statfs_t
+		if err := syscall.Statfs(CgroupRoot, &st); err != nil {
+			isUnified, isUnifiedErr = false, err
+		} else {
+			isUnified, isUnifiedErr = st.Type == cgroup2SuperMagic, nil
+		}
+	})
+	return isUnified, isUnifiedErr
+}
diff --git a/vendor/github.com/containers/psgo/internal/proc/pids.go b/vendor/github.com/containers/psgo/internal/proc/pids.go
index ff4887364c..69e8befc19 100644
--- a/vendor/github.com/containers/psgo/internal/proc/pids.go
+++ b/vendor/github.com/containers/psgo/internal/proc/pids.go
@@ -1,4 +1,4 @@
-// Copyright 2018 psgo authors
+// Copyright 2018-2019 psgo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -18,8 +18,11 @@ import (
 	"bufio"
 	"fmt"
 	"os"
+	"path/filepath"
 	"strconv"
 	"strings"
+
+	"github.com/containers/psgo/internal/cgroups"
 )
 
 // GetPIDs extracts and returns all PIDs from /proc.
@@ -49,45 +52,98 @@ func GetPIDs() ([]string, error) {
 	return pids, nil
 }
 
-// pidCgroupPath returns the path to the pid's pids cgroup.
-func pidCgroupPath(pid string) (string, error) {
+// GetPIDsFromCgroup returns a string slice of all pids listed in pid's pids
+// cgroup. It automatically detects if we're running in unified mode or not.
+func GetPIDsFromCgroup(pid string) ([]string, error) {
+	unified, err := cgroups.IsCgroup2UnifiedMode()
+	if err != nil {
+		return nil, err
+	}
+	if unified {
+		return getPIDsFromCgroupV2(pid)
+	}
+	return getPIDsFromCgroupV1(pid)
+}
+
+// getPIDsFromCgroupV1 returns a string slice of all pids listed in pid's pids
+// cgroup.
+func getPIDsFromCgroupV1(pid string) ([]string, error) {
+	// First, find the corresponding path to the PID cgroup.
 	f, err := os.Open(fmt.Sprintf("/proc/%s/cgroup", pid))
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 	defer f.Close()
 
 	scanner := bufio.NewScanner(f)
+	cgroupPath := ""
 	for scanner.Scan() {
 		fields := strings.Split(scanner.Text(), ":")
 		if len(fields) != 3 {
 			continue
 		}
 		if fields[1] == "pids" {
-			return fmt.Sprintf("/sys/fs/cgroup/pids/%s/cgroup.procs", fields[2]), nil
+			cgroupPath = fmt.Sprintf("/sys/fs/cgroup/pids/%s/cgroup.procs", fields[2])
 		}
 	}
-	return "", fmt.Errorf("couldn't find pids group for PID %s", pid)
+
+	if cgroupPath == "" {
+		return nil, fmt.Errorf("couldn't find v1 pids group for PID %s", pid)
+	}
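The detection in cgroups.go boils down to one statfs(2) call; since internal/ packages cannot be imported from outside psgo, here is the same check inlined as a standalone sketch:

```go
package main

import (
	"fmt"
	"syscall"
)

// CGROUP2_SUPER_MAGIC from linux/magic.h, the value used by the vendored code.
const cgroup2SuperMagic = 0x63677270

func main() {
	var st syscall.Statfs_t
	if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil {
		panic(err)
	}
	// On a unified host the cgroup root itself is a cgroup2 mount.
	fmt.Println("cgroup v2 unified mode:", st.Type == cgroup2SuperMagic)
}
```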
+
+	// Second, extract the PIDs inside the cgroup.
+	f, err = os.Open(cgroupPath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	pids := []string{}
+	scanner = bufio.NewScanner(f)
+	for scanner.Scan() {
+		pids = append(pids, scanner.Text())
+	}
+
+	return pids, nil
+}
 
-// GetPIDsFromCgroup returns a strings slice of all pids listesd in pid's pids
+// getPIDsFromCgroupV2 returns a string slice of all pids listed in pid's pids
 // cgroup.
-func GetPIDsFromCgroup(pid string) ([]string, error) {
-	cgroupPath, err := pidCgroupPath(pid)
+func getPIDsFromCgroupV2(pid string) ([]string, error) {
+	// First, find the corresponding path to the PID cgroup.
+	f, err := os.Open(fmt.Sprintf("/proc/%s/cgroup", pid))
 	if err != nil {
 		return nil, err
 	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	cgroupSlice := ""
+	for scanner.Scan() {
+		fields := strings.Split(scanner.Text(), ":")
+		if len(fields) != 3 {
+			continue
+		}
+		cgroupSlice = fields[2]
+		break
+	}
+
+	if cgroupSlice == "" {
+		return nil, fmt.Errorf("couldn't find v2 pids group for PID %s", pid)
+	}
 
-	f, err := os.Open(cgroupPath)
+	// Second, extract the PIDs inside the cgroup.
+	f, err = os.Open(filepath.Join(cgroups.CgroupRoot, cgroupSlice, "cgroup.procs"))
 	if err != nil {
 		return nil, err
 	}
 	defer f.Close()
 
 	pids := []string{}
-	scanner := bufio.NewScanner(f)
+	scanner = bufio.NewScanner(f)
 	for scanner.Scan() {
 		pids = append(pids, scanner.Text())
 	}
+
 	return pids, nil
 }
diff --git a/vendor/github.com/containers/psgo/internal/process/process.go b/vendor/github.com/containers/psgo/internal/process/process.go
index 20e40163f2..a936cc4ef2 100644
--- a/vendor/github.com/containers/psgo/internal/process/process.go
+++ b/vendor/github.com/containers/psgo/internal/process/process.go
@@ -188,23 +188,30 @@ func (p *Process) SetHostData() error {
 
 // ElapsedTime returns the time.Duration since process p was created.
 func (p *Process) ElapsedTime() (time.Duration, error) {
-	sinceBoot, err := strconv.ParseInt(p.Stat.Starttime, 10, 64)
+	startTime, err := p.StartTime()
 	if err != nil {
 		return 0, err
 	}
+	return (time.Now()).Sub(startTime), nil
+}
+
+// StartTime returns the time.Time when process p was started.
+func (p *Process) StartTime() (time.Time, error) {
+	sinceBoot, err := strconv.ParseInt(p.Stat.Starttime, 10, 64)
+	if err != nil {
+		return time.Time{}, err
+	}
 	clockTicks, err := host.ClockTicks()
 	if err != nil {
-		return 0, err
+		return time.Time{}, err
 	}
-
-	sinceBoot = sinceBoot / clockTicks
-
 	bootTime, err := host.BootTime()
 	if err != nil {
-		return 0, err
+		return time.Time{}, err
 	}
-	created := time.Unix(sinceBoot+bootTime, 0)
-	return (time.Now()).Sub(created), nil
+
+	sinceBoot = sinceBoot / clockTicks
+	return time.Unix(sinceBoot+bootTime, 0), nil
 }
 
 // CPUTime returns the cumlative CPU time of process p as a time.Duration.
diff --git a/vendor/github.com/containers/psgo/psgo.go b/vendor/github.com/containers/psgo/psgo.go
index 4986c9c71b..30b8b74cec 100644
--- a/vendor/github.com/containers/psgo/psgo.go
+++ b/vendor/github.com/containers/psgo/psgo.go
@@ -310,6 +310,11 @@ var (
 			header: "STATE",
 			procFn: processState,
 		},
+		{
+			normal: "stime",
+			header: "STIME",
+			procFn: processStartTime,
+		},
 	}
 )
 
@@ -715,6 +720,15 @@ func processTIME(p *process.Process, ctx *psContext) (string, error) {
 	return fmt.Sprintf("%v", cpu), nil
 }
 
+// processStartTime returns the start time of process p.
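To see the new descriptor end to end, the exported psgo API can be asked for it directly; a minimal sketch (assuming psgo.ProcessInfo keeps its current shape, taking format descriptors and returning rows of strings, header row first):

```go
package main

import (
	"fmt"

	"github.com/containers/psgo"
)

func main() {
	// "stime" is the descriptor this change adds; it is served by processStartTime.
	rows, err := psgo.ProcessInfo([]string{"pid", "stime", "time"})
	if err != nil {
		panic(err)
	}
	for _, row := range rows {
		fmt.Println(row)
	}
}
```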
+func processStartTime(p *process.Process, ctx *psContext) (string, error) { + sTime, err := p.StartTime() + if err != nil { + return "", err + } + return fmt.Sprintf("%v", sTime), nil +} + // processTTY returns the controlling tty (terminal) of process p. func processTTY(p *process.Process, ctx *psContext) (string, error) { ttyNr, err := strconv.ParseUint(p.Stat.TtyNr, 10, 64) diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index a823bab55c..619e077d0d 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -82,7 +82,7 @@ lint_task: build_script: | echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list apt-get update - apt-get install -y libbtrfs-dev libostree-dev libdevmapper-dev + apt-get install -y libbtrfs-dev libdevmapper-dev test_script: make lint # Update metadata on VM images referenced by this repository state diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml index bd8ccfcb19..ec4ebb187a 100644 --- a/vendor/github.com/containers/storage/.golangci.yml +++ b/vendor/github.com/containers/storage/.golangci.yml @@ -1,7 +1,5 @@ --- run: - build-tags: - - ostree concurrency: 6 deadline: 5m linters: diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index fa0fddaebd..90e5ca4998 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -29,7 +29,7 @@ GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73 NATIVETAGS := -AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/ostree_tag.sh) +AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS) GO ?= go diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 80138e7146..43ded90625 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.13.4 +1.13.5 diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 032e5b28a7..97222fe7a7 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -26,7 +26,6 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/ostree" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" units "github.com/docker/go-units" @@ -88,7 +87,6 @@ type overlayOptions struct { imageStores []string quota quota.Quota mountProgram string - ostreeRepo string skipMountHome bool mountOptions string ignoreChownErrors bool @@ -108,7 +106,6 @@ type Driver struct { supportsDType bool usingMetacopy bool locker *locker.Locker - convert map[string]bool } var ( @@ -234,12 +231,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } } - if opts.ostreeRepo != "" { - if err := ostree.CreateOSTreeRepository(opts.ostreeRepo, rootUID, rootGID); err != nil { - return 
nil, err - } - } - d := &Driver{ name: "overlay", home: home, @@ -251,7 +242,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) usingMetacopy: usingMetacopy, locker: locker.New(), options: *opts, - convert: make(map[string]bool), } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)) @@ -316,24 +306,12 @@ func parseOptions(options []string) (*overlayOptions, error) { return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err) } o.mountProgram = val - case "overlay2.ostree_repo", "overlay.ostree_repo", ".ostree_repo": - logrus.Debugf("overlay: ostree_repo=%s", val) - if !ostree.OstreeSupport() { - return nil, fmt.Errorf("overlay: ostree_repo specified but support for ostree is missing") - } - o.ostreeRepo = val case ".ignore_chown_errors", "overlay2.ignore_chown_errors", "overlay.ignore_chown_errors": logrus.Debugf("overlay: ignore_chown_errors=%s", val) o.ignoreChownErrors, err = strconv.ParseBool(val) if err != nil { return nil, err } - case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home": - logrus.Debugf("overlay: skip_mount_home=%s", val) - o.skipMountHome, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } default: return nil, fmt.Errorf("overlay: Unknown option %s", key) } @@ -556,10 +534,6 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr } } - if d.options.ostreeRepo != "" { - d.convert[id] = true - } - return d.create(id, parent, opts) } @@ -766,11 +740,6 @@ func (d *Driver) Remove(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) - // Ignore errors, we don't want to fail if the ostree branch doesn't exist, - if d.options.ostreeRepo != "" { - ostree.DeleteOSTree(d.options.ostreeRepo, id) - } - dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { @@ -1125,13 +1094,6 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) return 0, err } - _, convert := d.convert[id] - if convert { - if err := ostree.ConvertToOSTree(d.options.ostreeRepo, applyDir, id); err != nil { - return 0, err - } - } - return directory.Size(applyDir) } diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index 6c02a45dc2..58a1635ae5 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -11,7 +11,6 @@ import ( "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/ostree" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" "github.com/opencontainers/selinux/go-selinux/label" @@ -51,11 +50,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) case "vfs.imagestore", ".imagestore": d.homes = append(d.homes, strings.Split(val, ",")...) 
continue - case "vfs.ostree_repo", ".ostree_repo": - if !ostree.OstreeSupport() { - return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing") - } - d.ostreeRepo = val case "vfs.mountopt": return nil, fmt.Errorf("vfs driver does not support mount options") case ".ignore_chown_errors", "vfs.ignore_chown_errors": @@ -69,15 +63,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) return nil, fmt.Errorf("vfs driver does not support %s options", key) } } - if d.ostreeRepo != "" { - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return nil, err - } - if err := ostree.CreateOSTreeRepository(d.ostreeRepo, rootUID, rootGID); err != nil { - return nil, err - } - } d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d) d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater) @@ -92,7 +77,6 @@ type Driver struct { name string homes []string idMappings *idtools.IDMappings - ostreeRepo string ignoreChownErrors bool naiveDiff graphdriver.DiffDriver updater graphdriver.LayerIDMapUpdater @@ -191,11 +175,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool } } - if ro && d.ostreeRepo != "" { - if err := ostree.ConvertToOSTree(d.ostreeRepo, dir, id); err != nil { - return err - } - } return nil } @@ -216,10 +195,6 @@ func (d *Driver) dir(id string) string { // Remove deletes the content from the directory for a given id. func (d *Driver) Remove(id string) error { - if d.ostreeRepo != "" { - // Ignore errors, we don't want to fail if the ostree branch doesn't exist, - ostree.DeleteOSTree(d.ostreeRepo, id) - } return system.EnsureRemoveAll(d.dir(id)) } diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod index d8f943d300..934e82ad24 100644 --- a/vendor/github.com/containers/storage/go.mod +++ b/vendor/github.com/containers/storage/go.mod @@ -15,7 +15,6 @@ require ( github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/runc v1.0.0-rc8 github.com/opencontainers/selinux v1.2.2 - github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 github.com/pkg/errors v0.8.1 github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 github.com/sirupsen/logrus v1.4.2 diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum index 159bfdba2a..a0e05dd1df 100644 --- a/vendor/github.com/containers/storage/go.sum +++ b/vendor/github.com/containers/storage/go.sum @@ -35,8 +35,6 @@ github.com/opencontainers/runc v1.0.0-rc8 h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/a github.com/opencontainers/runc v1.0.0-rc8/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg= github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= -github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index 
c53f704219..0910401408 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -85,11 +85,6 @@ type OptionsConfig struct { RemapGroup string `toml:"remap-group"` // Thinpool container options to be handed to thinpool drivers Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` - // OSTree repository - OstreeRepo string `toml:"ostree_repo"` - - // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` // Alternative program to use for the mount of the file system MountProgram string `toml:"mount_program"` diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index 1fed414f75..228c8cf241 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -104,7 +104,7 @@ func (l *lockfile) lock(l_type int16, recursive bool) { // If we're the first reference on the lock, we need to open the file again. fd, err := openLock(l.file, l.ro) if err != nil { - panic(fmt.Sprintf("error opening %q", l.file)) + panic(fmt.Sprintf("error opening %q: %v", l.file, err)) } unix.CloseOnExec(fd) l.fd = uintptr(fd) diff --git a/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go b/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go deleted file mode 100644 index bf83ccf25f..0000000000 --- a/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !ostree !cgo - -package ostree - -func OstreeSupport() bool { - return false -} - -func DeleteOSTree(repoLocation, id string) error { - return nil -} - -func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error { - return nil -} - -func ConvertToOSTree(repoLocation, root, id string) error { - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/ostree/ostree.go b/vendor/github.com/containers/storage/pkg/ostree/ostree.go deleted file mode 100644 index 7d324f2b20..0000000000 --- a/vendor/github.com/containers/storage/pkg/ostree/ostree.go +++ /dev/null @@ -1,198 +0,0 @@ -// +build ostree,cgo - -package ostree - -import ( - "fmt" - "golang.org/x/sys/unix" - "os" - "path/filepath" - "runtime" - "syscall" - "time" - "unsafe" - - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/system" - glib "github.com/ostreedev/ostree-go/pkg/glibobject" - "github.com/ostreedev/ostree-go/pkg/otbuiltin" - "github.com/pkg/errors" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 -// #include -// #include -// #include -// #include -// #include -// #include -import "C" - -func OstreeSupport() bool { - return true -} - -func fixFiles(dir string, usermode bool) (bool, []string, error) { - var SkipOstree = errors.New("skip ostree deduplication") - - var whiteouts []string - - err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { - if !usermode { - stat, ok := info.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("not syscall.Stat_t") - } - - if stat.Rdev == 0 && (stat.Mode&unix.S_IFCHR) != 0 { - whiteouts = append(whiteouts, path) - return nil - } - } - // Skip the ostree deduplication if we encounter a file type that - // ostree does not manage. 
- return SkipOstree - } - if info.IsDir() { - if usermode { - if err := os.Chmod(path, info.Mode()|0700); err != nil { - return err - } - } - } else if usermode && (info.Mode().IsRegular()) { - if err := os.Chmod(path, info.Mode()|0600); err != nil { - return err - } - } - return nil - }) - if err == SkipOstree { - return true, nil, nil - } - if err != nil { - return false, nil, err - } - return false, whiteouts, nil -} - -// Create prepares the filesystem for the OSTREE driver and copies the directory for the given id under the parent. -func ConvertToOSTree(repoLocation, root, id string) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - repo, err := otbuiltin.OpenRepo(repoLocation) - if err != nil { - return errors.Wrap(err, "could not open the OSTree repository") - } - - skip, whiteouts, err := fixFiles(root, os.Getuid() != 0) - if err != nil { - return errors.Wrap(err, "could not prepare the OSTree directory") - } - if skip { - return nil - } - - if _, err := repo.PrepareTransaction(); err != nil { - return errors.Wrap(err, "could not prepare the OSTree transaction") - } - - if skip { - return nil - } - - commitOpts := otbuiltin.NewCommitOptions() - commitOpts.Timestamp = time.Now() - commitOpts.LinkCheckoutSpeedup = true - commitOpts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" - branch := fmt.Sprintf("containers-storage/%s", id) - - for _, w := range whiteouts { - if err := os.Remove(w); err != nil { - return errors.Wrap(err, "could not delete whiteout file") - } - } - - if _, err := repo.Commit(root, branch, commitOpts); err != nil { - return errors.Wrap(err, "could not commit the layer") - } - - if _, err := repo.CommitTransaction(); err != nil { - return errors.Wrap(err, "could not complete the OSTree transaction") - } - - if err := system.EnsureRemoveAll(root); err != nil { - return errors.Wrap(err, "could not delete layer") - } - - checkoutOpts := otbuiltin.NewCheckoutOptions() - checkoutOpts.RequireHardlinks = true - checkoutOpts.Whiteouts = false - if err := otbuiltin.Checkout(repoLocation, root, branch, checkoutOpts); err != nil { - return errors.Wrap(err, "could not checkout from OSTree") - } - - for _, w := range whiteouts { - if err := unix.Mknod(w, unix.S_IFCHR, 0); err != nil { - return errors.Wrap(err, "could not recreate whiteout file") - } - } - return nil -} - -func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - _, err := os.Stat(repoLocation) - if err != nil && !os.IsNotExist(err) { - return err - } else if err != nil { - if err := idtools.MkdirAllAs(repoLocation, 0700, rootUID, rootGID); err != nil { - return errors.Wrap(err, "could not create OSTree repository directory: %v") - } - - if _, err := otbuiltin.Init(repoLocation, otbuiltin.NewInitOptions()); err != nil { - return errors.Wrap(err, "could not create OSTree repository") - } - } - return nil -} - -func openRepo(path string) (*C.struct_OstreeRepo, error) { - var cerr *C.GError - cpath := C.CString(path) - defer C.free(unsafe.Pointer(cpath)) - pathc := C.g_file_new_for_path(cpath) - defer C.g_object_unref(C.gpointer(pathc)) - repo := C.ostree_repo_new(pathc) - r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) - if !r { - C.g_object_unref(C.gpointer(repo)) - return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - return repo, nil -} - -func DeleteOSTree(repoLocation, id string) error { - runtime.LockOSThread() - defer 
runtime.UnlockOSThread() - - repo, err := openRepo(repoLocation) - if err != nil { - return err - } - defer C.g_object_unref(C.gpointer(repo)) - - branch := fmt.Sprintf("containers-storage/%s", id) - - cbranch := C.CString(branch) - defer C.free(unsafe.Pointer(cbranch)) - - var cerr *C.GError - r := glib.GoBool(glib.GBoolean(C.ostree_repo_set_ref_immediate(repo, nil, cbranch, nil, nil, &cerr))) - if !r { - return glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - return nil -} diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 3e7bf62f06..efd46eefbd 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -61,13 +61,6 @@ mountopt = "nodev" # remap-user = "storage" # remap-group = "storage" -# If specified, use OSTree to deduplicate files with the overlay backend. -ostree_repo = "" - -# Set to skip a PRIVATE bind mount on the storage home directory. Only supported by -# certain container storage drivers (overlay). -skip_mount_home = "false" - [storage.options.thinpool] # Storage Options for thinpool diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index af69a4b2d1..6e4bd4ee05 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -3357,12 +3357,6 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { if config.Storage.Options.Size != "" { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size)) } - if config.Storage.Options.OstreeRepo != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ostree_repo=%s", config.Storage.Driver, config.Storage.Options.OstreeRepo)) - } - if config.Storage.Options.SkipMountHome != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome)) - } if config.Storage.Options.MountProgram != "" { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram)) } diff --git a/vendor/github.com/coreos/go-iptables/LICENSE b/vendor/github.com/coreos/go-iptables/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/vendor/github.com/coreos/go-iptables/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. 
- -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. 
If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/go-iptables/NOTICE b/vendor/github.com/coreos/go-iptables/NOTICE deleted file mode 100644 index 23a0ada2fb..0000000000 --- a/vendor/github.com/coreos/go-iptables/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2018 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go deleted file mode 100644 index 2ed875bb58..0000000000 --- a/vendor/github.com/coreos/go-iptables/iptables/iptables.go +++ /dev/null @@ -1,598 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package iptables - -import ( - "bytes" - "fmt" - "io" - "net" - "os/exec" - "regexp" - "strconv" - "strings" - "syscall" -) - -// Adds the output of stderr to exec.ExitError -type Error struct { - exec.ExitError - cmd exec.Cmd - msg string - proto Protocol - exitStatus *int //for overriding -} - -func (e *Error) ExitStatus() int { - if e.exitStatus != nil { - return *e.exitStatus - } - return e.Sys().(syscall.WaitStatus).ExitStatus() -} - -func (e *Error) Error() string { - return fmt.Sprintf("running %v: exit status %v: %v", e.cmd.Args, e.ExitStatus(), e.msg) -} - -// IsNotExist returns true if the error is due to the chain or rule not existing -func (e *Error) IsNotExist() bool { - return e.ExitStatus() == 1 && - (e.msg == fmt.Sprintf("%s: Bad rule (does a matching rule exist in that chain?).\n", getIptablesCommand(e.proto)) || - e.msg == fmt.Sprintf("%s: No chain/target/match by that name.\n", getIptablesCommand(e.proto))) -} - -// Protocol to differentiate between IPv4 and IPv6 -type Protocol byte - -const ( - ProtocolIPv4 Protocol = iota - ProtocolIPv6 -) - -type IPTables struct { - path string - proto Protocol - hasCheck bool - hasWait bool - hasRandomFully bool - v1 int - v2 int - v3 int - mode string // the underlying iptables operating mode, e.g. nf_tables -} - -// Stat represents a structured statistic entry. -type Stat struct { - Packets uint64 `json:"pkts"` - Bytes uint64 `json:"bytes"` - Target string `json:"target"` - Protocol string `json:"prot"` - Opt string `json:"opt"` - Input string `json:"in"` - Output string `json:"out"` - Source *net.IPNet `json:"source"` - Destination *net.IPNet `json:"destination"` - Options string `json:"options"` -} - -// New creates a new IPTables. -// For backwards compatibility, this always uses IPv4, i.e. "iptables". -func New() (*IPTables, error) { - return NewWithProtocol(ProtocolIPv4) -} - -// New creates a new IPTables for the given proto. -// The proto will determine which command is used, either "iptables" or "ip6tables". -func NewWithProtocol(proto Protocol) (*IPTables, error) { - path, err := exec.LookPath(getIptablesCommand(proto)) - if err != nil { - return nil, err - } - vstring, err := getIptablesVersionString(path) - v1, v2, v3, mode, err := extractIptablesVersion(vstring) - - checkPresent, waitPresent, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3) - - ipt := IPTables{ - path: path, - proto: proto, - hasCheck: checkPresent, - hasWait: waitPresent, - hasRandomFully: randomFullyPresent, - v1: v1, - v2: v2, - v3: v3, - mode: mode, - } - return &ipt, nil -} - -// Proto returns the protocol used by this IPTables. -func (ipt *IPTables) Proto() Protocol { - return ipt.proto -} - -// Exists checks if given rulespec in specified table/chain exists -func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) { - if !ipt.hasCheck { - return ipt.existsForOldIptables(table, chain, rulespec) - - } - cmd := append([]string{"-t", table, "-C", chain}, rulespec...) - err := ipt.run(cmd...) 
- eerr, eok := err.(*Error) - switch { - case err == nil: - return true, nil - case eok && eerr.ExitStatus() == 1: - return false, nil - default: - return false, err - } -} - -// Insert inserts rulespec to specified table/chain (in specified pos) -func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error { - cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...) - return ipt.run(cmd...) -} - -// Append appends rulespec to specified table/chain -func (ipt *IPTables) Append(table, chain string, rulespec ...string) error { - cmd := append([]string{"-t", table, "-A", chain}, rulespec...) - return ipt.run(cmd...) -} - -// AppendUnique acts like Append except that it won't add a duplicate -func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error { - exists, err := ipt.Exists(table, chain, rulespec...) - if err != nil { - return err - } - - if !exists { - return ipt.Append(table, chain, rulespec...) - } - - return nil -} - -// Delete removes rulespec in specified table/chain -func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error { - cmd := append([]string{"-t", table, "-D", chain}, rulespec...) - return ipt.run(cmd...) -} - -// List rules in specified table/chain -func (ipt *IPTables) List(table, chain string) ([]string, error) { - args := []string{"-t", table, "-S", chain} - return ipt.executeList(args) -} - -// List rules (with counters) in specified table/chain -func (ipt *IPTables) ListWithCounters(table, chain string) ([]string, error) { - args := []string{"-t", table, "-v", "-S", chain} - return ipt.executeList(args) -} - -// ListChains returns a slice containing the name of each chain in the specified table. -func (ipt *IPTables) ListChains(table string) ([]string, error) { - args := []string{"-t", table, "-S"} - - result, err := ipt.executeList(args) - if err != nil { - return nil, err - } - - // Iterate over rules to find all default (-P) and user-specified (-N) chains. - // Chains definition always come before rules. - // Format is the following: - // -P OUTPUT ACCEPT - // -N Custom - var chains []string - for _, val := range result { - if strings.HasPrefix(val, "-P") || strings.HasPrefix(val, "-N") { - chains = append(chains, strings.Fields(val)[1]) - } else { - break - } - } - return chains, nil -} - -// Stats lists rules including the byte and packet counts -func (ipt *IPTables) Stats(table, chain string) ([][]string, error) { - args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"} - lines, err := ipt.executeList(args) - if err != nil { - return nil, err - } - - appendSubnet := func(addr string) string { - if strings.IndexByte(addr, byte('/')) < 0 { - if strings.IndexByte(addr, '.') < 0 { - return addr + "/128" - } - return addr + "/32" - } - return addr - } - - ipv6 := ipt.proto == ProtocolIPv6 - - rows := [][]string{} - for i, line := range lines { - // Skip over chain name and field header - if i < 2 { - continue - } - - // Fields: - // 0=pkts 1=bytes 2=target 3=prot 4=opt 5=in 6=out 7=source 8=destination 9=options - line = strings.TrimSpace(line) - fields := strings.Fields(line) - - // The ip6tables verbose output cannot be naively split due to the default "opt" - // field containing 2 single spaces. - if ipv6 { - // Check if field 6 is "opt" or "source" address - dest := fields[6] - ip, _, _ := net.ParseCIDR(dest) - if ip == nil { - ip = net.ParseIP(dest) - } - - // If we detected a CIDR or IP, the "opt" field is empty.. insert it. 
- if ip != nil { - f := []string{} - f = append(f, fields[:4]...) - f = append(f, " ") // Empty "opt" field for ip6tables - f = append(f, fields[4:]...) - fields = f - } - } - - // Adjust "source" and "destination" to include netmask, to match regular - // List output - fields[7] = appendSubnet(fields[7]) - fields[8] = appendSubnet(fields[8]) - - // Combine "options" fields 9... into a single space-delimited field. - options := fields[9:] - fields = fields[:9] - fields = append(fields, strings.Join(options, " ")) - rows = append(rows, fields) - } - return rows, nil -} - -// ParseStat parses a single statistic row into a Stat struct. The input should -// be a string slice that is returned from calling the Stat method. -func (ipt *IPTables) ParseStat(stat []string) (parsed Stat, err error) { - // For forward-compatibility, expect at least 10 fields in the stat - if len(stat) < 10 { - return parsed, fmt.Errorf("stat contained fewer fields than expected") - } - - // Convert the fields that are not plain strings - parsed.Packets, err = strconv.ParseUint(stat[0], 0, 64) - if err != nil { - return parsed, fmt.Errorf(err.Error(), "could not parse packets") - } - parsed.Bytes, err = strconv.ParseUint(stat[1], 0, 64) - if err != nil { - return parsed, fmt.Errorf(err.Error(), "could not parse bytes") - } - _, parsed.Source, err = net.ParseCIDR(stat[7]) - if err != nil { - return parsed, fmt.Errorf(err.Error(), "could not parse source") - } - _, parsed.Destination, err = net.ParseCIDR(stat[8]) - if err != nil { - return parsed, fmt.Errorf(err.Error(), "could not parse destination") - } - - // Put the fields that are strings - parsed.Target = stat[2] - parsed.Protocol = stat[3] - parsed.Opt = stat[4] - parsed.Input = stat[5] - parsed.Output = stat[6] - parsed.Options = stat[9] - - return parsed, nil -} - -// StructuredStats returns statistics as structured data which may be further -// parsed and marshaled. -func (ipt *IPTables) StructuredStats(table, chain string) ([]Stat, error) { - rawStats, err := ipt.Stats(table, chain) - if err != nil { - return nil, err - } - - structStats := []Stat{} - for _, rawStat := range rawStats { - stat, err := ipt.ParseStat(rawStat) - if err != nil { - return nil, err - } - structStats = append(structStats, stat) - } - - return structStats, nil -} - -func (ipt *IPTables) executeList(args []string) ([]string, error) { - var stdout bytes.Buffer - if err := ipt.runWithOutput(args, &stdout); err != nil { - return nil, err - } - - rules := strings.Split(stdout.String(), "\n") - - // strip trailing newline - if len(rules) > 0 && rules[len(rules)-1] == "" { - rules = rules[:len(rules)-1] - } - - // nftables mode doesn't return an error code when listing a non-existent - // chain. Patch that up. - if len(rules) == 0 && ipt.mode == "nf_tables" { - v := 1 - return nil, &Error{ - cmd: exec.Cmd{Args: args}, - msg: fmt.Sprintf("%s: No chain/target/match by that name.\n", getIptablesCommand(ipt.proto)), - proto: ipt.proto, - exitStatus: &v, - } - } - - for i, rule := range rules { - rules[i] = filterRuleOutput(rule) - } - - return rules, nil -} - -// NewChain creates a new chain in the specified table. -// If the chain already exists, it will result in an error. -func (ipt *IPTables) NewChain(table, chain string) error { - return ipt.run("-t", table, "-N", chain) -} - -const existsErr = 1 - -// ClearChain flushed (deletes all rules) in the specified table/chain. 
-// If the chain does not exist, a new one will be created -func (ipt *IPTables) ClearChain(table, chain string) error { - err := ipt.NewChain(table, chain) - - eerr, eok := err.(*Error) - switch { - case err == nil: - return nil - case eok && eerr.ExitStatus() == existsErr: - // chain already exists. Flush (clear) it. - return ipt.run("-t", table, "-F", chain) - default: - return err - } -} - -// RenameChain renames the old chain to the new one. -func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error { - return ipt.run("-t", table, "-E", oldChain, newChain) -} - -// DeleteChain deletes the chain in the specified table. -// The chain must be empty -func (ipt *IPTables) DeleteChain(table, chain string) error { - return ipt.run("-t", table, "-X", chain) -} - -// ChangePolicy changes policy on chain to target -func (ipt *IPTables) ChangePolicy(table, chain, target string) error { - return ipt.run("-t", table, "-P", chain, target) -} - -// Check if the underlying iptables command supports the --random-fully flag -func (ipt *IPTables) HasRandomFully() bool { - return ipt.hasRandomFully -} - -// Return version components of the underlying iptables command -func (ipt *IPTables) GetIptablesVersion() (int, int, int) { - return ipt.v1, ipt.v2, ipt.v3 -} - -// run runs an iptables command with the given arguments, ignoring -// any stdout output -func (ipt *IPTables) run(args ...string) error { - return ipt.runWithOutput(args, nil) -} - -// runWithOutput runs an iptables command with the given arguments, -// writing any stdout output to the given writer -func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error { - args = append([]string{ipt.path}, args...) - if ipt.hasWait { - args = append(args, "--wait") - } else { - fmu, err := newXtablesFileLock() - if err != nil { - return err - } - ul, err := fmu.tryLock() - if err != nil { - return err - } - defer ul.Unlock() - } - - var stderr bytes.Buffer - cmd := exec.Cmd{ - Path: ipt.path, - Args: args, - Stdout: stdout, - Stderr: &stderr, - } - - if err := cmd.Run(); err != nil { - switch e := err.(type) { - case *exec.ExitError: - return &Error{*e, cmd, stderr.String(), ipt.proto, nil} - default: - return err - } - } - - return nil -} - -// getIptablesCommand returns the correct command for the given protocol, either "iptables" or "ip6tables". -func getIptablesCommand(proto Protocol) string { - if proto == ProtocolIPv6 { - return "ip6tables" - } else { - return "iptables" - } -} - -// Checks if iptables has the "-C" and "--wait" flag -func getIptablesCommandSupport(v1 int, v2 int, v3 int) (bool, bool, bool) { - return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), iptablesHasRandomFully(v1, v2, v3) -} - -// getIptablesVersion returns the first three components of the iptables version -// and the operating mode (e.g. nf_tables or legacy) -// e.g. 
"iptables v1.3.66" would return (1, 3, 66, legacy, nil) -func extractIptablesVersion(str string) (int, int, int, string, error) { - versionMatcher := regexp.MustCompile(`v([0-9]+)\.([0-9]+)\.([0-9]+)(?:\s+\((\w+))?`) - result := versionMatcher.FindStringSubmatch(str) - if result == nil { - return 0, 0, 0, "", fmt.Errorf("no iptables version found in string: %s", str) - } - - v1, err := strconv.Atoi(result[1]) - if err != nil { - return 0, 0, 0, "", err - } - - v2, err := strconv.Atoi(result[2]) - if err != nil { - return 0, 0, 0, "", err - } - - v3, err := strconv.Atoi(result[3]) - if err != nil { - return 0, 0, 0, "", err - } - - mode := "legacy" - if result[4] != "" { - mode = result[4] - } - return v1, v2, v3, mode, nil -} - -// Runs "iptables --version" to get the version string -func getIptablesVersionString(path string) (string, error) { - cmd := exec.Command(path, "--version") - var out bytes.Buffer - cmd.Stdout = &out - err := cmd.Run() - if err != nil { - return "", err - } - return out.String(), nil -} - -// Checks if an iptables version is after 1.4.11, when --check was added -func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool { - if v1 > 1 { - return true - } - if v1 == 1 && v2 > 4 { - return true - } - if v1 == 1 && v2 == 4 && v3 >= 11 { - return true - } - return false -} - -// Checks if an iptables version is after 1.4.20, when --wait was added -func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool { - if v1 > 1 { - return true - } - if v1 == 1 && v2 > 4 { - return true - } - if v1 == 1 && v2 == 4 && v3 >= 20 { - return true - } - return false -} - -// Checks if an iptables version is after 1.6.2, when --random-fully was added -func iptablesHasRandomFully(v1 int, v2 int, v3 int) bool { - if v1 > 1 { - return true - } - if v1 == 1 && v2 > 6 { - return true - } - if v1 == 1 && v2 == 6 && v3 >= 2 { - return true - } - return false -} - -// Checks if a rule specification exists for a table -func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) { - rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ") - args := []string{"-t", table, "-S"} - var stdout bytes.Buffer - err := ipt.runWithOutput(args, &stdout) - if err != nil { - return false, err - } - return strings.Contains(stdout.String(), rs), nil -} - -// counterRegex is the regex used to detect nftables counter format -var counterRegex = regexp.MustCompile(`^\[([0-9]+):([0-9]+)\] `) - -// filterRuleOutput works around some inconsistencies in output. -// For example, when iptables is in legacy vs. nftables mode, it produces -// different results. -func filterRuleOutput(rule string) string { - out := rule - - // work around an output difference in nftables mode where counters - // are output in iptables-save format, rather than iptables -S format - // The string begins with "[0:0]" - // - // Fixes #49 - if groups := counterRegex.FindStringSubmatch(out); groups != nil { - // drop the brackets - out = out[len(groups[0]):] - out = fmt.Sprintf("%s -c %s %s", out, groups[1], groups[2]) - } - - return out -} diff --git a/vendor/github.com/coreos/go-iptables/iptables/lock.go b/vendor/github.com/coreos/go-iptables/iptables/lock.go deleted file mode 100644 index a88e92b4e4..0000000000 --- a/vendor/github.com/coreos/go-iptables/iptables/lock.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package iptables - -import ( - "os" - "sync" - "syscall" -) - -const ( - // In earlier versions of iptables, the xtables lock was implemented - // via a Unix socket, but now flock is used via this lockfile: - // http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707 - // Note the LSB-conforming "/run" directory does not exist on old - // distributions, so assume "/var" is symlinked - xtablesLockFilePath = "/var/run/xtables.lock" - - defaultFilePerm = 0600 -) - -type Unlocker interface { - Unlock() error -} - -type nopUnlocker struct{} - -func (_ nopUnlocker) Unlock() error { return nil } - -type fileLock struct { - // mu is used to protect against concurrent invocations from within this process - mu sync.Mutex - fd int -} - -// tryLock takes an exclusive lock on the xtables lock file without blocking. -// This is best-effort only: if the exclusive lock would block (i.e. because -// another process already holds it), no error is returned. Otherwise, any -// error encountered during the locking operation is returned. -// The returned Unlocker should be used to release the lock when the caller is -// done invoking iptables commands. -func (l *fileLock) tryLock() (Unlocker, error) { - l.mu.Lock() - err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB) - switch err { - case syscall.EWOULDBLOCK: - l.mu.Unlock() - return nopUnlocker{}, nil - case nil: - return l, nil - default: - l.mu.Unlock() - return nil, err - } -} - -// Unlock closes the underlying file, which implicitly unlocks it as well. It -// also unlocks the associated mutex. 
-func (l *fileLock) Unlock() error { - defer l.mu.Unlock() - return syscall.Close(l.fd) -} - -// newXtablesFileLock opens a new lock on the xtables lockfile without -// acquiring the lock -func newXtablesFileLock() (*fileLock, error) { - fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm) - if err != nil { - return nil, err - } - return &fileLock{fd: fd}, nil -} diff --git a/vendor/github.com/cri-o/cri-o/internal/oci/container.go b/vendor/github.com/cri-o/cri-o/internal/oci/container.go index 3a95b1c824..f6e5518b09 100644 --- a/vendor/github.com/cri-o/cri-o/internal/oci/container.go +++ b/vendor/github.com/cri-o/cri-o/internal/oci/container.go @@ -73,7 +73,7 @@ type ContainerState struct { Created time.Time `json:"created"` Started time.Time `json:"started,omitempty"` Finished time.Time `json:"finished,omitempty"` - ExitCode int32 `json:"exitCode,omitempty"` + ExitCode *int32 `json:"exitCode,omitempty"` OOMKilled bool `json:"oomKilled,omitempty"` Error string `json:"error,omitempty"` } diff --git a/vendor/github.com/cri-o/cri-o/internal/oci/oci.go b/vendor/github.com/cri-o/cri-o/internal/oci/oci.go index 3ed6ff76a0..573480c343 100644 --- a/vendor/github.com/cri-o/cri-o/internal/oci/oci.go +++ b/vendor/github.com/cri-o/cri-o/internal/oci/oci.go @@ -172,15 +172,15 @@ func (r *Runtime) WaitContainerStateStopped(ctx context.Context, c *Container) ( return nil } -func (r *Runtime) newRuntimeImpl(c *Container) (RuntimeImpl, error) { +func (r *Runtime) getRuntimeHandler(handler string) (*config.RuntimeHandler, error) { // Define the current runtime handler as the default runtime handler. rh := r.config.Runtimes[r.config.DefaultRuntime] // Override the current runtime handler with the runtime handler // corresponding to the runtime handler key provided with this // specific container. - if c.runtimeHandler != "" { - runtimeHandler, err := r.ValidateRuntimeHandler(c.runtimeHandler) + if handler != "" { + runtimeHandler, err := r.ValidateRuntimeHandler(handler) if err != nil { return nil, err } @@ -188,6 +188,39 @@ func (r *Runtime) newRuntimeImpl(c *Container) (RuntimeImpl, error) { rh = runtimeHandler } + return rh, nil +} + +// PrivelegedRuntimeHandler returns a boolean value configured for the +// runtimeHandler indicating if devices on the host are passed/not passed +// to a container running as privileged. +func (r *Runtime) PrivilegedWithoutHostDevices(handler string) (bool, error) { + rh, err := r.getRuntimeHandler(handler) + if err != nil { + return false, err + } + + return rh.PrivilegedWithoutHostDevices, nil +} + +// ContainerRuntimeType returns the type of runtime configured. +// This is needed when callers need to do specific work for oci vs vm +// containers, like monitor an oci container's conmon. 
+func (r *Runtime) ContainerRuntimeType(c *Container) (string, error) { + rh, err := r.getRuntimeHandler(c.runtimeHandler) + if err != nil { + return "", err + } + + return rh.RuntimeType, nil +} + +func (r *Runtime) newRuntimeImpl(c *Container) (RuntimeImpl, error) { + rh, err := r.getRuntimeHandler(c.runtimeHandler) + if err != nil { + return nil, err + } + if rh.RuntimeType == config.RuntimeTypeVM { return newRuntimeVM(rh.RuntimePath), nil } diff --git a/vendor/github.com/cri-o/cri-o/internal/oci/runtime_oci.go b/vendor/github.com/cri-o/cri-o/internal/oci/runtime_oci.go index 2d661c9a51..6ca34ec6ea 100644 --- a/vendor/github.com/cri-o/cri-o/internal/oci/runtime_oci.go +++ b/vendor/github.com/cri-o/cri-o/internal/oci/runtime_oci.go @@ -53,6 +53,7 @@ func newRuntimeOCI(r *Runtime, handler *config.RuntimeHandler) RuntimeImpl { if handler.RuntimeRoot != "" { runRoot = handler.RuntimeRoot } + return &runtimeOCI{ Runtime: r, path: handler.RuntimePath, @@ -97,7 +98,9 @@ func (r *runtimeOCI) CreateContainer(c *Container, cgroupParent string) (err err "-u", c.id, "-r", r.path, "-b", c.bundlePath, + "--persist-dir", c.dir, "-p", filepath.Join(c.bundlePath, "pidfile"), + "-P", c.conmonPidFilePath(), "-l", c.logPath, "--exit-dir", r.config.ContainerExitsDir, "--socket-dir-path", r.config.ContainerAttachSocketDir, @@ -214,6 +217,7 @@ func (r *runtimeOCI) CreateContainer(c *Container, cgroupParent string) (err err logrus.Errorf("Container creation timeout (%v)", ContainerCreateTimeout) return fmt.Errorf("create container timeout") } + return nil } @@ -626,14 +630,42 @@ func (r *runtimeOCI) DeleteContainer(c *Container) error { defer c.opLock.Unlock() _, err := utils.ExecCmd(r.path, rootFlag, r.root, "delete", "--force", c.id) + return err } +func updateContainerStatusFromExitFile(c *Container) error { + exitFilePath := filepath.Join(c.dir, "exit") + fi, err := os.Stat(exitFilePath) + if err != nil { + return fmt.Errorf("failed to find container exit file for %v: %v", c.id, err) + } + c.state.Finished, err = getFinishedTime(fi) + if err != nil { + return fmt.Errorf("failed to get finished time: %v", err) + } + statusCodeStr, err := ioutil.ReadFile(exitFilePath) + if err != nil { + return fmt.Errorf("failed to read exit file: %v", err) + } + statusCode, err := strconv.Atoi(string(statusCodeStr)) + if err != nil { + return fmt.Errorf("status code conversion failed: %v", err) + } + c.state.ExitCode = utils.Int32Ptr(int32(statusCode)) + return nil +} + // UpdateContainerStatus refreshes the status of the container. func (r *runtimeOCI) UpdateContainerStatus(c *Container) error { c.opLock.Lock() defer c.opLock.Unlock() + if c.state.ExitCode != nil && !c.state.Finished.IsZero() { + logrus.Debugf("Skipping status update for: %+v", c.state) + return nil + } + cmd := exec.Command(r.path, rootFlag, r.root, "state", c.id) if v, found := os.LookupEnv("XDG_RUNTIME_DIR"); found { cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", v)) @@ -647,8 +679,10 @@ func (r *runtimeOCI) UpdateContainerStatus(c *Container) error { // We always populate the fields below so kube can restart/reschedule // containers failing. 
c.state.Status = ContainerStateStopped - c.state.Finished = time.Now() - c.state.ExitCode = 255 + if err := updateContainerStatusFromExitFile(c); err != nil { + c.state.Finished = time.Now() + c.state.ExitCode = utils.Int32Ptr(255) + } return nil } if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(&c.state); err != nil { @@ -656,7 +690,7 @@ func (r *runtimeOCI) UpdateContainerStatus(c *Container) error { } if c.state.Status == ContainerStateStopped { - exitFilePath := filepath.Join(r.config.ContainerExitsDir, c.id) + exitFilePath := filepath.Join(c.dir, "exit") var fi os.FileInfo err = kwait.ExponentialBackoff( kwait.Backoff{ @@ -675,7 +709,6 @@ func (r *runtimeOCI) UpdateContainerStatus(c *Container) error { }) if err != nil { logrus.Warnf("failed to find container exit file for %v: %v", c.id, err) - c.state.ExitCode = -1 } else { c.state.Finished, err = getFinishedTime(fi) if err != nil { @@ -689,7 +722,7 @@ func (r *runtimeOCI) UpdateContainerStatus(c *Container) error { if err != nil { return fmt.Errorf("status code conversion failed: %v", err) } - c.state.ExitCode = int32(statusCode) + c.state.ExitCode = utils.Int32Ptr(int32(statusCode)) } oomFilePath := filepath.Join(c.bundlePath, "oom") @@ -947,3 +980,60 @@ func prepareProcessExec(c *Container, cmd []string, tty bool) (*os.File, error) } return f, nil } + +// ReadConmonPidFile attempts to read conmon's pid from its pid file +// This function makes no verification that this file should exist +// it is up to the caller to verify that this container has a conmon +func ReadConmonPidFile(c *Container) (int, error) { + contents, err := ioutil.ReadFile(c.conmonPidFilePath()) + if err != nil { + return -1, err + } + // Convert it to an int + conmonPID, err := strconv.Atoi(string(contents)) + if err != nil { + return -1, err + } + return conmonPID, nil +} + +func (c *Container) conmonPidFilePath() string { + return filepath.Join(c.bundlePath, "conmon-pidfile") +} + +// SpoofOOM is a function that sets a container state as though it OOM'd. It's used in situations +// where another process in the container's cgroup (like conmon) OOM'd when it wasn't supposed to, +// allowing us to report to the kubelet that the container OOM'd instead. 
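The new `updateContainerStatusFromExitFile` path above recovers the real status from a per-container exit file instead of unconditionally stamping 255. A self-contained sketch of that convention; the `exit` file name matches the hunk, while using the file's mtime as the finished time is an assumption made for illustration:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"
)

// readExitFile returns the recorded exit status, plus the time the file was
// written as a stand-in for the container's finished time.
func readExitFile(persistDir string) (int32, time.Time, error) {
	path := filepath.Join(persistDir, "exit")
	fi, err := os.Stat(path)
	if err != nil {
		return 0, time.Time{}, fmt.Errorf("failed to find exit file: %v", err)
	}
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, time.Time{}, fmt.Errorf("failed to read exit file: %v", err)
	}
	code, err := strconv.Atoi(strings.TrimSpace(string(raw)))
	if err != nil {
		return 0, time.Time{}, fmt.Errorf("status code conversion failed: %v", err)
	}
	return int32(code), fi.ModTime(), nil
}

func main() {
	dir, err := ioutil.TempDir("", "persist")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Simulate conmon persisting the exit status when the container dies.
	if err := ioutil.WriteFile(filepath.Join(dir, "exit"), []byte("137"), 0600); err != nil {
		panic(err)
	}

	code, finished, err := readExitFile(dir)
	if err != nil {
		panic(err)
	}
	fmt.Printf("exit code %d, finished at %s\n", code, finished.Format(time.RFC3339))
}
```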
+func (r *Runtime) SpoofOOM(c *Container) { + ecBytes := []byte{'1', '3', '7'} + + c.opLock.Lock() + defer c.opLock.Unlock() + + c.state.Status = ContainerStateStopped + c.state.Finished = time.Now() + c.state.ExitCode = utils.Int32Ptr(137) + c.state.OOMKilled = true + + oomFilePath := filepath.Join(c.bundlePath, "oom") + oomFile, err := os.Create(oomFilePath) + if err != nil { + logrus.Debugf("unable to write to oom file path %s: %v", oomFilePath, err) + } + oomFile.Close() + + exitFilePath := filepath.Join(r.config.ContainerExitsDir, c.id) + exitFile, err := os.Create(exitFilePath) + if err != nil { + logrus.Debugf("unable to write exit file path %s: %v", exitFilePath, err) + return + } + if _, err := exitFile.Write(ecBytes); err != nil { + logrus.Debugf("failed to write exit code to file %s: %v", exitFilePath, err) + } + exitFile.Close() +} + +func ConmonPath(r *Runtime) string { + return r.config.Conmon +} diff --git a/vendor/github.com/cri-o/cri-o/internal/oci/runtime_vm.go b/vendor/github.com/cri-o/cri-o/internal/oci/runtime_vm.go index 2f7df9f26b..250ac21d38 100644 --- a/vendor/github.com/cri-o/cri-o/internal/oci/runtime_vm.go +++ b/vendor/github.com/cri-o/cri-o/internal/oci/runtime_vm.go @@ -177,7 +177,9 @@ func (r *runtimeVM) startRuntimeDaemon(c *Container) error { r.ctx, newRuntimePath, "", + "", c.BundlePath(), + nil, args..., ) if err != nil { @@ -578,7 +580,8 @@ func (r *runtimeVM) UpdateContainerStatus(c *Container) error { c.state.Status = status c.state.Finished = response.ExitedAt - c.state.ExitCode = int32(response.ExitStatus) + exitCode := int32(response.ExitStatus) + c.state.ExitCode = &exitCode return nil } diff --git a/vendor/github.com/cri-o/cri-o/internal/version/version.go b/vendor/github.com/cri-o/cri-o/internal/version/version.go index 9c37c5a15c..0ce03338da 100644 --- a/vendor/github.com/cri-o/cri-o/internal/version/version.go +++ b/vendor/github.com/cri-o/cri-o/internal/version/version.go @@ -1,23 +1,70 @@ package version import ( + "bufio" + "io/ioutil" "os" "path/filepath" "strings" "github.com/blang/semver" "github.com/google/renameio" + "github.com/pkg/errors" ) // Version is the version of the build. -const Version = "1.16.0-dev" +const Version = "1.16.2" + +// ShouldCrioWipe opens the version file, and parses it and the version string +// If there is a parsing error, then crio should wipe, and the error is returned. +// if parsing is successful, it compares the major and minor versions +// and returns whether the major and minor versions are the same. +// If they differ, then crio should wipe. +func ShouldCrioWipe(versionFileName string) (bool, error) { + return shouldCrioWipe(versionFileName, Version) +} + +// shouldCrioWipe is an internal function for testing purposes +func shouldCrioWipe(versionFileName, versionString string) (bool, error) { + f, err := os.Open(versionFileName) + if err != nil { + return true, errors.Errorf("version file %s not found: %v. Triggering wipe", versionFileName, err) + } + r := bufio.NewReader(f) + versionBytes, err := ioutil.ReadAll(r) + if err != nil { + return true, errors.Errorf("reading version file %s failed: %v. Triggering wipe", versionFileName, err) + } + + // parse the version that was laid down by a previous invocation of crio + var oldVersion semver.Version + if err := oldVersion.UnmarshalJSON(versionBytes); err != nil { + return true, errors.Errorf("version file %s malformatted: %v. 
Triggering wipe", versionFileName, err) + } + + // parse the version of the current binary + newVersion, err := parseVersionConstant(versionString, "") + if err != nil { + return true, errors.Errorf("version constant %s malformatted: %v. Triggering wipe", versionString, err) + } + + // in every case that the minor and major version are out of sync, + // we want to preform a {down,up}grade. The common case here is newVersion > oldVersion, + // but even in the opposite case, images are out of date and could be wiped + return newVersion.Major != oldVersion.Major || newVersion.Minor != oldVersion.Minor, nil +} // WriteVersionFile writes the version information to a given file // file is the location of the old version file // gitCommit is the current git commit version. It will be added to the file // to aid in debugging, but will not be used to compare versions func WriteVersionFile(file, gitCommit string) error { - current, err := parseVersionConstant(Version, gitCommit) + return writeVersionFile(file, gitCommit, Version) +} + +// writeVersionFile is an internal function for testing purposes +func writeVersionFile(file, gitCommit, version string) error { + current, err := parseVersionConstant(version, gitCommit) // Sanity check-this should never happen if err != nil { return err diff --git a/vendor/github.com/cri-o/cri-o/pkg/config/config.go b/vendor/github.com/cri-o/cri-o/pkg/config/config.go index 3d4770d7dd..68b158856c 100644 --- a/vendor/github.com/cri-o/cri-o/pkg/config/config.go +++ b/vendor/github.com/cri-o/cri-o/pkg/config/config.go @@ -11,8 +11,9 @@ import ( "github.com/BurntSushi/toml" conmonconfig "github.com/containers/conmon/runner/config" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/types" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" + "github.com/containers/libpod/pkg/hooks" "github.com/containers/libpod/pkg/rootless" createconfig "github.com/containers/libpod/pkg/spec" "github.com/containers/storage" @@ -137,6 +138,9 @@ type RootConfig struct { // LogDir is the default log directory where all logs will go unless kubelet // tells us to put them somewhere else. LogDir string `toml:"log_dir"` + + // VersionFile is the location CRI-O will lay down the version file + VersionFile string `toml:"version_file"` } // RuntimeHandler represents each item of the "crio.runtime.runtimes" TOML @@ -145,6 +149,9 @@ type RuntimeHandler struct { RuntimePath string `toml:"runtime_path"` RuntimeType string `toml:"runtime_type"` RuntimeRoot string `toml:"runtime_root"` + // PrivilegedWithoutHostDevices can be used to restrict passing host devices + // to a container running as privileged. 
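The `shouldCrioWipe` logic above boils down to a major/minor comparison between the version recorded on disk and the running binary; patch-level skew keeps state, anything larger wipes. The same rule in isolation, using the blang/semver package the vendored file already imports:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

// shouldWipe reports whether on-disk state from a previous run is considered
// stale: any major or minor skew, in either direction, triggers a wipe.
func shouldWipe(stored, running string) (bool, error) {
	oldV, err := semver.Parse(stored)
	if err != nil {
		return true, fmt.Errorf("stored version malformatted: %v", err)
	}
	newV, err := semver.Parse(running)
	if err != nil {
		return true, fmt.Errorf("running version malformatted: %v", err)
	}
	return newV.Major != oldV.Major || newV.Minor != oldV.Minor, nil
}

func main() {
	for _, stored := range []string{"1.16.0", "1.16.2", "1.15.3", "1.17.0"} {
		wipe, err := shouldWipe(stored, "1.16.2")
		if err != nil {
			panic(err)
		}
		fmt.Printf("stored %-7s vs running 1.16.2 -> wipe=%v\n", stored, wipe)
	}
}
```

So 1.16.0 to 1.16.2 keeps images, while 1.15.3 to 1.16.2 (or a downgrade across a minor boundary) triggers the wipe.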
+ PrivilegedWithoutHostDevices bool `toml:"privileged_without_host_devices"` } // Multiple runtime Handlers in a map @@ -454,6 +461,7 @@ func DefaultConfig() (*Config, error) { Storage: storeOpts.GraphDriverName, StorageOptions: storeOpts.GraphDriverOptions, LogDir: "/var/log/crio/pods", + VersionFile: CrioVersionPath, }, APIConfig: APIConfig{ Listen: CrioSocketPath, @@ -475,7 +483,7 @@ func DefaultConfig() (*Config, error) { ConmonEnv: []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", }, - ConmonCgroup: "pod", + ConmonCgroup: "system.slice", SELinux: selinuxEnabled(), SeccompProfile: "", ApparmorProfile: DefaultApparmorProfile, @@ -490,6 +498,7 @@ func DefaultConfig() (*Config, error) { LogLevel: "error", DefaultSysctls: []string{}, DefaultUlimits: []string{}, + HooksDir: []string{hooks.DefaultDir}, AdditionalDevices: []string{}, }, ImageConfig: ImageConfig{ diff --git a/vendor/github.com/cri-o/cri-o/pkg/config/template.go b/vendor/github.com/cri-o/cri-o/pkg/config/template.go index e558a0ec2f..815509a403 100644 --- a/vendor/github.com/cri-o/cri-o/pkg/config/template.go +++ b/vendor/github.com/cri-o/cri-o/pkg/config/template.go @@ -52,6 +52,9 @@ const templateString = `# The CRI-O configuration file specifies all of the avai # the kubelet. The log directory specified must be an absolute directory. log_dir = "{{ .LogDir }}" +# Location for CRI-O to lay down the version file +version_file = "{{ .VersionFile }}" + # The crio.api table contains settings for the kubelet/gRPC interface. [crio.api] @@ -291,7 +294,9 @@ pause_image = "{{ .PauseImage }}" pause_image_auth_file = "{{ .PauseImageAuthFile }}" # The command to run to have a container stay in the paused state. -# This option supports live configuration reload. +# When explicitly set to "", it will fallback to the entrypoint and command +# specified in the pause image. When commented out, it will fallback to the +# default: "/pause". This option supports live configuration reload. pause_command = "{{ .PauseCommand }}" # Path to the file which decides what sort of policy we use when deciding diff --git a/vendor/github.com/cri-o/cri-o/utils/utils.go b/vendor/github.com/cri-o/cri-o/utils/utils.go index 5692e10c6d..bd6280cacf 100644 --- a/vendor/github.com/cri-o/cri-o/utils/utils.go +++ b/vendor/github.com/cri-o/cri-o/utils/utils.go @@ -306,3 +306,8 @@ func GeneratePasswd(username string, uid, gid uint32, homedir, rootfs, rundir st return passwdFile, nil } + +// Int32Ptr is a utility function to assign to integer pointer variables +func Int32Ptr(i int32) *int32 { + return &i +} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 851e81014b..d5b6cbbead 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -4,6 +4,7 @@ Aanand Prasad Aaron Davidson Aaron Feng +Aaron Hnatiw Aaron Huslage Aaron L. 
Xu Aaron Lehmann @@ -44,7 +45,7 @@ Ajey Charantimath ajneu Akash Gupta Akihiro Matsushima -Akihiro Suda +Akihiro Suda Akim Demaille Akira Koyasu Akshay Karle @@ -81,6 +82,7 @@ Alexandre Garnier Alexandre González Alexandre Jomin Alexandru Sfirlogea +Alexei Margasov Alexey Guskov Alexey Kotlyarov Alexey Shamrin @@ -153,6 +155,7 @@ Andy Wilson Anes Hasicic Anil Belur Anil Madhavapeddy +Ankit Jain Ankush Agarwal Anonmily Anran Qiao @@ -184,6 +187,7 @@ Asad Saeeduddin Asbjørn Enge averagehuman Avi Das +Avi Kivity Avi Miller Avi Vaid ayoshitake @@ -507,6 +511,7 @@ Dmitri Shuralyov Dmitry Demeshchuk Dmitry Gusev Dmitry Kononenko +Dmitry Sharshakov Dmitry Shyshkin Dmitry Smirnov Dmitry V. Krivenok @@ -656,6 +661,7 @@ Frederik Loeffert Frederik Nordahl Jul Sabroe Freek Kalter Frieder Bluemle +Fu JinLin Félix Baylac-Jacqué Félix Cantournet Gabe Rosenhouse @@ -688,6 +694,7 @@ Ghislain Bourgeois Giampaolo Mancini Gianluca Borello Gildas Cuisinier +Giovan Isa Musthofa gissehel Giuseppe Mazzotta Gleb Fotengauer-Malinovskiy @@ -898,6 +905,7 @@ Jimmy Cuadra Jimmy Puckett Jimmy Song Jinsoo Park +Jintao Zhang Jiri Appl Jiri Popelka Jiuyue Ma @@ -1079,6 +1087,7 @@ Kunal Kushwaha Kunal Tyagi Kyle Conroy Kyle Linden +Kyle Wuolle kyu Lachlan Coote Lai Jiangshan @@ -1146,6 +1155,7 @@ Luca-Bogdan Grigorescu Lucas Chan Lucas Chi Lucas Molas +Lucas Silvestre Luciano Mores Luis Martínez de Bartolomé Izquierdo Luiz Svoboda @@ -1255,6 +1265,7 @@ Maxim Kulkin Maxim Treskin Maxime Petazzoni Maximiliano Maccanti +Maxwell Meaglith Ma meejah Megan Kostick @@ -1519,6 +1530,7 @@ Quentin Brossard Quentin Perez Quentin Tayssier r0n22 +Radostin Stoyanov Rafal Jeczalik Rafe Colton Raghavendra K T @@ -1743,7 +1755,7 @@ Stefan Berger Stefan J. Wernli Stefan Praszalowicz Stefan S. -Stefan Scherer +Stefan Scherer Stefan Staudenmeyer Stefan Weil Stephan Spindler @@ -1976,6 +1988,7 @@ xamyzhao Xian Chaobo Xianglin Gao Xianlu Bird +Xiao YongBiao XiaoBing Jiang Xiaodong Zhang Xiaoxi He diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE index 0c74e15b05..58b19b6d15 100644 --- a/vendor/github.com/docker/docker/NOTICE +++ b/vendor/github.com/docker/docker/NOTICE @@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). -This product contains software (https://github.com/kr/pty) developed +This product contains software (https://github.com/creack/pty) developed by Keith Rarick, licensed under the MIT License. The following is courtesy of our legal counsel: diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index aa146cdaeb..1565e2af64 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion = "1.40" + DefaultVersion = "1.41" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. 
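The api/common.go hunk above bumps `DefaultVersion` from 1.40 to 1.41. Clients compiled against the new constant can still talk to older daemons when they negotiate; a sketch of that handshake with the public client package (option names believed correct for this vintage of the client, but treat them as an assumption):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// WithAPIVersionNegotiation lets the client downgrade from its
	// compiled-in default (now 1.41) to whatever the daemon supports.
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// The first request performs the negotiation; Ping also reports the
	// daemon's maximum supported version.
	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Printf("negotiated %s (daemon supports up to %s)\n",
		cli.ClientVersion(), ping.APIVersion)
}
```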
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 1f770c0483..cc2451f033 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.40" +basePath: "/v1.41" info: title: "Docker Engine API" - version: "1.40" + version: "1.41" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | @@ -49,8 +49,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.40) is used. - For example, calling `/info` is the same as calling `/v1.40/info`. Using the + If you omit the version-prefix, the current version of the API (v1.41) is used. + For example, calling `/info` is the same as calling `/v1.41/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -463,10 +463,6 @@ definitions: type: "array" items: $ref: "#/definitions/DeviceRequest" - DiskQuota: - description: "Disk limit (in bytes)." - type: "integer" - format: "int64" KernelMemory: description: "Kernel memory limit in bytes." type: "integer" @@ -707,6 +703,19 @@ definitions: description: "A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'" items: type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. Dns: type: "array" description: "A list of DNS servers for the container to use." @@ -1145,6 +1154,7 @@ definitions: type: "object" additionalProperties: type: "array" + x-nullable: true items: $ref: "#/definitions/PortBinding" example: @@ -1169,7 +1179,6 @@ definitions: PortBinding represents a binding between a host IP address and a host port. type: "object" - x-nullable: true properties: HostIp: description: "Host IP address that the container's port is mapped to." @@ -2873,6 +2882,18 @@ definitions: type: "object" additionalProperties: type: "string" + # This option is not used by Windows containers + Capabilities: + type: "array" + description: | + A list of kernel capabilities to be available for container (this overrides the default set). + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" NetworkAttachmentSpec: description: | Read-only spec type for non-swarm containers attached to swarm overlay @@ -3266,7 +3287,7 @@ definitions:
- - "ingress" makes the target port accessible on on every node, + - "ingress" makes the target port accessible on every node, regardless of whether there is a task for the service running on that node or not. - "host" bypasses the routing mesh and publish the port directly on @@ -3284,8 +3305,8 @@ definitions: type: "object" properties: Mode: - description: "The mode of resolution to use for internal load balancing - between tasks." + description: | + The mode of resolution to use for internal load balancing between tasks. type: "string" enum: - "vip" @@ -3809,7 +3830,7 @@ definitions: description: | The driver to use for managing cgroups. type: "string" - enum: ["cgroupfs", "systemd"] + enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" NEventsListener: @@ -3831,6 +3852,17 @@ definitions: or "Windows Server 2016 Datacenter" type: "string" example: "Alpine Linux v3.5" + OSVersion: + description: | + Version of the host's operating system + +
+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "16.04" OSType: description: | Generic type of the operating system of the host, as returned by the @@ -4044,7 +4076,7 @@ definitions: SecurityOptions: description: | List of security features that are enabled on the daemon, such as - apparmor, seccomp, SELinux, and user-namespaces (userns). + apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. Additional configuration options for each security feature may be present, and are included as a comma-separated list of key/value @@ -4057,6 +4089,7 @@ definitions: - "name=seccomp,profile=default" - "name=selinux" - "name=userns" + - "name=rootless" ProductLicense: description: | Reports a summary of the product license on the daemon. @@ -4611,9 +4644,9 @@ paths: parameters: - name: "name" in: "query" - description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`." type: "string" - pattern: "/?[a-zA-Z0-9_-]+" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" in: "body" description: "Container to create" @@ -4840,7 +4873,7 @@ paths: Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: - When pausing a container (on Linux), the cgroups freezer is used to suspend + When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. @@ -5466,7 +5499,7 @@ paths: /containers/{id}/resize: post: summary: "Resize a container TTY" - description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + description: "Resize the TTY for a container." operationId: "ContainerResize" consumes: - "application/octet-stream" @@ -5510,8 +5543,6 @@ paths: description: "no error" 304: description: "container already started" - schema: - $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: @@ -5543,8 +5574,6 @@ paths: description: "no error" 304: description: "container already stopped" - schema: - $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: @@ -5735,9 +5764,9 @@ paths: post: summary: "Pause a container" description: | - Use the cgroups freezer to suspend all processes in a container. + Use the freezer cgroup to suspend all processes in a container. - Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. operationId: "ContainerPause" responses: 204: @@ -6220,12 +6249,17 @@ paths: in: "query" description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." 
type: "string" + - name: "copyUIDGID" + in: "query" + description: "If “1”, “true”, then it will copy UID/GID maps to the dest file or dir" + type: "string" - name: "inputStream" in: "body" required: true description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." schema: type: "string" + format: "binary" tags: ["Container"] /containers/prune: post: @@ -6455,10 +6489,11 @@ paths: type: "string" - name: "networkmode" in: "query" - description: "Sets the networking mode for the run commands during - build. Supported standard values are: `bridge`, `host`, `none`, and - `container:`. Any other value is taken as a custom network's - name to which this container should connect to." + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. type: "string" - name: "Content-type" in: "header" @@ -9110,7 +9145,9 @@ paths: type: "string" RemoteAddrs: description: "Addresses of manager nodes already participating in the swarm." - type: "string" + type: "array" + items: + type: "string" JoinToken: description: "Secret token for joining this swarm." type: "string" @@ -9545,17 +9582,19 @@ paths: type: "integer" - name: "registryAuthFrom" in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. type: "string" - description: "If the X-Registry-Auth header is not specified, this - parameter indicates where to find registry authorization credentials. The - valid values are `spec` and `previous-spec`." + enum: ["spec", "previous-spec"] default: "spec" - name: "rollback" in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. type: "string" - description: "Set to this parameter to `previous` to cause a - server-side rollback to the previous service spec. The supplied spec will be - ignored in this case." - name: "X-Registry-Auth" in: "header" description: "A base64-encoded auth configuration for pulling from private registries. 
[See the authentication section for details.](#section/Authentication)" diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go index c909d6ca3e..222d141007 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go index 49efa0f2c0..1ec9c3728b 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go index ba41edcf3f..f8a606687c 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ b/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go index 7630ae54cd..33addedf77 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go index 9e3910a6b4..94b6a20e15 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index c710107702..209f33eb91 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -7,9 +7,32 @@ import ( "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/strslice" "github.com/docker/go-connections/nat" - "github.com/docker/go-units" + units "github.com/docker/go-units" ) +// CgroupnsMode represents the cgroup namespace mode of the container +type CgroupnsMode string + +// 
IsPrivate indicates whether the container uses its own private cgroup namespace +func (c CgroupnsMode) IsPrivate() bool { + return c == "private" +} + +// IsHost indicates whether the container shares the host's cgroup namespace +func (c CgroupnsMode) IsHost() bool { + return c == "host" +} + +// IsEmpty indicates whether the container cgroup namespace mode is unset +func (c CgroupnsMode) IsEmpty() bool { + return c == "" +} + +// Valid indicates whether the cgroup namespace mode is valid +func (c CgroupnsMode) Valid() bool { + return c.IsEmpty() || c.IsPrivate() || c.IsHost() +} + // Isolation represents the isolation technology of a container. The supported // values are platform specific type Isolation string @@ -338,7 +361,6 @@ type Resources struct { Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup DeviceRequests []DeviceRequest // List of device requests for device drivers - DiskQuota int64 // Disk limit (in bytes) KernelMemory int64 // Kernel memory limit (in bytes) KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) @@ -382,9 +404,10 @@ type HostConfig struct { CapAdd strslice.StrSlice // List of kernel capabilities to add to the container CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set) - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for ExtraHosts []string // List of extra hosts GroupAdd []string // List of additional groups that the container process will run as IpcMode IpcMode // IPC namespace to use for the container diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go new file mode 100644 index 0000000000..f84f034cd5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response_ext.go @@ -0,0 +1,6 @@ +package types + +// Error returns the error message +func (e ErrorResponse) Error() string { + return e.Message +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 1f75403f78..2e24e769c1 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -57,7 +57,7 @@ func ToJSON(a Args) (string, error) { // then the encoded format will use an older legacy format where the values are a // list of strings, instead of a set. 
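The `CgroupnsMode` helpers added above are plain string predicates, so a caller can validate a mode before handing a `HostConfig` to the daemon. A minimal sketch (not part of this change), using only the type and methods introduced in this hunk:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	hostConfig := container.HostConfig{
		// One of "", "private" or "host"; anything else fails Valid().
		CgroupnsMode: container.CgroupnsMode("private"),
	}
	if !hostConfig.CgroupnsMode.Valid() {
		panic("invalid cgroup namespace mode")
	}
	// IsEmpty reports the unset case, where the daemon default applies.
	fmt.Println("private:", hostConfig.CgroupnsMode.IsPrivate(),
		"unset:", hostConfig.CgroupnsMode.IsEmpty())
}
```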
// -// Deprecated: Use ToJSON +// Deprecated: do not use in any new code; use ToJSON instead func ToParamWithVersion(version string, a Args) (string, error) { if a.Len() == 0 { return "", nil diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go index d6b354bcdf..b5a7a0c490 100644 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -1,4 +1,4 @@ -package image +package image // import "github.com/docker/docker/api/types/image" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 8789ad3b32..53e47084c8 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -4,7 +4,7 @@ import ( "encoding/json" "net" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) // ServiceConfig stores daemon registry services configuration. diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 48190c1762..5bbedfcf68 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -67,10 +67,11 @@ type ContainerSpec struct { // The format of extra hosts on swarmkit is specified in: // http://man7.org/linux/man-pages/man5/hosts.5.html // IP_address canonical_hostname [aliases...] - Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` + Capabilities []string `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go index 1fdc9b0436..e45045866a 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: plugin.proto -// DO NOT EDIT! /* Package runtime is a generated protocol buffer package. 
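For the `Capabilities` field just added to `swarm.ContainerSpec`, a minimal sketch (not part of this change); the image name and capability list are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	spec := swarm.ContainerSpec{
		Image:        "nginx:alpine",
		Capabilities: []string{"CAP_NET_RAW", "CAP_NET_BIND_SERVICE"},
	}
	// Like the neighboring fields, Capabilities carries `json:",omitempty"`,
	// so it is dropped from the wire payload when empty.
	fmt.Printf("capabilities: %v\n", spec.Capabilities)
}
```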
@@ -38,6 +37,7 @@ type PluginSpec struct { Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` } func (m *PluginSpec) Reset() { *m = PluginSpec{} } @@ -73,6 +73,13 @@ func (m *PluginSpec) GetDisabled() bool { return false } +func (m *PluginSpec) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + // PluginPrivilege describes a permission the user has to accept // upon installing a plugin. type PluginPrivilege struct { @@ -160,6 +167,21 @@ func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { } i++ } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -208,24 +230,6 @@ func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -255,6 +259,12 @@ func (m *PluginSpec) Size() (n int) { if m.Disabled { n += 2 } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } return n } @@ -429,6 +439,35 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } } m.Disabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPlugin(dAtA[iNdEx:]) @@ -695,18 +734,21 @@ var ( func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } var fileDescriptorPlugin = []byte{ - // 196 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, - 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, - 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, - 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, - 0x3d, 0xb5, 0x58, 0x82, 
0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, - 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, - 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, - 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, - 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, - 0x0c, 0x01, 0x00, 0x00, + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, + 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, + 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, + 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, + 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, + 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, + 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, + 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, + 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, + 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, + 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, + 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, + 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, + 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, + 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, + 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto index 6d63b7783f..9ef169046b 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -9,6 +9,7 @@ message PluginSpec { string remote = 2; repeated PluginPrivilege privileges = 3; bool disabled = 4; + repeated string env = 5; } // PluginPrivilege describes a permission the user has to accept diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index a39ffcb7be..4cf9a95ff2 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -39,6 +39,7 @@ type ImageInspect struct { Author string Config *container.Config Architecture string + Variant string `json:",omitempty"` Os string OsVersion string `json:",omitempty"` Size int64 @@ -177,6 +178,7 @@ type Info struct { NEventsListener int KernelVersion string OperatingSystem string + OSVersion string OSType string Architecture string IndexServerAddress string diff --git 
a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go index f12e48612c..0c3772d3ad 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -1,4 +1,4 @@ -package volume +package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go index 020198f737..45c3c1c9ae 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -1,4 +1,4 @@ -package volume +package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 317aac1409..0649a69cc7 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -81,6 +81,15 @@ type Client struct { customHTTPHeaders map[string]string // manualOverride is set to true when the version was set by users. manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. + negotiateVersion bool + + // negotiated indicates that API version negotiation took place + negotiated bool } // CheckRedirect specifies the policy for dealing with redirect responses: @@ -169,8 +178,11 @@ func (cli *Client) Close() error { // getAPIPath returns the versioned request path to call the api. // It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(p string, query url.Values) string { +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { var apiPath string + if cli.negotiateVersion && !cli.negotiated { + cli.NegotiateAPIVersion(ctx) + } if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") apiPath = path.Join(cli.basePath, "/v"+v, p) @@ -186,19 +198,31 @@ func (cli *Client) ClientVersion() string { } // NegotiateAPIVersion queries the API and updates the version to match the -// API version. Any errors are silently ignored. +// API version. Any errors are silently ignored. If a manual override is in place, +// either through the `DOCKER_API_VERSION` environment variable, or if the client +// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation +// will be performed. func (cli *Client) NegotiateAPIVersion(ctx context.Context) { - ping, _ := cli.Ping(ctx) - cli.NegotiateAPIVersionPing(ping) + if !cli.manualOverride { + ping, _ := cli.Ping(ctx) + cli.negotiateAPIVersionPing(ping) + } } // NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion -// if the ping version is less than the default version. +// if the ping version is less than the default version. 
If a manual override is +// in place, either through the `DOCKER_API_VERSION` environment variable, or if +// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no +// negotiation is performed. func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { - if cli.manualOverride { - return + if !cli.manualOverride { + cli.negotiateAPIVersionPing(p) } +} +// negotiateAPIVersionPing queries the API and updates the version to match the +// API version. Any errors are silently ignored. +func (cli *Client) negotiateAPIVersionPing(p types.Ping) { // try the latest version before versioning headers existed if p.APIVersion == "" { p.APIVersion = "1.24" @@ -213,6 +237,12 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { if versions.LessThan(p.APIVersion, cli.version) { cli.version = p.APIVersion } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. + if cli.negotiateVersion { + cli.negotiated = true + } } // DaemonHost returns the host address used by the client @@ -222,7 +252,8 @@ func (cli *Client) DaemonHost() string { // HTTPClient returns a copy of the HTTP client bound to the server func (cli *Client) HTTPClient() *http.Client { - return &*cli.client + c := *cli.client + return &c } // ParseHostURL parses a url string, validates the string is a host url, and diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index 1e7a63a9c0..c099d80e2a 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -35,6 +35,7 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis } if options.Filters.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) if err != nil { diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index 6e56538955..f347cadf14 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -90,6 +90,7 @@ func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url } if options.Filters.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 8609982739..e77084af64 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -23,7 +23,7 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{}, err } - apiPath := cli.getAPIPath(path, query) + apiPath := cli.getAPIPath(ctx, path, query) req, err := http.NewRequest("POST", apiPath, bodyEncoded) if err != nil { return types.HijackedResponse{}, err @@ -87,6 +87,8 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto // Server hijacks the connection, error 'connection closed' expected resp, err := clientconn.Do(req) + + //lint:ignore SA1019 for connecting to old (pre go1.8) daemons if err != httputil.ErrPersistEOF { if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index 4fa8c006b2..a5bc4b095f 100644 
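The negotiation changes above split `NegotiateAPIVersionPing` so the explicit and automatic paths share `negotiateAPIVersionPing`, and cache the outcome in the new `negotiated` flag. A minimal sketch (not part of this change) of the automatic flow, using the `WithAPIVersionNegotiation` option defined further down in options.go:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// The first request pings the daemon and, if needed, lowers the client
	// version to match; later requests reuse the cached result.
	if _, err := cli.Ping(context.Background()); err != nil {
		fmt.Println("ping failed:", err)
	}
	fmt.Println("negotiated API version:", cli.ClientVersion())
}
```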
--- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -24,6 +24,7 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions } } if optionFilters.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) if err != nil { return images, err diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index 7130c1364e..8ca7eb6128 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -13,6 +13,7 @@ import ( func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { query := url.Values{} if options.Filters.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index 33921867bf..6f77f0955f 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -6,6 +6,7 @@ import ( "net/http" "os" "path/filepath" + "time" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" @@ -102,6 +103,14 @@ func WithHTTPClient(client *http.Client) Opt { } } +// WithTimeout configures the time limit for requests made by the HTTP client +func WithTimeout(timeout time.Duration) Opt { + return func(c *Client) error { + c.client.Timeout = timeout + return nil + } +} + // WithHTTPHeaders overrides the client default http headers func WithHTTPHeaders(headers map[string]string) Opt { return func(c *Client) error { @@ -150,3 +159,14 @@ func WithVersion(version string) Opt { return nil } } + +// WithAPIVersionNegotiation enables automatic API version negotiation for the client. +// With this option enabled, the client automatically negotiates the API version +// to use when making requests. API version negotiation is performed on the first +// request; subsequent requests will not re-negotiate. 
+func WithAPIVersionNegotiation() Opt { + return func(c *Client) error { + c.negotiateVersion = true + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index 4553e0fb71..90f39ec14f 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -31,6 +31,8 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { // Server handled the request, so parse the response return parsePingResponse(cli, serverResp) } + } else if IsErrConnectionFailed(err) { + return ping, err } req, err = cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index 8285cecd6e..a51c930e6d 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -15,6 +15,7 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P query := url.Values{} if filter.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, filter) if err != nil { return plugins, err diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 0afe26d588..144c416369 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -50,15 +50,6 @@ func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, b return cli.sendRequest(ctx, "POST", path, query, body, headers) } -// put sends an http request to the docker API using the method PUT. -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.sendRequest(ctx, "PUT", path, query, body, headers) -} - // putRaw sends an http request to the docker API using the method PUT. func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { return cli.sendRequest(ctx, "PUT", path, query, body, headers) @@ -115,7 +106,7 @@ func (cli *Client) buildRequest(method, path string, body io.Reader, headers hea } func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { - req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) if err != nil { return serverResponse{}, err } @@ -178,7 +169,13 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp // this is localised - for example in French the error would be // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { - err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. 
This error may also indicate that the docker daemon is not running.") + // Checks if client is running with elevated privileges + if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { + err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.") + } else { + f.Close() + err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.") + } } return serverResp, errors.Wrap(err, "error during connect") diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index 620fc6cff7..56bfe55b71 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -9,7 +9,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index 2380d56382..d68fc2b986 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -15,6 +15,7 @@ func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumet query := url.Values{} if filter.Len() > 0 { + //lint:ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, filter) if err != nil { return volumes, err diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index ac9bf6d33e..07552f1cc1 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -4,6 +4,7 @@ import ( "fmt" "net/http" + containerderrors "github.com/containerd/containerd/errdefs" "github.com/docker/distribution/registry/api/errcode" "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" @@ -47,6 +48,10 @@ func GetHTTPErrorStatusCode(err error) int { if statusCode != http.StatusInternalServerError { return statusCode } + statusCode = statusCodeFromContainerdError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } statusCode = statusCodeFromDistributionError(err) if statusCode != http.StatusInternalServerError { return statusCode @@ -136,9 +141,6 @@ func statusCodeFromGRPCError(err error) int { case codes.Unavailable: // code 14 return http.StatusServiceUnavailable default: - if e, ok := err.(causer); ok { - return statusCodeFromGRPCError(e.Cause()) - } // codes.Canceled(1) // codes.Unknown(2) // codes.DeadlineExceeded(4) @@ -163,10 +165,27 @@ func statusCodeFromDistributionError(err error) int { } case errcode.ErrorCoder: return errs.ErrorCode().Descriptor().HTTPStatusCode - default: - if e, ok := err.(causer); ok { - return statusCodeFromDistributionError(e.Cause()) - } } return http.StatusInternalServerError } + +// statusCodeFromContainerdError returns status code for containerd errors when +// consumed directly (not through gRPC) +func statusCodeFromContainerdError(err error) int { + switch { + case containerderrors.IsInvalidArgument(err): + return http.StatusBadRequest + case containerderrors.IsNotFound(err): + return http.StatusNotFound + case containerderrors.IsAlreadyExists(err): + return http.StatusConflict + case 
containerderrors.IsFailedPrecondition(err): + return http.StatusPreconditionFailed + case containerderrors.IsUnavailable(err): + return http.StatusServiceUnavailable + case containerderrors.IsNotImplemented(err): + return http.StatusNotImplemented + default: + return http.StatusInternalServerError + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 0000000000..7307d9694f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 0000000000..cbcf865323 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,1294 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +var unpigzPath string + +func init() { + if path, err := exec.LookPath("unpigz"); err != nil { + logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library") + } else { + logrus.Debugf("Using unpigz binary found at path %s", path) + unpigzPath = path + } +} + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *idtools.Identity + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } +) + +// Archiver implements the Archiver interface and allows the reuse of most utility functions of +// this package with a pluggable Untar function. Also, to facilitate the passing of specific id +// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMapping *idtools.IdentityMapping +} + +// NewDefaultArchiver returns a new Archiver without any IdentityMapping +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. 
+type breakoutError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) +} + +func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + if unpigzPath == "" { + return gzip.NewReader(buf) + } + + disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") + if disablePigzEnv != "" { + if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil { + return nil, err + } else if disablePigz { + return gzip.NewReader(buf) + } + } + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { + return ioutils.NewReadCloserWrapper(readBuf, func() error { + cancel() + return readBuf.Close() + }) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + + gzReader, err := gzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + ctx, cancel := context.WithCancel(context.Background()) + + xzReader, err := xzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + hdr.Name = canonicalTarName(name, fi.IsDir()) + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IdentityMapping *idtools.IdentityMapping + ChownOpts *idtools.Identity + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IdentityMapping: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
+func canonicalTarName(name string, isDir bool) string { + name = CanonicalTarNameForPath(name) + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + //check whether the file is overlayfs whiteout + //if yes, skip re-mapping container ID mappings. + isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. 
+ file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP || err == syscall.EPERM { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. 
However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + // EPERM occurs if modifying xattrs is not allowed. This can + // happen when running in userns with restrictions (ChromeOS). + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." 
is stat-ed and "file" is not a
+			// directory. So, we must split the source path and use the
+			// basename as the include.
+			if len(options.IncludeFiles) > 0 {
+				logrus.Warn("Tar: Can't archive a file with includes")
+			}
+
+			dir, base := SplitPathDirEntry(srcPath)
+			srcPath = dir
+			options.IncludeFiles = []string{base}
+		}
+
+		if len(options.IncludeFiles) == 0 {
+			options.IncludeFiles = []string{"."}
+		}
+
+		seen := make(map[string]bool)
+
+		for _, include := range options.IncludeFiles {
+			rebaseName := options.RebaseNames[include]
+
+			walkRoot := getWalkRoot(srcPath, include)
+			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+				if err != nil {
+					logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+					return nil
+				}
+
+				relFilePath, err := filepath.Rel(srcPath, filePath)
+				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+					// Error getting relative path OR we are looking
+					// at the source directory path. Skip in both situations.
+					return nil
+				}
+
+				if options.IncludeSourceDir && include == "." && relFilePath != "." {
+					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+				}
+
+				skip := false
+
+				// If "include" is an exact match for the current file
+				// then even if there's an "excludePatterns" pattern that
+				// matches it, don't skip it. IOW, assume an explicit 'include'
+				// is asking for that file no matter what - which is true
+				// for some files, like .dockerignore and Dockerfile (sometimes)
+				if include != relFilePath {
+					skip, err = pm.Matches(relFilePath)
+					if err != nil {
+						logrus.Errorf("Error matching %s: %v", relFilePath, err)
+						return err
+					}
+				}
+
+				if skip {
+					// If we want to skip this file and it's a directory
+					// then we should first check to see if there's an
+					// excludes pattern (e.g. !dir/file) that starts with this
+					// dir. If so then we can't skip this dir.
+
+					// If it's not a dir then we can just return/skip.
+					if !f.IsDir() {
+						return nil
+					}
+
+					// No exceptions (!...) in patterns so just skip dir
+					if !pm.Exclusions() {
+						return filepath.SkipDir
+					}
+
+					dirSlash := relFilePath + string(filepath.Separator)
+
+					for _, pat := range pm.Patterns() {
+						if !pat.Exclusion() {
+							continue
+						}
+						if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+							// found a match - so can't skip this dir
+							return nil
+						}
+					}
+
+					// No matching exclusion dir so just skip dir
+					return filepath.SkipDir
+				}
+
+				if seen[relFilePath] {
+					return nil
+				}
+				seen[relFilePath] = true
+
+				// Rename the base resource.
+				if rebaseName != "" {
+					var replacement string
+					if rebaseName != string(filepath.Separator) {
+						// Special case the root directory to replace with an
+						// empty string instead so that we don't end up with
+						// double slashes in the paths.
+						replacement = rebaseName
+					}
+
+					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+				}
+
+				if err := ta.addTarFile(filePath, relFilePath); err != nil {
+					logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+					// if pipe is broken, stop writing tar stream to it
+					if err == io.ErrClosedPipe {
+						return err
+					}
+				}
+				return nil
+			})
+		}
+	}()
+
+	return pipeReader, nil
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+	tr := tar.NewReader(decompressedArchive)
+	trBuf := pools.BufioReader32KPool.Get(nil)
+	defer pools.BufioReader32KPool.Put(trBuf)
+
+	var dirs []*tar.Header
+	idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+	rootIDs := idMapping.RootPair()
+	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
+
+	// Iterate through the files in the archive.
+loop:
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		// Normalize name, for safety and for a simple is-root check
+		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+		// This keeps "..\" as-is, but normalizes "\..\" to "\".
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		for _, exclude := range options.ExcludePatterns {
+			if strings.HasPrefix(hdr.Name, exclude) {
+				continue loop
+			}
+		}
+
+		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+		// the filepath format for the OS on which the daemon is running. Hence
+		// the check for a slash-suffix MUST be done in an OS-agnostic way.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return err
+		}
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+
+		// If path exists we almost always just want to remove and replace it
+		// The only exception is when it is a directory *and* the file from
+		// the layer is also a directory. Then we want to merge them (i.e.
+		// just apply the metadata from the layer).
+		if fi, err := os.Lstat(path); err == nil {
+			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing directory with a non-directory from the archive.
+				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+			}
+
+			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing non-directory with a directory from the archive.
+				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+			}
+
+			if fi.IsDir() && hdr.Name == "." {
+				continue
+			}
+
+			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+				if err := os.RemoveAll(path); err != nil {
+					return err
+				}
+			}
+		}
+		trBuf.Reset(tr)
+
+		if err := remapIDs(idMapping, hdr); err != nil {
+			return err
+		}
+
+		if whiteoutConverter != nil {
+			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+			if err != nil {
+				return err
+			}
+			if !writeFile {
+				continue
+			}
+		}
+
+		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
+			return err
+		}
+
+		// Directory mtimes must be handled at the end to avoid further
+		// file creation in them to modify the directory mtime
+		if hdr.Typeflag == tar.TypeDir {
+			dirs = append(dirs, hdr)
+		}
+	}
+
+	for _, hdr := range dirs {
+		path := filepath.Join(dest, hdr.Name)
+
+		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+	return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+	if tarArchive == nil {
+		return fmt.Errorf("Empty archive")
+	}
+	dest = filepath.Clean(dest)
+	if options == nil {
+		options = &TarOptions{}
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+
+	r := tarArchive
+	if decompress {
+		decompressedArchive, err := DecompressStream(tarArchive)
+		if err != nil {
+			return err
+		}
+		defer decompressedArchive.Close()
+		r = decompressedArchive
+	}
+
+	return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+	logrus.Debugf("TarUntar(%s %s)", src, dst)
+	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+	if err != nil {
+		return err
+	}
+	defer archive.Close()
+	options := &TarOptions{
+		UIDMaps: archiver.IDMapping.UIDs(),
+		GIDMaps: archiver.IDMapping.GIDs(),
+	}
+	return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untars a file from path to a destination, src is the source tar file path.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+	archive, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer archive.Close()
+	options := &TarOptions{
+		UIDMaps: archiver.IDMapping.UIDs(),
+		GIDMaps: archiver.IDMapping.GIDs(),
+	}
+	return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
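+//
+// Usage sketch (illustrative only; assumes an Archiver value already
+// constructed with the desired identity mapping):
+//
+//	if err := archiver.CopyWithTar("/src/tree", "/dst/tree"); err != nil {
+//		log.Fatal(err)
+//	}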
+func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this Archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMapping.RootPair() + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + + errC <- func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMapping, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IdentityMapping returns the IdentityMapping of the archiver. +func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { + return archiver.IDMapping +} + +func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { + ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. 
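+//
+// Illustrative call shape (compressedInput is a placeholder io.Reader; this is
+// presumably how the xz path of DecompressStream drives this helper):
+//
+//	out, err := cmdStream(exec.Command("xz", "-d", "-c", "-q"), compressedInput)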
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + + // Ensure the command has exited before we clean anything up + done := make(chan struct{}) + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(done) + }() + + return ioutils.NewReadCloserWrapper(pipeR, func() error { + // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as + // cmd.Wait waits for any non-file stdout/stderr/stdin to close. + err := pipeR.Close() + <-done + return err + }), nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. 
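+//
+// (Read below also closes and deletes the file once Size bytes have been
+// consumed, so an explicit Close is only needed when a reader stops early.)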
+func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go new file mode 100644 index 0000000000..0601f7b0d1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -0,0 +1,261 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containerd/continuity/fs" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{inUserNS: inUserNS} + } + return nil +} + +type overlayWhiteoutConverter struct { + inUserNS bool +} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + if err != nil { + if c.inUserNS { + if err = replaceDirWithOverlayOpaque(dir); err != nil { + return false, errors.Wrapf(err, "replaceDirWithOverlayOpaque(%q) failed", dir) + } + } else { + return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir) + } + } + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + if c.inUserNS { + // Ubuntu and a few distros support overlayfs in 
userns.
+				//
+				// Although we can't call mknod directly in userns (at least on bionic kernel 4.15),
+				// we can still create 0,0 char device using mknodChar0Overlay().
+				//
+				// NOTE: we don't need this hack for the containerd snapshotter+unpack model.
+				if err := mknodChar0Overlay(originalPath); err != nil {
+					return false, errors.Wrapf(err, "failed to mknodChar0Overlay(%q)", originalPath)
+				}
+			} else {
+				return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
+			}
+		}
+		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+			return false, err
+		}
+
+		// don't write the file itself
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// mknodChar0Overlay creates 0,0 char device by mounting overlayfs and unlinking.
+// This function can be used for creating 0,0 char device in userns on Ubuntu.
+//
+// Steps:
+// * Mkdir lower,upper,merged,work
+// * Create lower/dummy
+// * Mount overlayfs
+// * Unlink merged/dummy
+// * Unmount overlayfs
+// * Make sure a 0,0 char device is created as upper/dummy
+// * Rename upper/dummy to cleansedOriginalPath
+func mknodChar0Overlay(cleansedOriginalPath string) error {
+	dir := filepath.Dir(cleansedOriginalPath)
+	tmp, err := ioutil.TempDir(dir, "mc0o")
+	if err != nil {
+		return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
+	}
+	defer os.RemoveAll(tmp)
+	lower := filepath.Join(tmp, "l")
+	upper := filepath.Join(tmp, "u")
+	work := filepath.Join(tmp, "w")
+	merged := filepath.Join(tmp, "m")
+	for _, s := range []string{lower, upper, work, merged} {
+		if err := os.MkdirAll(s, 0700); err != nil {
+			return errors.Wrapf(err, "failed to mkdir %s", s)
+		}
+	}
+	dummyBase := "d"
+	lowerDummy := filepath.Join(lower, dummyBase)
+	if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil {
+		return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy)
+	}
+	mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
+	// docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
+	if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
+		return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
+	}
+	mergedDummy := filepath.Join(merged, dummyBase)
+	if err := os.Remove(mergedDummy); err != nil {
+		syscall.Unmount(merged, 0)
+		return errors.Wrapf(err, "failed to unlink %s", mergedDummy)
+	}
+	if err := syscall.Unmount(merged, 0); err != nil {
+		return errors.Wrapf(err, "failed to unmount %s", merged)
+	}
+	upperDummy := filepath.Join(upper, dummyBase)
+	if err := isChar0(upperDummy); err != nil {
+		return err
+	}
+	if err := os.Rename(upperDummy, cleansedOriginalPath); err != nil {
+		return errors.Wrapf(err, "failed to rename %s to %s", upperDummy, cleansedOriginalPath)
+	}
+	return nil
+}
+
+func isChar0(path string) error {
+	osStat, err := os.Stat(path)
+	if err != nil {
+		return errors.Wrapf(err, "failed to stat %s", path)
+	}
+	st, ok := osStat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return errors.Errorf("got unsupported stat for %s", path)
+	}
+	if os.FileMode(st.Mode)&syscall.S_IFMT != syscall.S_IFCHR {
+		return errors.Errorf("%s is not a character device, got mode=%d", path, st.Mode)
+	}
+	if st.Rdev != 0 {
+		return errors.Errorf("%s is not a 0,0 character device, got Rdev=%d", path, st.Rdev)
+	}
+	return nil
+}
+
+// replaceDirWithOverlayOpaque replaces path with a new directory with trusted.overlay.opaque
+// xattr. The contents of the directory are preserved.
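+//
+// The underlying trick, sketched in shell terms (assuming an overlayfs-capable
+// kernel; createDirWithOverlayOpaque below implements it with syscall.Mount):
+//
+//	mkdir l u w m && mkdir l/d
+//	mount -t overlay -o lowerdir=l,upperdir=u,workdir=w overlay m
+//	rmdir m/d && mkdir m/d && umount m
+//	# u/d now carries trusted.overlay.opaque=y, without any setxattr call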
+func replaceDirWithOverlayOpaque(path string) error { + if path == "/" { + return errors.New("replaceDirWithOverlayOpaque: path must not be \"/\"") + } + dir := filepath.Dir(path) + tmp, err := ioutil.TempDir(dir, "rdwoo") + if err != nil { + return errors.Wrapf(err, "failed to create a tmp directory under %s", dir) + } + defer os.RemoveAll(tmp) + // newPath is a new empty directory crafted with trusted.overlay.opaque xattr. + // we copy the content of path into newPath, remove path, and rename newPath to path. + newPath, err := createDirWithOverlayOpaque(tmp) + if err != nil { + return errors.Wrapf(err, "createDirWithOverlayOpaque(%q) failed", tmp) + } + if err := fs.CopyDir(newPath, path); err != nil { + return errors.Wrapf(err, "CopyDir(%q, %q) failed", newPath, path) + } + if err := os.RemoveAll(path); err != nil { + return err + } + return os.Rename(newPath, path) +} + +// createDirWithOverlayOpaque creates a directory with trusted.overlay.opaque xattr, +// without calling setxattr, so as to allow creating opaque dir in userns on Ubuntu. +func createDirWithOverlayOpaque(tmp string) (string, error) { + lower := filepath.Join(tmp, "l") + upper := filepath.Join(tmp, "u") + work := filepath.Join(tmp, "w") + merged := filepath.Join(tmp, "m") + for _, s := range []string{lower, upper, work, merged} { + if err := os.MkdirAll(s, 0700); err != nil { + return "", errors.Wrapf(err, "failed to mkdir %s", s) + } + } + dummyBase := "d" + lowerDummy := filepath.Join(lower, dummyBase) + if err := os.MkdirAll(lowerDummy, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy) + } + mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work) + // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead. 
+ if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil { + return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged) + } + mergedDummy := filepath.Join(merged, dummyBase) + if err := os.Remove(mergedDummy); err != nil { + syscall.Unmount(merged, 0) + return "", errors.Wrapf(err, "failed to rmdir %s", mergedDummy) + } + // upperDummy becomes a 0,0-char device file here + if err := os.Mkdir(mergedDummy, 0700); err != nil { + syscall.Unmount(merged, 0) + return "", errors.Wrapf(err, "failed to mkdir %s", mergedDummy) + } + // upperDummy becomes a directory with trusted.overlay.opaque xattr + // (but can't be verified in userns) + if err := syscall.Unmount(merged, 0); err != nil { + return "", errors.Wrapf(err, "failed to unmount %s", merged) + } + upperDummy := filepath.Join(upper, dummyBase) + return upperDummy, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go new file mode 100644 index 0000000000..65a73354c4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go similarity index 52% rename from vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go rename to vendor/github.com/docker/docker/pkg/archive/archive_unix.go index bb6bf71452..d626336032 100644 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -1,29 +1,21 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - // +build !windows -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "errors" "os" "path/filepath" + "strings" "syscall" "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" "golang.org/x/sys/unix" ) -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - // fixVolumePathPrefix does platform specific processing to ensure that if // the path being passed in is not in a volume path format, convert it to one. func fixVolumePathPrefix(srcPath string) string { @@ -35,7 +27,36 @@ func fixVolumePathPrefix(srcPath string) string { // can't use filepath.Join(srcPath,include) because this will clean away // a trailing "." or "/" which may be important. func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include + return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. 
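+//
+// For example (illustrative): CanonicalTarNameForPath("dir/file") == "dir/file";
+// the Windows variant in archive_windows.go maps `dir\file` to "dir/file" via
+// filepath.ToSlash.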
+func CanonicalTarNameForPath(p string) string { + return p // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return } func getInodeFromStat(stat interface{}) (inode uint64, err error) { @@ -48,7 +69,7 @@ func getInodeFromStat(stat interface{}) (inode uint64, err error) { return } -func getFileIdentity(stat interface{}) (idtools.Identity, error) { +func getFileUIDGID(stat interface{}) (idtools.Identity, error) { s, ok := stat.(*syscall.Stat_t) if !ok { @@ -57,21 +78,38 @@ func getFileIdentity(stat interface{}) (idtools.Identity, error) { return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil } -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - s, ok := stat.(*syscall.Stat_t) + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } - if ok { - // Currently go does not fill in the major/minors - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err } } - - return + return nil } diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go similarity index 62% rename from vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go rename to vendor/github.com/docker/docker/pkg/archive/archive_windows.go index 33c1dff039..ae6b89fd71 100644 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_windows.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -1,35 +1,14 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. 
- -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" - "fmt" "os" "path/filepath" - "strings" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/longpath" ) -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace however we must - // check just in case - if strings.Contains(p, "/") { - //lint:ignore ST1005 Windows should be capitalized :) - return "", fmt.Errorf("Windows path contains forward slash: %s", p) - } - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil -} - // fixVolumePathPrefix does platform specific processing to ensure that if // the path being passed in is not in a volume path format, convert it to one. func fixVolumePathPrefix(srcPath string) string { @@ -42,20 +21,17 @@ func getWalkRoot(srcPath string, include string) string { return filepath.Join(srcPath, include) } -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Inode in stat on Windows - return -} - -func getFileIdentity(stat interface{}) (idtools.Identity, error) { - // no notion of file ownership mapping yet on Windows - return idtools.Identity{}, nil +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return filepath.ToSlash(p) } // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { - // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) permPart := perm & os.ModePerm noPermPart := perm &^ os.ModePerm // Add the x bit: make everything +x from windows @@ -69,3 +45,23 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( // do nothing. no notion of Rdev, Nlink in stat on Windows return } + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. 
no notion of Inode in stat on Windows
+	return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
+	// no notion of file ownership mapping yet on Windows
+	return idtools.Identity{UID: 0, GID: 0}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 0000000000..aedb91b035
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,445 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+	"github.com/sirupsen/logrus"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+	// ChangeModify represents the modify operation.
+	ChangeModify = iota
+	// ChangeAdd represents the add operation.
+	ChangeAdd
+	// ChangeDelete represents the delete operation.
+	ChangeDelete
+)
+
+func (c ChangeType) String() string {
+	switch c {
+	case ChangeModify:
+		return "C"
+	case ChangeAdd:
+		return "A"
+	case ChangeDelete:
+		return "D"
+	}
+	return ""
+}
+
+// Change represents a change, it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be modify, add, delete.
+// This is used for layer diff.
+type Change struct {
+	Path string
+	Kind ChangeType
+}
+
+func (change *Change) String() string {
+	return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int           { return len(c) }
+func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }
+
+// Gnu tar doesn't have sub-second mtime precision. The go tar
+// writer (1.10+) does when using PAX format, but we round times to seconds
+// to ensure archives have the same hashes for backwards compatibility.
+// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
+//
+// Non-sub-second is problematic when we apply changes via tar
+// files.
We handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a.Equal(b) || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. 
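+		// (e.g. adding /foo/bar/file.txt also emits a ChangeModify for /foo/bar,
+		// unless /foo/bar was already recorded in changedDirs.)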
+ if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. 
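+	// (The append/copy/assign sequence below is the idiomatic slice insert-at-index
+	// pattern: append grows the slice by one, copy shifts the tail right, and the
+	// freed slot at sizeAtEntry receives the directory entry.)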
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue
+		// mutating the filesystem and we can see transient errors
+		// from this
+		for _, change := range changes {
+			if change.Kind == ChangeDelete {
+				whiteOutDir := filepath.Dir(change.Path)
+				whiteOutBase := filepath.Base(change.Path)
+				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+				timestamp := time.Now()
+				hdr := &tar.Header{
+					Name:       whiteOut[1:],
+					Size:       0,
+					ModTime:    timestamp,
+					AccessTime: timestamp,
+					ChangeTime: timestamp,
+				}
+				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+					logrus.Debugf("Can't write whiteout header: %s", err)
+				}
+			} else {
+				path := filepath.Join(dir, change.Path)
+				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+					logrus.Debugf("Can't add file %s to tar: %s", path, err)
+				}
+			}
+		}
+
+		// Make sure to check the error on Close.
+		if err := ta.TarWriter.Close(); err != nil {
+			logrus.Debugf("Can't close layer: %s", err)
+		}
+		if err := writer.Close(); err != nil {
+			logrus.Debugf("failed to close Changes writer: %s", err)
+		}
+	}()
+	return reader, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 0000000000..f8792b3d4e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,286 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"syscall"
+	"unsafe"
+
+	"github.com/docker/docker/pkg/system"
+	"golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+	dir1  string
+	dir2  string
+	root1 *FileInfo
+	root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+	w := &walker{
+		dir1:  dir1,
+		dir2:  dir2,
+		root1: newRootFileInfo(),
+		root2: newRootFileInfo(),
+	}
+
+	i1, err := os.Lstat(w.dir1)
+	if err != nil {
+		return nil, nil, err
+	}
+	i2, err := os.Lstat(w.dir2)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := w.walk("/", i1, i2); err != nil {
+		return nil, nil, err
+	}
+
+	return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
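+// A nil fi means the file exists in only one of the two trees being walked,
+// so there is nothing to register for this side.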
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
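+	// (This is a standard merge of two sorted name lists; in the equal-name case
+	// the entry is kept only when inode or device differs, which is what allows
+	// identical subtrees to be pruned without further stat calls.)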
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 0000000000..ba744741cd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 0000000000..06217b7161 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,43 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size or modification time for dirs, its not a good + // measure of change. 
See https://github.com/moby/moby/issues/9874 + // for a description of the issue with modification time, and + // https://github.com/moby/moby/pull/11422 for the change. + // (Note that in the Windows implementation of this function, + // modification time IS taken as a change). See + // https://github.com/moby/moby/pull/37982 for more information. + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 0000000000..9906685e4b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,34 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Note there is slight difference between the Linux and Windows + // implementations here. Due to https://github.com/moby/moby/issues/9874, + // and the fix at https://github.com/moby/moby/pull/11422, Linux does not + // consider a change to the directory time as a change. Windows on NTFS + // does. See https://github.com/moby/moby/pull/37982 for more information. + + if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 0000000000..57fddac078 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,480 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in the separator, then another is not added. 
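// Illustrative cases (editor's addition, traced from the implementation
// below, assuming sep == '/'):
//
//	PreserveTrailingDotOrSeparator("/a/b", "/a/b/", '/')  => "/a/b/"
//	PreserveTrailingDotOrSeparator("/a/b", "/a/b/.", '/') => "/a/b/."
//	PreserveTrailingDotOrSeparator("/a/b", "/a/b", '/')   => "/a/b"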
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { + // Ensure paths are in platform semantics + cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) + originalPath = strings.Replace(originalPath, "/", string(sep), -1) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath, sep) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(sep) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { + cleanedPath += string(sep) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string, sep byte) bool { + return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string, sep byte) bool { + return len(path) > 0 && path[len(path)-1] == sep +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(filepath.FromSlash(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(os.PathSeparator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. 
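// For instance (editor's sketch): SplitPathDirEntry("/var/log/app") yields
// ("/var/log", "app"), while SplitPathDirEntry("/var/log/.") yields
// ("/var/log", "."), which preserves the caller's request to archive the
// directory's contents rather than the directory itself.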
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+	opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+	return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions (the TarOptions struct)
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+	filter := []string{sourceBase}
+	return &TarOptions{
+		Compression:      Uncompressed,
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	}
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// normalize the file path and then evaluate the symbolic link
+	// we will use the target file instead of the symbolic link if
+	// followLink is set
+	path = normalizePath(path)
+
+	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	stat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	return CopyInfo{
+		Path:       resolvedPath,
+		Exists:     true,
+		IsDir:      stat.IsDir(),
+		RebaseName: rebaseName,
+	}, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+	path = normalizePath(path)
+	originalPath := path
+
+	stat, err := os.Lstat(path)
+
+	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+		// The path exists and is not a symlink.
+		return CopyInfo{
+			Path:   path,
+			Exists: true,
+			IsDir:  stat.IsDir(),
+		}, nil
+	}
+
+	// While the path is a symlink.
+	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+		if n > maxSymlinkIter {
+			// Don't follow symlinks more than this arbitrary number of times.
+			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+		}
+
+		// The path is a symbolic link. We need to evaluate it so that the
+		// destination of the copy operation is the link target and not the
+		// link itself. This is notably different than CopyInfoSourcePath which
+		// only evaluates symlinks before the last appearing path separator.
+		// Also note that it is okay if the last path element is a broken
+		// symlink as the copy operation should create the target.
+		var linkTarget string
+
+		linkTarget, err = os.Readlink(path)
+		if err != nil {
+			return CopyInfo{}, err
+		}
+
+		if !system.IsAbs(linkTarget) {
+			// Join with the parent directory.
+			dstParent, _ := SplitPathDirEntry(path)
+			linkTarget = filepath.Join(dstParent, linkTarget)
+		}
+
+		path = linkTarget
+		stat, err = os.Lstat(path)
+	}
+
+	if err != nil {
+		// It's okay if the destination path doesn't exist.
We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Stat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path, os.PathSeparator): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. 
In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if err == io.EOF {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			// srcContent tar stream, as served by TarWithOptions(), is
+			// definitely in PAX format, but tar.Next() mistakenly guesses it
+			// as USTAR, which creates a problem: if the newBase is >100
+			// characters long, WriteHeader() returns an error like
+			// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
+			//
+			// To fix, set the format to PAX here. See docker/for-linux issue #484.
+			hdr.Format = tar.FormatPAX
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+			if hdr.Typeflag == tar.TypeLink {
+				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+			}
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// TODO @gupta-ak. These might have to be changed in the future to be
+// continuity driver aware as well to support LCOW.
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
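// Editor's summary (sketch) of the PrepareArchiveCopy switch above, which
// implements docker-cp-style semantics:
//
//	dst exists and is a dir           -> extract into dst unchanged
//	dst exists, src is a dir          -> ErrCannotCopyDir
//	dst exists, both are files        -> rebase entries to dst's basename
//	dst missing, src is a dir         -> extract into dst's parent; the
//	                                     created dir takes dst's name
//	dst missing but asserted as a dir -> ErrDirNotExists
//	dst missing, src is a file        -> create dst, rebase to its basename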
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, based on
+// whether symbolic links should be followed. If followLink is true,
+// resolvedPath is the link target of any symbolic link file; otherwise only
+// the symlink of the parent directory is resolved, and a symbolic link file
+// itself is returned without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// if not following the symbolic link, only resolve the symlink of the parent dir
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path, os.PathSeparator) &&
+			filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath; it returns
+// the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// linkTarget will have been cleaned (no trailing path separators and dot) so
+	// we can manually join it with them
+	var rebaseName string
+	if specifiesCurrentDir(path) &&
+		!specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path, os.PathSeparator) &&
+		!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 0000000000..3958364f5b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 0000000000..a878d1bac4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 0000000000..27897e6ab7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,260 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+		if runtime.GOOS == "windows" {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note as these operations are platform specific, so must the slash be.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = system.MkdirAll(parentPath, 0600)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
+			// We don't want this directory, but we need the files in them so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note as these operations are platform specific, so must the slash be.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
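// Worked example of the whiteout handling earlier in this loop (editor's
// sketch; the constants are defined in whiteouts.go, with WhiteoutPrefix
// ".wh." and WhiteoutOpaqueDir ".wh..wh..opq"):
//
//	entry in layer tar       effect under dest/
//	etc/.wh.hosts            deletes dest/etc/hosts (plain whiteout)
//	var/lib/.wh..wh..opq     marks dest/var/lib opaque: anything inside it
//	                         that this layer did not itself unpack is removed
//	usr/bin/tool             extracted normally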
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(idMapping, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
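// Editor's note: the only difference from ApplyLayer above is the decompress
// flag handed to applyLayerHandler. ApplyLayer first wraps the stream in
// DecompressStream (which is assumed here to sniff the compression format,
// e.g. gzip), while this variant feeds the reader to UnpackLayer as-is.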
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + if runtime.GOOS != "windows" { + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) + } + + if decompress { + decompLayer, err := DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompLayer.Close() + layer = decompLayer + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go new file mode 100644 index 0000000000..495db809e9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/pkg/archive" + "github.com/sirupsen/logrus" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 
0000000000..797143ee84 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = (1 << 30) - 2 + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 0000000000..f58bf227fd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go similarity index 85% rename from vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go rename to vendor/github.com/docker/docker/pkg/archive/whiteouts.go index a61c22a083..4c072a87ee 100644 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/whiteouts.go +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -1,8 +1,4 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package archive +package archive // import "github.com/docker/docker/pkg/archive" // Whiteouts are files with a special meaning for the layered filesystem. // Docker uses AUFS whiteout files inside exported archives. In other diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 0000000000..85435694cf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) 
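// Editor's trace (sketch): parseStringPairs, defined below, pairs up the
// variadic input and pads an odd tail with an empty string, e.g.
//
//	parseStringPairs("foo.txt", "hello world", "emptyfile")
//	  => [["foo.txt", "hello world"], ["emptyfile", ""]]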
+ buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go index 47ecd0c092..5e6310fdcd 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -5,24 +5,8 @@ import ( "os" "path/filepath" "strings" - - "github.com/docker/docker/pkg/idtools" ) -// GetStatic returns the home directory for the current user without calling -// os/user.Current(). This is useful for static-linked binary on glibc-based -// system, because a call to os/user.Current() in a static binary leads to -// segfault due to a glibc issue that won't be fixed in a short term. -// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) -func GetStatic() (string, error) { - uid := os.Getuid() - usr, err := idtools.LookupUID(uid) - if err != nil { - return "", err - } - return usr.Home, nil -} - // GetRuntimeDir returns XDG_RUNTIME_DIR. // XDG_RUNTIME_DIR is typically configured via pam_systemd. // GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index f0a363dedf..67ab9e9b31 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -6,12 +6,6 @@ import ( "errors" ) -// GetStatic is not needed for non-linux systems. -// (Precisely, it is needed only for glibc-based linux systems.) -func GetStatic() (string, error) { - return "", errors.New("homedir.GetStatic() is not supported on this system") -} - // GetRuntimeDir is unsupported on non-linux system. func GetRuntimeDir() (string, error) { return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go index d85e124488..284e8be7ca 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -4,8 +4,7 @@ package homedir // import "github.com/docker/docker/pkg/homedir" import ( "os" - - "github.com/opencontainers/runc/libcontainer/user" + "os/user" ) // Key returns the env var name for the user's home dir based on @@ -17,11 +16,13 @@ func Key() string { // Get returns the home directory of the current user with the help of // environment variables depending on the target operating system. // Returned path should be used with "path/filepath" to form new paths. +// If compiling statically, ensure the osusergo build tag is used. +// If needing to do nss lookups, do not compile statically. 
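// Editor's note (assumed rationale): with the osusergo build tag, os/user
// resolves users with a pure-Go /etc/passwd parser instead of glibc, so a
// statically linked binary no longer risks the segfault mentioned in the
// removed GetStatic comment -- but it also cannot see users that exist only
// through NSS sources such as LDAP or SSSD.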
func Get() string { home := os.Getenv(Key()) if home == "" { - if u, err := user.CurrentUser(); err == nil { - return u.Home + if u, err := user.Current(); err == nil { + return u.HomeDir } } return home diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index 230422eac8..b3af7a4226 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -4,7 +4,6 @@ import ( "bufio" "fmt" "os" - "sort" "strconv" "strings" ) @@ -203,8 +202,6 @@ func (i *IdentityMapping) GIDs() []IDMap { func createIDMap(subidRanges ranges) []IDMap { idMap := []IDMap{} - // sort the ranges by lowest ID first - sort.Sort(subidRanges) containerID := 0 for _, idrange := range subidRanges { idMap = append(idMap, IDMap{ diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index fb239743a0..3981ff64d3 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -59,7 +59,7 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting paths = append(paths, dirPath) } } - if err := system.MkdirAll(path, mode, ""); err != nil { + if err := system.MkdirAll(path, mode); err != nil { return err } } else { diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index 4ae38a1b17..35ede0fffa 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -11,7 +11,7 @@ import ( // Ownership is handled elsewhere, but in the future could be support here // too. func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode, ""); err != nil { + if err := system.MkdirAll(path, mode); err != nil { return err } return nil diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go index d4bbf3c9dc..87514b643d 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -128,8 +128,9 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.mu.Lock() if bp.bufLen == 0 { if bp.closeErr != nil { + err := bp.closeErr bp.mu.Unlock() - return 0, bp.closeErr + return 0, err } bp.wait.Wait() if bp.bufLen == 0 && bp.closeErr != nil { diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go similarity index 59% rename from vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go rename to vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index 99a32ae05e..6d66408984 100644 --- a/vendor/github.com/fsouza/go-dockerclient/internal/jsonmessage/jsonmessage.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -1,20 +1,15 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. 
- -package jsonmessage +package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" import ( "encoding/json" "fmt" "io" - "os" "strings" "time" + "github.com/docker/docker/pkg/term" units "github.com/docker/go-units" - "github.com/fsouza/go-dockerclient/internal/term" - gotty "github.com/ijc/Gotty" + "github.com/morikuni/aec" ) // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to @@ -144,100 +139,34 @@ type JSONMessage struct { Stream string `json:"stream,omitempty"` Status string `json:"status,omitempty"` Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` // deprecated + ProgressMessage string `json:"progress,omitempty"` //deprecated ID string `json:"id,omitempty"` From string `json:"from,omitempty"` Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` // deprecated + ErrorMessage string `json:"error,omitempty"` //deprecated // Aux contains out-of-band data, such as digests for push signing and image id after building. Aux *json.RawMessage `json:"aux,omitempty"` } -/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ -type termInfo interface { - Parse(attr string, params ...interface{}) (string, error) -} - -type noTermInfo struct{} // canary used when no terminfo. - -func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { - return "", fmt.Errorf("noTermInfo") +func clearLine(out io.Writer) { + eraseMode := aec.EraseModes.All + cl := aec.EraseLine(eraseMode) + fmt.Fprint(out, cl) } -func clearLine(out io.Writer, ti termInfo) error { - // el2 (clear whole line) is not exposed by terminfo. - - // First clear line from beginning to cursor - if attr, err := ti.Parse("el1"); err == nil { - _, err = fmt.Fprintf(out, "%s", attr) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(out, "\x1b[1K") - if err != nil { - return err - } - } - // Then clear line from cursor to end - if attr, err := ti.Parse("el"); err == nil { - _, err = fmt.Fprintf(out, "%s", attr) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(out, "\x1b[K") - if err != nil { - return err - } - } - - return nil +func cursorUp(out io.Writer, l uint) { + fmt.Fprint(out, aec.Up(l)) } -func cursorUp(out io.Writer, ti termInfo, l int) error { - if l == 0 { // Should never be the case, but be tolerant - return nil - } - if attr, err := ti.Parse("cuu", l); err == nil { - _, err = fmt.Fprintf(out, "%s", attr) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(out, "\x1b[%dA", l) - if err != nil { - return err - } - } - return nil +func cursorDown(out io.Writer, l uint) { + fmt.Fprint(out, aec.Down(l)) } -func cursorDown(out io.Writer, ti termInfo, l int) error { - if l == 0 { // Should never be the case, but be tolerant - return nil - } - if attr, err := ti.Parse("cud", l); err == nil { - _, err = fmt.Fprintf(out, "%s", attr) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(out, "\x1b[%dB", l) - if err != nil { - return err - } - } - - return nil -} - -// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` -// is a terminal. If this is the case, it will erase the entire current line -// when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { +// Display displays the JSONMessage to `out`. 
If `isTerminal` is true, it will erase the +// entire current line when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.Error != nil { if jm.Error.Code == 401 { return fmt.Errorf("authentication is required") @@ -245,59 +174,32 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { return jm.Error } var endl string - if termInfo != nil && jm.Stream == "" && jm.Progress != nil { - clearLine(out, termInfo) + if isTerminal && jm.Stream == "" && jm.Progress != nil { + clearLine(out) endl = "\r" - _, err := fmt.Fprint(out, endl) - if err != nil { - return err - } - } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal + fmt.Fprintf(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal return nil } if jm.TimeNano != 0 { - _, err := fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) - if err != nil { - return err - } + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) } else if jm.Time != 0 { - _, err := fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) - if err != nil { - return err - } + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) } if jm.ID != "" { - _, err := fmt.Fprintf(out, "%s: ", jm.ID) - if err != nil { - return err - } + fmt.Fprintf(out, "%s: ", jm.ID) } if jm.From != "" { - _, err := fmt.Fprintf(out, "(from %s) ", jm.From) - if err != nil { - return err - } + fmt.Fprintf(out, "(from %s) ", jm.From) } - if jm.Progress != nil && termInfo != nil { - _, err := fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - if err != nil { - return err - } - } else if jm.ProgressMessage != "" { // deprecated - _, err := fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - if err != nil { - return err - } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) } else if jm.Stream != "" { - _, err := fmt.Fprintf(out, "%s%s", jm.Stream, endl) - if err != nil { - return err - } + fmt.Fprintf(out, "%s%s", jm.Stream, endl) } else { - _, err := fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - if err != nil { - return err - } + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) } return nil } @@ -308,25 +210,11 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { var ( dec = json.NewDecoder(in) - ids = make(map[string]int) + ids = make(map[string]uint) ) - var termInfo termInfo - - if isTerminal { - term := os.Getenv("TERM") - if term == "" { - term = "vt102" - } - - var err error - if termInfo, err = gotty.OpenTermInfo(term); err != nil { - termInfo = &noTermInfo{} - } - } - for { - diff := 0 + var diff uint var jm JSONMessage if err := dec.Decode(&jm); err != nil { if err == io.EOF { @@ -354,20 +242,15 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, // when we output something that's not // accounted for in the map, such as a line // with no ID. 
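// Editor's sketch of the repaint logic in this hunk: each progress ID is
// assigned a stable row the first time it is seen; to update that row, the
// writer moves the cursor up (len(ids) - line) rows, rewrites the line in
// place, then moves back down by the same amount, so concurrent pulls each
// repaint only their own row.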
- line = len(ids) + line = uint(len(ids)) ids[jm.ID] = line - if termInfo != nil { - _, err := fmt.Fprintf(out, "\n") - if err != nil { - return err - } + if isTerminal { + fmt.Fprintf(out, "\n") } } - diff = len(ids) - line - if termInfo != nil { - if err := cursorUp(out, termInfo, diff); err != nil { - return err - } + diff = uint(len(ids)) - line + if isTerminal { + cursorUp(out, diff) } } else { // When outputting something that isn't progress @@ -375,13 +258,11 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, // don't want progress entries from some previous // operation to be updated (for example, pull -a // with multiple tags). - ids = make(map[string]int) + ids = make(map[string]uint) } - err := jm.Display(out, termInfo) - if jm.ID != "" && termInfo != nil { - if err := cursorDown(out, termInfo, diff); err != nil { - return err - } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal { + cursorDown(out, diff) } if err != nil { return err diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go index 4afd63c427..be0631c630 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -102,13 +102,13 @@ func Mounted(mountpoint string) (bool, error) { // specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See // flags.go for supported option flags. func Mount(device, target, mType, options string) error { - flag, _ := parseOptions(options) + flag, data := parseOptions(options) if flag&REMOUNT != REMOUNT { if mounted, err := Mounted(target); err != nil || mounted { return err } } - return ForceMount(device, target, mType, options) + return mount(device, target, mType, uintptr(flag), data) } // ForceMount will mount a filesystem according to the specified configuration, diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go index 36c89dc1a2..307b93459d 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -13,8 +13,7 @@ import ( "unsafe" ) -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts. +//parseMountTable returns information about mounted filesystems func parseMountTable(filter FilterFunc) ([]*Info, error) { var rawEntries *C.struct_statfs @@ -37,7 +36,7 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) { if filter != nil { // filter out entries we're not interested in - skip, stop = filter(p) + skip, stop = filter(&mountinfo) if skip { continue } diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go index 8a100f0bc8..db3882874a 100644 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -3,49 +3,49 @@ package mount // import "github.com/docker/docker/pkg/mount" // MakeShared ensures a mounted filesystem has the SHARED mount option enabled. // See the supported options in flags.go for further reference. func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") + return ensureMountedAs(mountPoint, SHARED) } // MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. // See the supported options in flags.go for further reference. 
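// Editor's note on this hunk's refactor: the propagation helpers now pass
// the parsed mount flags (SHARED, RSHARED, ...) directly to the internal
// mount() rather than routing option strings like "rshared" back through
// ForceMount, so Mount() also honors the data argument it previously
// discarded from parseOptions.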
func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") + return ensureMountedAs(mountPoint, RSHARED) } // MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. // See the supported options in flags.go for further reference. func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") + return ensureMountedAs(mountPoint, PRIVATE) } // MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option // enabled. See the supported options in flags.go for further reference. func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") + return ensureMountedAs(mountPoint, RPRIVATE) } // MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") + return ensureMountedAs(mountPoint, SLAVE) } // MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") + return ensureMountedAs(mountPoint, RSLAVE) } // MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option // enabled. See the supported options in flags.go for further reference. func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") + return ensureMountedAs(mountPoint, UNBINDABLE) } // MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount // option enabled. See the supported options in flags.go for further reference. func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") + return ensureMountedAs(mountPoint, RUNBINDABLE) } // MakeMount ensures that the file or directory given is a mount point, @@ -59,13 +59,13 @@ func MakeMount(mnt string) error { return nil } - return Mount(mnt, mnt, "none", "bind") + return mount(mnt, mnt, "none", uintptr(BIND), "") } -func ensureMountedAs(mountPoint, options string) error { - if err := MakeMount(mountPoint); err != nil { +func ensureMountedAs(mnt string, flags int) error { + if err := MakeMount(mnt); err != nil { return err } - return ForceMount("", mountPoint, "none", options) + return mount("", mnt, "none", uintptr(flags), "") } diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go index 483364c9cf..f3f46055ef 100644 --- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -22,7 +22,6 @@ var ( "busy", "charming", "clever", - "cocky", "cool", "compassionate", "competent", @@ -202,6 +201,9 @@ var ( // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose "bose", + // Katherine Louise Bouman is an imaging scientist and Assistant Professor of Computer Science at the California Institute of Technology. She researches computational methods for imaging, and developed an algorithm that made possible the picture first visualization of a black hole using the Event Horizon Telescope. 
- https://en.wikipedia.org/wiki/Katie_Bouman + "bouman", + // Evelyn Boyd Granville - She was one of the first African-American women to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville "boyd", @@ -585,9 +587,6 @@ var ( // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf "mestorf", - // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky - "minsky", - // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani "mirzakhani", @@ -702,6 +701,9 @@ var ( // Mildred Sanderson - American mathematician best known for Sanderson's theorem concerning modular invariants. https://en.wikipedia.org/wiki/Mildred_Sanderson "sanderson", + // Satoshi Nakamoto is the name used by the unknown person or group of people who developed bitcoin, authored the bitcoin white paper, and created and deployed bitcoin's original reference implementation. https://en.wikipedia.org/wiki/Satoshi_Nakamoto + "satoshi", + // Adi Shamir - Israeli cryptographer whose numerous inventions and contributions to cryptography include the Feige-Fiat-Shamir identification scheme, the Rivest Shamir Adleman (RSA) public-key cryptosystem, the Shamir's secret sharing scheme, the breaking of the Merkle-Hellman cryptosystem, the TWINKLE and TWIRL factoring devices and the discovery of differential cryptanalysis (with Eli Biham). https://en.wikipedia.org/wiki/Adi_Shamir "shamir", @@ -732,9 +734,6 @@ var ( // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence "spence", - // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman - "stallman", - // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker "stonebraker", diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go deleted file mode 100644 index 94780ef610..0000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build !windows - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "errors" - "fmt" -) - -// VersionInfo holds information about the kernel. -type VersionInfo struct { - Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) - Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) - Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) - Flavor string // Flavor of the kernel version (e.g.
4.1.2-generic -> generic) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) -} - -// CompareKernelVersion compares two kernel.VersionInfo structs. -// Returns -1 if a < b, 0 if a == b, 1 if a > b -func CompareKernelVersion(a, b VersionInfo) int { - if a.Kernel < b.Kernel { - return -1 - } else if a.Kernel > b.Kernel { - return 1 - } - - if a.Major < b.Major { - return -1 - } else if a.Major > b.Major { - return 1 - } - - if a.Minor < b.Minor { - return -1 - } else if a.Minor > b.Minor { - return 1 - } - - return 0 -} - -// ParseRelease parses a string and creates a VersionInfo based on it. -func ParseRelease(release string) (*VersionInfo, error) { - var ( - kernel, major, minor, parsed int - flavor, partial string - ) - - // Ignore error from Sscanf to allow an empty flavor. Instead, just - // make sure we got all the version numbers. - parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) - if parsed < 2 { - return nil, errors.New("Can't parse kernel version " + release) - } - - // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 - parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) - if parsed < 1 { - flavor = partial - } - - return &VersionInfo{ - Kernel: kernel, - Major: major, - Minor: minor, - Flavor: flavor, - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go deleted file mode 100644 index 6e599eebcc..0000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build darwin - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "fmt" - "os/exec" - "strings" - - "github.com/mattn/go-shellwords" -) - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - release, err := getRelease() - if err != nil { - return nil, err - } - - return ParseRelease(release) -} - -// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version -func getRelease() (string, error) { - cmd := exec.Command("system_profiler", "SPSoftwareDataType") - osName, err := cmd.Output() - if err != nil { - return "", err - } - - var release string - data := strings.Split(string(osName), "\n") - for _, line := range data { - if strings.Contains(line, "Kernel Version") { - // It has the format like ' Kernel Version: Darwin 14.5.0' - content := strings.SplitN(line, ":", 2) - if len(content) != 2 { - return "", fmt.Errorf("Kernel Version is invalid") - } - - prettyNames, err := shellwords.Parse(content[1]) - if err != nil { - return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) - } - - if len(prettyNames) != 2 { - return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") - } - release = prettyNames[1] - } - } - - return release, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go deleted file mode 100644 index 8a9aa31225..0000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build linux freebsd openbsd - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms.
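// Illustrative sketch, not part of this patch: how the kernel.ParseRelease /
// CompareKernelVersion pair being deleted above was typically used, e.g. to
// gate a feature on kernel >= 4.8. A simplified standalone rewrite of the
// Sscanf parsing shown above; the flavor suffix is dropped for brevity.
package main

import (
	"errors"
	"fmt"
)

type versionInfo struct{ kernel, major, minor int }

func parseRelease(release string) (versionInfo, error) {
	var v versionInfo
	var partial string
	// As in the original, ignore Sscanf's error and only count parsed fields,
	// so a release with no flavor ("4.9.0") still succeeds.
	parsed, _ := fmt.Sscanf(release, "%d.%d%s", &v.kernel, &v.major, &partial)
	if parsed < 2 {
		return v, errors.New("can't parse kernel version " + release)
	}
	// "3.12.25-gentoo" yields minor 25; "3.12-1-amd64" leaves minor at 0.
	fmt.Sscanf(partial, ".%d", &v.minor)
	return v, nil
}

func main() {
	v, _ := parseRelease("4.1.2-generic")
	atLeast48 := v.kernel > 4 || (v.kernel == 4 && v.major >= 8)
	fmt.Println(v, atLeast48) // {4 1 2} false
}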
-package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "bytes" - - "github.com/sirupsen/logrus" -) - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - uts, err := uname() - if err != nil { - return nil, err - } - - // Remove the \x00 from the release for Atoi to parse correctly - return ParseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)])) -} - -// CheckKernelVersion checks if current kernel is newer than (or equal to) -// the given version. -func CheckKernelVersion(k, major, minor int) bool { - if v, err := GetKernelVersion(); err != nil { - logrus.Warnf("error getting kernel version: %s", err) - } else { - if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go deleted file mode 100644 index b7b15a1fd2..0000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "fmt" - - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/registry" -) - -// VersionInfo holds information about the kernel. -type VersionInfo struct { - kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) - major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) - minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) - build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) -} - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - - KVI := &VersionInfo{"Unknown", 0, 0, 0} - - k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - if err != nil { - return KVI, err - } - defer k.Close() - - blex, _, err := k.GetStringValue("BuildLabEx") - if err != nil { - return KVI, err - } - KVI.kvi = blex - - // Important - docker.exe MUST be manifested for this API to return - // the correct information. - dwVersion, err := windows.GetVersion() - if err != nil { - return KVI, err - } - - KVI.major = int(dwVersion & 0xFF) - KVI.minor = int((dwVersion & 0XFF00) >> 8) - KVI.build = int((dwVersion & 0xFFFF0000) >> 16) - - return KVI, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go deleted file mode 100644 index 212ff4502b..0000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go +++ /dev/null @@ -1,17 +0,0 @@ -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import "golang.org/x/sys/unix" - -// Utsname represents the system name structure. -// It is passthrough for unix.Utsname in order to make it portable with -// other platforms where it is not available. 
-type Utsname unix.Utsname - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go deleted file mode 100644 index b2139b60e8..0000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go +++ /dev/null @@ -1,14 +0,0 @@ -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "golang.org/x/sys/unix" -) - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go deleted file mode 100644 index 97906e4cd7..0000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !linux - -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "errors" -) - -// Utsname represents the system name structure. -// It is defined here to make it portable as it is available on linux but not -// on windows. -type Utsname struct { - Release [65]byte -} - -func uname() (*Utsname, error) { - return nil, errors.New("Kernel version detection is available only on linux") -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go new file mode 100644 index 0000000000..068e524807 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -0,0 +1,97 @@ +// Package parsers provides helper functions to parse and validate different types +// of strings. These can be hosts, unix addresses, tcp addresses, filters, or kernel +// operating system versions. +package parsers // import "github.com/docker/docker/pkg/parsers" + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintListMaximum parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. Values larger than `maximum` cause an error if `maximum` is non-zero, +// in order to stop the map becoming excessively large. +// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this parses as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintListMaximum(val string, maximum int) (map[int]bool, error) { + return parseUintList(val, maximum) +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`.
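// Illustrative usage sketch, not part of this patch: the cpuset-style list
// format documented above, exercised end to end. The import path comes from
// the file header; the result is the map[int]bool described in the comment.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers"
)

func main() {
	// "0,3-4,7" selects CPUs 0, 3, 4 and 7; duplicates and overlaps are fine.
	set, err := parsers.ParseUintList("0,3-4,7")
	if err != nil {
		panic(err)
	}
	fmt.Println(set[3], set[5]) // true false

	// ParseUintListMaximum additionally rejects values above the cap.
	if _, err := parsers.ParseUintListMaximum("0,16", 8); err != nil {
		fmt.Println("rejected:", err) // value out of range, maximum is 8
	}
}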
+// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this parses as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + return parseUintList(val, 0) +} + +func parseUintList(val string, maximum int) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + if maximum != 0 && v > maximum { + return nil, fmt.Errorf("value out of range, maximum is %d", maximum) + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + if maximum != 0 && max > maximum { + return nil, fmt.Errorf("value out of range, maximum is %d", maximum) + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go index 46339c282f..3b978fd3b5 100644 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -72,6 +72,7 @@ func (bp *bufferPool) Get() []byte { } func (bp *bufferPool) Put(b []byte) { + //nolint:staticcheck // TODO changing this to a pointer makes tests fail. Investigate if we should change or not (otherwise remove this TODO) bp.pool.Put(b) } diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go similarity index 93% rename from vendor/github.com/docker/docker/pkg/system/filesys.go rename to vendor/github.com/docker/docker/pkg/system/filesys_unix.go index adeb163052..dcee3e9f98 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -8,14 +8,14 @@ import ( "path/filepath" ) -// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems. func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return MkdirAll(path, perm, sddl) + return os.MkdirAll(path, perm) } // MkdirAll creates a directory named path along with any necessary parents, // with permission specified by attribute perm for all directories created. -func MkdirAll(path string, perm os.FileMode, sddl string) error { +func MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) } diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index 3049ff38a4..7cebd6efc0 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -26,9 +26,10 @@ func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { return mkdirall(path, true, sddl) } -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, _ os.FileMode, sddl string) error { - return mkdirall(path, false, sddl) +// MkdirAll implementation that is volume path aware for Windows.
It can be used +// as a drop-in replacement for os.MkdirAll() +func MkdirAll(path string, _ os.FileMode) error { + return mkdirall(path, false, "") } // mkdirall is a custom version of os.MkdirAll modified for use on Windows diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go index 7f67501285..f303aa9063 100644 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -3,6 +3,7 @@ package system // import "github.com/docker/docker/pkg/system" import ( "os" + "github.com/Microsoft/hcsshim/osversion" "github.com/sirupsen/logrus" ) @@ -15,10 +16,10 @@ var ( containerdRuntimeSupported = false ) -// InitLCOW sets whether LCOW is supported or not +// InitLCOW sets whether LCOW is supported or not. Requires RS5+ func InitLCOW(experimental bool) { v := GetOSVersion() - if experimental && v.Build >= 16299 { + if experimental && v.Build >= osversion.RS5 { lcowSupported = true } } diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go index d79e8b0765..cd060eff24 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/docker/go-units" + units "github.com/docker/go-units" ) // ReadMemInfo retrieves memory statistics of the host system and returns a @@ -27,6 +27,7 @@ func ReadMemInfo() (*MemInfo, error) { func parseMemInfo(reader io.Reader) (*MemInfo, error) { meminfo := &MemInfo{} scanner := bufio.NewScanner(reader) + memAvailable := int64(-1) for scanner.Scan() { // Expected format: ["MemTotal:", "1234", "kB"] parts := strings.Fields(scanner.Text()) @@ -48,6 +49,8 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) { meminfo.MemTotal = bytes case "MemFree:": meminfo.MemFree = bytes + case "MemAvailable:": + memAvailable = bytes case "SwapTotal:": meminfo.SwapTotal = bytes case "SwapFree:": @@ -55,6 +58,9 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) { } } + if memAvailable != -1 { + meminfo.MemFree = memAvailable + } // Handle errors that may have occurred during the reading of the file. if err := scanner.Err(); err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go index a3d957afab..64e892289a 100644 --- a/vendor/github.com/docker/docker/pkg/system/path.go +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -5,8 +5,6 @@ import ( "path/filepath" "runtime" "strings" - - "github.com/containerd/continuity/pathdriver" ) const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" @@ -27,6 +25,12 @@ func DefaultPathEnv(os string) string { } +// PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter +// actually uses in order to avoid system depending on containerd/continuity. +type PathVerifier interface { + IsAbs(string) bool +} + // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, // is the system drive. // On Linux: this is a no-op. 
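// Illustrative sketch, not part of this patch: the meminfo hunk above now
// prefers the kernel's MemAvailable estimate (which counts reclaimable page
// cache, available since Linux 3.14) over raw MemFree. A minimal standalone
// reader with the same fallback; /proc/meminfo reports sizes in kB.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func availableBytes() (int64, error) {
	f, err := os.Open("/proc/meminfo")
	if err != nil {
		return 0, err
	}
	defer f.Close()
	memFree, memAvailable := int64(-1), int64(-1)
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		// Expected shape: ["MemAvailable:", "1234", "kB"]
		fields := strings.Fields(sc.Text())
		if len(fields) < 2 {
			continue
		}
		kb, err := strconv.ParseInt(fields[1], 10, 64)
		if err != nil {
			continue
		}
		switch fields[0] {
		case "MemFree:":
			memFree = kb * 1024
		case "MemAvailable:":
			memAvailable = kb * 1024
		}
	}
	// Mirror the hunk above: report MemAvailable when present, else MemFree.
	if memAvailable != -1 {
		return memAvailable, sc.Err()
	}
	return memFree, sc.Err()
}

func main() {
	n, err := availableBytes()
	fmt.Println(n, err)
}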
@@ -42,7 +46,7 @@ func DefaultPathEnv(os string) string { // a --> a // /a --> \a // d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) { +func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { if runtime.GOOS != "windows" || LCOWSupported() { return path, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 98c9eb18d1..17d5d131a3 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -8,7 +8,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mode: s.Mode, uid: s.Uid, gid: s.Gid, - rdev: s.Rdev, + // the type is 32bit on mips + rdev: uint64(s.Rdev), // nolint: unconvert mtim: s.Mtim}, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go deleted file mode 100644 index 756b92d1e6..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index 4ae92fa6c7..eb19f9c850 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -55,7 +55,6 @@ var ( ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") ) @@ -85,7 +84,7 @@ type osVersionInfoEx struct { } // GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information. +// dockerd.exe must be manifested to get the correct version information. func GetOSVersion() OSVersion { var err error osv := OSVersion{} @@ -118,22 +117,6 @@ func IsWindowsClient() bool { return osviex.ProductType == verNTWorkstation } -// IsIoTCore returns true if the currently running image is based off of -// Windows 10 IoT Core. -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. -func IsIoTCore() bool { - var returnedProductType uint32 - r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) - if r1 == 0 { - logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) - return false - } - const productIoTUAP = 0x0000007B - const productIoTUAPCommercial = 0x00000083 - return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial -} - // Unmount is a platform-specific helper function to call // the unmount syscall. 
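// Illustrative sketch, not part of this patch: the new single-method
// PathVerifier interface above is all CheckSystemDriveAndRemoveDriveLetter
// needs, so callers no longer drag in containerd/continuity's PathDriver.
// Any type with IsAbs(string) bool satisfies it; a filepath-backed stand-in,
// assuming the patched package is importable from your module.
package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/system"
)

// osDriver satisfies PathVerifier with the platform's own path semantics.
type osDriver struct{}

func (osDriver) IsAbs(p string) bool { return filepath.IsAbs(p) }

func main() {
	// Per the doc comment above: a no-op on Linux, while on Windows it strips
	// the system drive letter or fails for a non-system drive such as d:\.
	p, err := system.CheckSystemDriveAndRemoveDriveLetter(`C:\somepath`, osDriver{})
	fmt.Println(p, err)
}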
Not supported on Windows func Unmount(dest string) error { diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go deleted file mode 100644 index 0afe854589..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symlink files because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment. -func LUtimesNano(path string, ts []syscall.Timespec) error { - atFdCwd := unix.AT_FDCWD - - var _path *byte - _path, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go similarity index 58% rename from vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go rename to vendor/github.com/docker/docker/pkg/system/utimes_unix.go index ed1b9fad59..61ba8c474c 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go @@ -1,8 +1,9 @@ +// +build linux freebsd + package system // import "github.com/docker/docker/pkg/system" import ( "syscall" - "unsafe" "golang.org/x/sys/unix" ) @@ -10,13 +11,12 @@ import ( // LUtimesNano is used to change access and modification time of the specified path. // It's used for symlink files because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment. func LUtimesNano(path string, ts []syscall.Timespec) error { - var _path *byte - _path, err := unix.BytePtrFromString(path) - if err != nil { - return err + uts := []unix.Timespec{ + unix.NsecToTimespec(syscall.TimespecToNsec(ts[0])), + unix.NsecToTimespec(syscall.TimespecToNsec(ts[1])), } - - if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { + err := unix.UtimesNanoAt(unix.AT_FDCWD, path, uts, unix.AT_SYMLINK_NOFOLLOW) + if err != nil && err != unix.ENOSYS { return err } diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go index a3c3db1315..6e83b59e90 100644 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -7,7 +7,7 @@ import ( "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE "github.com/Azure/go-ansiterm/winterm" - "github.com/docker/docker/pkg/term/windows" + windowsconsole "github.com/docker/docker/pkg/term/windows" ) // State holds the console mode for the terminal. diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go index 3e5593ca6a..7e8f265d47 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go @@ -1,3 +1,4 @@ +// +build windows // These files implement ANSI-aware input and output streams for use by the Docker Windows client.
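// Illustrative sketch, not part of this patch: the rewritten LUtimesNano
// above now rides on unix.UtimesNanoAt with AT_SYMLINK_NOFOLLOW, replacing
// the raw SYS_LUTIMES/SYS_UTIMENSAT syscalls; that flag is what lets it
// stamp a symlink itself rather than its target. Minimal linux/freebsd
// equivalent; the helper name and the example path are hypothetical.
package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func touchSymlink(link string, atime, mtime time.Time) error {
	ts := []unix.Timespec{
		unix.NsecToTimespec(atime.UnixNano()),
		unix.NsecToTimespec(mtime.UnixNano()),
	}
	// AT_FDCWD resolves link relative to the working directory, and
	// AT_SYMLINK_NOFOLLOW keeps the kernel from dereferencing the link,
	// mirroring the vendored helper.
	return unix.UtimesNanoAt(unix.AT_FDCWD, link, ts, unix.AT_SYMLINK_NOFOLLOW)
}

func main() {
	now := time.Now()
	// /tmp/example-link is a hypothetical symlink; errors surface directly.
	fmt.Println(touchSymlink("/tmp/example-link", now, now))
}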
// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create // and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. @@ -9,7 +10,7 @@ import ( "os" "sync" - "github.com/Azure/go-ansiterm" + ansiterm "github.com/Azure/go-ansiterm" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json deleted file mode 100644 index 4508c3ccce..0000000000 --- a/vendor/github.com/docker/docker/profiles/seccomp/default.json +++ /dev/null @@ -1,794 +0,0 @@ -{ - "defaultAction": "SCMP_ACT_ERRNO", - "archMap": [ - { - "architecture": "SCMP_ARCH_X86_64", - "subArchitectures": [ - "SCMP_ARCH_X86", - "SCMP_ARCH_X32" - ] - }, - { - "architecture": "SCMP_ARCH_AARCH64", - "subArchitectures": [ - "SCMP_ARCH_ARM" - ] - }, - { - "architecture": "SCMP_ARCH_MIPS64", - "subArchitectures": [ - "SCMP_ARCH_MIPS", - "SCMP_ARCH_MIPS64N32" - ] - }, - { - "architecture": "SCMP_ARCH_MIPS64N32", - "subArchitectures": [ - "SCMP_ARCH_MIPS", - "SCMP_ARCH_MIPS64" - ] - }, - { - "architecture": "SCMP_ARCH_MIPSEL64", - "subArchitectures": [ - "SCMP_ARCH_MIPSEL", - "SCMP_ARCH_MIPSEL64N32" - ] - }, - { - "architecture": "SCMP_ARCH_MIPSEL64N32", - "subArchitectures": [ - "SCMP_ARCH_MIPSEL", - "SCMP_ARCH_MIPSEL64" - ] - }, - { - "architecture": "SCMP_ARCH_S390X", - "subArchitectures": [ - "SCMP_ARCH_S390" - ] - } - ], - "syscalls": [ - { - "names": [ - "accept", - "accept4", - "access", - "adjtimex", - "alarm", - "bind", - "brk", - "capget", - "capset", - "chdir", - "chmod", - "chown", - "chown32", - "clock_getres", - "clock_gettime", - "clock_nanosleep", - "close", - "connect", - "copy_file_range", - "creat", - "dup", - "dup2", - "dup3", - "epoll_create", - "epoll_create1", - "epoll_ctl", - "epoll_ctl_old", - "epoll_pwait", - "epoll_wait", - "epoll_wait_old", - "eventfd", - "eventfd2", - "execve", - "execveat", - "exit", - "exit_group", - "faccessat", - "fadvise64", - "fadvise64_64", - "fallocate", - "fanotify_mark", - "fchdir", - "fchmod", - "fchmodat", - "fchown", - "fchown32", - "fchownat", - "fcntl", - "fcntl64", - "fdatasync", - "fgetxattr", - "flistxattr", - "flock", - "fork", - "fremovexattr", - "fsetxattr", - "fstat", - "fstat64", - "fstatat64", - "fstatfs", - "fstatfs64", - "fsync", - "ftruncate", - "ftruncate64", - "futex", - "futimesat", - "getcpu", - "getcwd", - "getdents", - "getdents64", - "getegid", - "getegid32", - "geteuid", - "geteuid32", - "getgid", - "getgid32", - "getgroups", - "getgroups32", - "getitimer", - "getpeername", - "getpgid", - "getpgrp", - "getpid", - "getppid", - "getpriority", - "getrandom", - "getresgid", - "getresgid32", - "getresuid", - "getresuid32", - "getrlimit", - "get_robust_list", - "getrusage", - "getsid", - "getsockname", - "getsockopt", - "get_thread_area", - "gettid", - "gettimeofday", - "getuid", - "getuid32", - "getxattr", - "inotify_add_watch", - "inotify_init", - "inotify_init1", - "inotify_rm_watch", - "io_cancel", - "ioctl", - "io_destroy", - "io_getevents", - "io_pgetevents", - "ioprio_get", - "ioprio_set", - "io_setup", - "io_submit", - "ipc", - "kill", - "lchown", - "lchown32", - "lgetxattr", - "link", - "linkat", - "listen", - "listxattr", - "llistxattr", - "_llseek", - "lremovexattr", - "lseek", - "lsetxattr", - "lstat", - "lstat64", - "madvise", - "memfd_create", - "mincore", - "mkdir", - "mkdirat", - "mknod", - "mknodat", - "mlock", - "mlock2", - "mlockall", - "mmap", - "mmap2", - "mprotect", - 
"mq_getsetattr", - "mq_notify", - "mq_open", - "mq_timedreceive", - "mq_timedsend", - "mq_unlink", - "mremap", - "msgctl", - "msgget", - "msgrcv", - "msgsnd", - "msync", - "munlock", - "munlockall", - "munmap", - "nanosleep", - "newfstatat", - "_newselect", - "open", - "openat", - "pause", - "pipe", - "pipe2", - "poll", - "ppoll", - "prctl", - "pread64", - "preadv", - "preadv2", - "prlimit64", - "pselect6", - "pwrite64", - "pwritev", - "pwritev2", - "read", - "readahead", - "readlink", - "readlinkat", - "readv", - "recv", - "recvfrom", - "recvmmsg", - "recvmsg", - "remap_file_pages", - "removexattr", - "rename", - "renameat", - "renameat2", - "restart_syscall", - "rmdir", - "rt_sigaction", - "rt_sigpending", - "rt_sigprocmask", - "rt_sigqueueinfo", - "rt_sigreturn", - "rt_sigsuspend", - "rt_sigtimedwait", - "rt_tgsigqueueinfo", - "sched_getaffinity", - "sched_getattr", - "sched_getparam", - "sched_get_priority_max", - "sched_get_priority_min", - "sched_getscheduler", - "sched_rr_get_interval", - "sched_setaffinity", - "sched_setattr", - "sched_setparam", - "sched_setscheduler", - "sched_yield", - "seccomp", - "select", - "semctl", - "semget", - "semop", - "semtimedop", - "send", - "sendfile", - "sendfile64", - "sendmmsg", - "sendmsg", - "sendto", - "setfsgid", - "setfsgid32", - "setfsuid", - "setfsuid32", - "setgid", - "setgid32", - "setgroups", - "setgroups32", - "setitimer", - "setpgid", - "setpriority", - "setregid", - "setregid32", - "setresgid", - "setresgid32", - "setresuid", - "setresuid32", - "setreuid", - "setreuid32", - "setrlimit", - "set_robust_list", - "setsid", - "setsockopt", - "set_thread_area", - "set_tid_address", - "setuid", - "setuid32", - "setxattr", - "shmat", - "shmctl", - "shmdt", - "shmget", - "shutdown", - "sigaltstack", - "signalfd", - "signalfd4", - "sigreturn", - "socket", - "socketcall", - "socketpair", - "splice", - "stat", - "stat64", - "statfs", - "statfs64", - "statx", - "symlink", - "symlinkat", - "sync", - "sync_file_range", - "syncfs", - "sysinfo", - "tee", - "tgkill", - "time", - "timer_create", - "timer_delete", - "timerfd_create", - "timerfd_gettime", - "timerfd_settime", - "timer_getoverrun", - "timer_gettime", - "timer_settime", - "times", - "tkill", - "truncate", - "truncate64", - "ugetrlimit", - "umask", - "uname", - "unlink", - "unlinkat", - "utime", - "utimensat", - "utimes", - "vfork", - "vmsplice", - "wait4", - "waitid", - "waitpid", - "write", - "writev" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "ptrace" - ], - "action": "SCMP_ACT_ALLOW", - "args": null, - "comment": "", - "includes": { - "minKernel": "4.8" - }, - "excludes": {} - }, - { - "names": [ - "personality" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 0, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "personality" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 8, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "personality" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 131072, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "personality" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 131080, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - 
"includes": {}, - "excludes": {} - }, - { - "names": [ - "personality" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 4294967295, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": {} - }, - { - "names": [ - "sync_file_range2" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "arches": [ - "ppc64le" - ] - }, - "excludes": {} - }, - { - "names": [ - "arm_fadvise64_64", - "arm_sync_file_range", - "sync_file_range2", - "breakpoint", - "cacheflush", - "set_tls" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "arches": [ - "arm", - "arm64" - ] - }, - "excludes": {} - }, - { - "names": [ - "arch_prctl" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "arches": [ - "amd64", - "x32" - ] - }, - "excludes": {} - }, - { - "names": [ - "modify_ldt" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "arches": [ - "amd64", - "x32", - "x86" - ] - }, - "excludes": {} - }, - { - "names": [ - "s390_pci_mmio_read", - "s390_pci_mmio_write", - "s390_runtime_instr" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "arches": [ - "s390", - "s390x" - ] - }, - "excludes": {} - }, - { - "names": [ - "open_by_handle_at" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_DAC_READ_SEARCH" - ] - }, - "excludes": {} - }, - { - "names": [ - "bpf", - "clone", - "fanotify_init", - "lookup_dcookie", - "mount", - "name_to_handle_at", - "perf_event_open", - "quotactl", - "setdomainname", - "sethostname", - "setns", - "syslog", - "umount", - "umount2", - "unshare" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYS_ADMIN" - ] - }, - "excludes": {} - }, - { - "names": [ - "clone" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 2080505856, - "valueTwo": 0, - "op": "SCMP_CMP_MASKED_EQ" - } - ], - "comment": "", - "includes": {}, - "excludes": { - "caps": [ - "CAP_SYS_ADMIN" - ], - "arches": [ - "s390", - "s390x" - ] - } - }, - { - "names": [ - "clone" - ], - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 1, - "value": 2080505856, - "valueTwo": 0, - "op": "SCMP_CMP_MASKED_EQ" - } - ], - "comment": "s390 parameter ordering for clone is different", - "includes": { - "arches": [ - "s390", - "s390x" - ] - }, - "excludes": { - "caps": [ - "CAP_SYS_ADMIN" - ] - } - }, - { - "names": [ - "reboot" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYS_BOOT" - ] - }, - "excludes": {} - }, - { - "names": [ - "chroot" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYS_CHROOT" - ] - }, - "excludes": {} - }, - { - "names": [ - "delete_module", - "init_module", - "finit_module", - "query_module" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYS_MODULE" - ] - }, - "excludes": {} - }, - { - "names": [ - "acct" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYS_PACCT" - ] - }, - "excludes": {} - }, - { - "names": [ - "kcmp", - "process_vm_readv", - "process_vm_writev", - "ptrace" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYS_PTRACE" - ] - }, - "excludes": {} - }, - { - "names": [ - "iopl", - "ioperm" - 
], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYS_RAWIO" - ] - }, - "excludes": {} - }, - { - "names": [ - "settimeofday", - "stime", - "clock_settime" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYS_TIME" - ] - }, - "excludes": {} - }, - { - "names": [ - "vhangup" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYS_TTY_CONFIG" - ] - }, - "excludes": {} - }, - { - "names": [ - "get_mempolicy", - "mbind", - "set_mempolicy" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYS_NICE" - ] - }, - "excludes": {} - }, - { - "names": [ - "syslog" - ], - "action": "SCMP_ACT_ALLOW", - "args": [], - "comment": "", - "includes": { - "caps": [ - "CAP_SYSLOG" - ] - }, - "excludes": {} - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/profiles/seccomp/generate.go b/vendor/github.com/docker/docker/profiles/seccomp/generate.go deleted file mode 100644 index 32f22bb375..0000000000 --- a/vendor/github.com/docker/docker/profiles/seccomp/generate.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build ignore - -package main - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/profiles/seccomp" -) - -// saves the default seccomp profile as a json file so people can use it as a -// base for their own custom profiles -func main() { - wd, err := os.Getwd() - if err != nil { - panic(err) - } - f := filepath.Join(wd, "default.json") - - // write the default profile to the file - b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t") - if err != nil { - panic(err) - } - - if err := ioutil.WriteFile(f, b, 0644); err != nil { - panic(err) - } -} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go deleted file mode 100644 index 9f222a6eec..0000000000 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go +++ /dev/null @@ -1,191 +0,0 @@ -// +build linux - -package seccomp // import "github.com/docker/docker/profiles/seccomp" - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/parsers/kernel" - specs "github.com/opencontainers/runtime-spec/specs-go" - libseccomp "github.com/seccomp/libseccomp-golang" -) - -//go:generate go run -tags 'seccomp' generate.go - -// GetDefaultProfile returns the default seccomp profile. -func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) { - return setupSeccomp(DefaultProfile(), rs) -} - -// LoadProfile takes a json string and decodes the seccomp profile. -func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) { - var config types.Seccomp - if err := json.Unmarshal([]byte(body), &config); err != nil { - return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) - } - return setupSeccomp(&config, rs) -} - -var nativeToSeccomp = map[string]types.Arch{ - "amd64": types.ArchX86_64, - "arm64": types.ArchAARCH64, - "mips64": types.ArchMIPS64, - "mips64n32": types.ArchMIPS64N32, - "mipsel64": types.ArchMIPSEL64, - "mipsel64n32": types.ArchMIPSEL64N32, - "s390x": types.ArchS390X, -} - -// inSlice tests whether a string is contained in a slice of strings or not. 
-// Comparison is case sensitive -func inSlice(slice []string, s string) bool { - for _, ss := range slice { - if s == ss { - return true - } - } - return false -} - -func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) { - if config == nil { - return nil, nil - } - - // No default action specified, no syscalls listed, assume seccomp disabled - if config.DefaultAction == "" && len(config.Syscalls) == 0 { - return nil, nil - } - - newConfig := &specs.LinuxSeccomp{} - - var arch string - var native, err = libseccomp.GetNativeArch() - if err == nil { - arch = native.String() - } - - if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { - return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") - } - - // if config.Architectures == 0 then libseccomp will figure out the architecture to use - if len(config.Architectures) != 0 { - for _, a := range config.Architectures { - newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) - } - } - - if len(config.ArchMap) != 0 { - for _, a := range config.ArchMap { - seccompArch, ok := nativeToSeccomp[arch] - if ok { - if a.Arch == seccompArch { - newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch)) - for _, sa := range a.SubArches { - newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa)) - } - break - } - } - } - } - - newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction) - -Loop: - // Loop through all syscall blocks and convert them to libcontainer format after filtering them - for _, call := range config.Syscalls { - if len(call.Excludes.Arches) > 0 { - if inSlice(call.Excludes.Arches, arch) { - continue Loop - } - } - if len(call.Excludes.Caps) > 0 { - for _, c := range call.Excludes.Caps { - if inSlice(rs.Process.Capabilities.Bounding, c) { - continue Loop - } - } - } - if call.Excludes.MinKernel != "" { - if ok, err := kernelGreaterEqualThan(call.Excludes.MinKernel); err != nil { - return nil, err - } else if ok { - continue Loop - } - } - if len(call.Includes.Arches) > 0 { - if !inSlice(call.Includes.Arches, arch) { - continue Loop - } - } - if len(call.Includes.Caps) > 0 { - for _, c := range call.Includes.Caps { - if !inSlice(rs.Process.Capabilities.Bounding, c) { - continue Loop - } - } - } - if call.Includes.MinKernel != "" { - if ok, err := kernelGreaterEqualThan(call.Includes.MinKernel); err != nil { - return nil, err - } else if !ok { - continue Loop - } - } - - if call.Name != "" && len(call.Names) != 0 { - return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") - } - - if call.Name != "" { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args)) - } - - for _, n := range call.Names { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args)) - } - } - - return newConfig, nil -} - -func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.LinuxSyscall { - newCall := specs.LinuxSyscall{ - Names: []string{name}, - Action: specs.LinuxSeccompAction(action), - } - - // Loop through all the arguments of the syscall and convert them - for _, arg := range args { - newArg := specs.LinuxSeccompArg{ - Index: arg.Index, - Value: arg.Value, - ValueTwo: arg.ValueTwo, - Op: specs.LinuxSeccompOperator(arg.Op), - } - - newCall.Args = append(newCall.Args, newArg) - } - return newCall -} - -var 
currentKernelVersion *kernel.VersionInfo - -func kernelGreaterEqualThan(v string) (bool, error) { - version, err := kernel.ParseRelease(v) - if err != nil { - return false, err - } - if currentKernelVersion == nil { - currentKernelVersion, err = kernel.GetKernelVersion() - if err != nil { - return false, err - } - } - return kernel.CompareKernelVersion(*version, *currentKernelVersion) <= 0, nil -} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go deleted file mode 100644 index 36bc4ea9d4..0000000000 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go +++ /dev/null @@ -1,670 +0,0 @@ -// +build linux,seccomp - -package seccomp // import "github.com/docker/docker/profiles/seccomp" - -import ( - "github.com/docker/docker/api/types" - "golang.org/x/sys/unix" -) - -func arches() []types.Architecture { - return []types.Architecture{ - { - Arch: types.ArchX86_64, - SubArches: []types.Arch{types.ArchX86, types.ArchX32}, - }, - { - Arch: types.ArchAARCH64, - SubArches: []types.Arch{types.ArchARM}, - }, - { - Arch: types.ArchMIPS64, - SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64N32}, - }, - { - Arch: types.ArchMIPS64N32, - SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64}, - }, - { - Arch: types.ArchMIPSEL64, - SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64N32}, - }, - { - Arch: types.ArchMIPSEL64N32, - SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64}, - }, - { - Arch: types.ArchS390X, - SubArches: []types.Arch{types.ArchS390}, - }, - } -} - -// DefaultProfile defines the whitelist for the default seccomp profile. -func DefaultProfile() *types.Seccomp { - syscalls := []*types.Syscall{ - { - Names: []string{ - "accept", - "accept4", - "access", - "adjtimex", - "alarm", - "bind", - "brk", - "capget", - "capset", - "chdir", - "chmod", - "chown", - "chown32", - "clock_getres", - "clock_gettime", - "clock_nanosleep", - "close", - "connect", - "copy_file_range", - "creat", - "dup", - "dup2", - "dup3", - "epoll_create", - "epoll_create1", - "epoll_ctl", - "epoll_ctl_old", - "epoll_pwait", - "epoll_wait", - "epoll_wait_old", - "eventfd", - "eventfd2", - "execve", - "execveat", - "exit", - "exit_group", - "faccessat", - "fadvise64", - "fadvise64_64", - "fallocate", - "fanotify_mark", - "fchdir", - "fchmod", - "fchmodat", - "fchown", - "fchown32", - "fchownat", - "fcntl", - "fcntl64", - "fdatasync", - "fgetxattr", - "flistxattr", - "flock", - "fork", - "fremovexattr", - "fsetxattr", - "fstat", - "fstat64", - "fstatat64", - "fstatfs", - "fstatfs64", - "fsync", - "ftruncate", - "ftruncate64", - "futex", - "futimesat", - "getcpu", - "getcwd", - "getdents", - "getdents64", - "getegid", - "getegid32", - "geteuid", - "geteuid32", - "getgid", - "getgid32", - "getgroups", - "getgroups32", - "getitimer", - "getpeername", - "getpgid", - "getpgrp", - "getpid", - "getppid", - "getpriority", - "getrandom", - "getresgid", - "getresgid32", - "getresuid", - "getresuid32", - "getrlimit", - "get_robust_list", - "getrusage", - "getsid", - "getsockname", - "getsockopt", - "get_thread_area", - "gettid", - "gettimeofday", - "getuid", - "getuid32", - "getxattr", - "inotify_add_watch", - "inotify_init", - "inotify_init1", - "inotify_rm_watch", - "io_cancel", - "ioctl", - "io_destroy", - "io_getevents", - "io_pgetevents", - "ioprio_get", - "ioprio_set", - "io_setup", - "io_submit", - "ipc", - "kill", - "lchown", - "lchown32", - "lgetxattr", - "link", - "linkat", - 
"listen", - "listxattr", - "llistxattr", - "_llseek", - "lremovexattr", - "lseek", - "lsetxattr", - "lstat", - "lstat64", - "madvise", - "memfd_create", - "mincore", - "mkdir", - "mkdirat", - "mknod", - "mknodat", - "mlock", - "mlock2", - "mlockall", - "mmap", - "mmap2", - "mprotect", - "mq_getsetattr", - "mq_notify", - "mq_open", - "mq_timedreceive", - "mq_timedsend", - "mq_unlink", - "mremap", - "msgctl", - "msgget", - "msgrcv", - "msgsnd", - "msync", - "munlock", - "munlockall", - "munmap", - "nanosleep", - "newfstatat", - "_newselect", - "open", - "openat", - "pause", - "pipe", - "pipe2", - "poll", - "ppoll", - "prctl", - "pread64", - "preadv", - "preadv2", - "prlimit64", - "pselect6", - "pwrite64", - "pwritev", - "pwritev2", - "read", - "readahead", - "readlink", - "readlinkat", - "readv", - "recv", - "recvfrom", - "recvmmsg", - "recvmsg", - "remap_file_pages", - "removexattr", - "rename", - "renameat", - "renameat2", - "restart_syscall", - "rmdir", - "rt_sigaction", - "rt_sigpending", - "rt_sigprocmask", - "rt_sigqueueinfo", - "rt_sigreturn", - "rt_sigsuspend", - "rt_sigtimedwait", - "rt_tgsigqueueinfo", - "sched_getaffinity", - "sched_getattr", - "sched_getparam", - "sched_get_priority_max", - "sched_get_priority_min", - "sched_getscheduler", - "sched_rr_get_interval", - "sched_setaffinity", - "sched_setattr", - "sched_setparam", - "sched_setscheduler", - "sched_yield", - "seccomp", - "select", - "semctl", - "semget", - "semop", - "semtimedop", - "send", - "sendfile", - "sendfile64", - "sendmmsg", - "sendmsg", - "sendto", - "setfsgid", - "setfsgid32", - "setfsuid", - "setfsuid32", - "setgid", - "setgid32", - "setgroups", - "setgroups32", - "setitimer", - "setpgid", - "setpriority", - "setregid", - "setregid32", - "setresgid", - "setresgid32", - "setresuid", - "setresuid32", - "setreuid", - "setreuid32", - "setrlimit", - "set_robust_list", - "setsid", - "setsockopt", - "set_thread_area", - "set_tid_address", - "setuid", - "setuid32", - "setxattr", - "shmat", - "shmctl", - "shmdt", - "shmget", - "shutdown", - "sigaltstack", - "signalfd", - "signalfd4", - "sigreturn", - "socket", - "socketcall", - "socketpair", - "splice", - "stat", - "stat64", - "statfs", - "statfs64", - "statx", - "symlink", - "symlinkat", - "sync", - "sync_file_range", - "syncfs", - "sysinfo", - "tee", - "tgkill", - "time", - "timer_create", - "timer_delete", - "timerfd_create", - "timerfd_gettime", - "timerfd_settime", - "timer_getoverrun", - "timer_gettime", - "timer_settime", - "times", - "tkill", - "truncate", - "truncate64", - "ugetrlimit", - "umask", - "uname", - "unlink", - "unlinkat", - "utime", - "utimensat", - "utimes", - "vfork", - "vmsplice", - "wait4", - "waitid", - "waitpid", - "write", - "writev", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Names: []string{"ptrace"}, - Action: types.ActAllow, - Includes: types.Filter{ - MinKernel: "4.8", - }, - }, - { - Names: []string{"personality"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x0, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"personality"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x0008, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"personality"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x20000, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"personality"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x20008, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: 
[]string{"personality"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0xffffffff, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{ - "sync_file_range2", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Arches: []string{"ppc64le"}, - }, - }, - { - Names: []string{ - "arm_fadvise64_64", - "arm_sync_file_range", - "sync_file_range2", - "breakpoint", - "cacheflush", - "set_tls", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Arches: []string{"arm", "arm64"}, - }, - }, - { - Names: []string{ - "arch_prctl", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Arches: []string{"amd64", "x32"}, - }, - }, - { - Names: []string{ - "modify_ldt", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Arches: []string{"amd64", "x32", "x86"}, - }, - }, - { - Names: []string{ - "s390_pci_mmio_read", - "s390_pci_mmio_write", - "s390_runtime_instr", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Arches: []string{"s390", "s390x"}, - }, - }, - { - Names: []string{ - "open_by_handle_at", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_DAC_READ_SEARCH"}, - }, - }, - { - Names: []string{ - "bpf", - "clone", - "fanotify_init", - "lookup_dcookie", - "mount", - "name_to_handle_at", - "perf_event_open", - "quotactl", - "setdomainname", - "sethostname", - "setns", - "syslog", - "umount", - "umount2", - "unshare", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_ADMIN"}, - }, - }, - { - Names: []string{ - "clone", - }, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET, - ValueTwo: 0, - Op: types.OpMaskedEqual, - }, - }, - Excludes: types.Filter{ - Caps: []string{"CAP_SYS_ADMIN"}, - Arches: []string{"s390", "s390x"}, - }, - }, - { - Names: []string{ - "clone", - }, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 1, - Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET, - ValueTwo: 0, - Op: types.OpMaskedEqual, - }, - }, - Comment: "s390 parameter ordering for clone is different", - Includes: types.Filter{ - Arches: []string{"s390", "s390x"}, - }, - Excludes: types.Filter{ - Caps: []string{"CAP_SYS_ADMIN"}, - }, - }, - { - Names: []string{ - "reboot", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_BOOT"}, - }, - }, - { - Names: []string{ - "chroot", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_CHROOT"}, - }, - }, - { - Names: []string{ - "delete_module", - "init_module", - "finit_module", - "query_module", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_MODULE"}, - }, - }, - { - Names: []string{ - "acct", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_PACCT"}, - }, - }, - { - Names: []string{ - "kcmp", - "process_vm_readv", - "process_vm_writev", - "ptrace", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_PTRACE"}, - }, - }, - { - Names: []string{ - "iopl", - "ioperm", - }, - Action: 
types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_RAWIO"}, - }, - }, - { - Names: []string{ - "settimeofday", - "stime", - "clock_settime", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_TIME"}, - }, - }, - { - Names: []string{ - "vhangup", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_TTY_CONFIG"}, - }, - }, - { - Names: []string{ - "get_mempolicy", - "mbind", - "set_mempolicy", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_NICE"}, - }, - }, - { - Names: []string{ - "syslog", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYSLOG"}, - }, - }, - } - - return &types.Seccomp{ - DefaultAction: types.ActErrno, - ArchMap: arches(), - Syscalls: syscalls, - } -} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go deleted file mode 100644 index 67e06401f1..0000000000 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build linux,!seccomp - -package seccomp // import "github.com/docker/docker/profiles/seccomp" - -import ( - "github.com/docker/docker/api/types" -) - -// DefaultProfile returns a nil pointer on unsupported systems. -func DefaultProfile() *types.Seccomp { - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitattributes b/vendor/github.com/fsouza/go-dockerclient/.gitattributes new file mode 100644 index 0000000000..6313b56c57 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore index ef22245eae..5f6b48eae0 100644 --- a/vendor/github.com/fsouza/go-dockerclient/.gitignore +++ b/vendor/github.com/fsouza/go-dockerclient/.gitignore @@ -1,4 +1,2 @@ # temporary symlink for testing testing/data/symlink -Gopkg.lock -vendor/ diff --git a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml new file mode 100644 index 0000000000..aa3ab39e5f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml @@ -0,0 +1,29 @@ +run: + deadline: 5m + + skip-dirs: + - internal + +issues: + exclude-rules: + - path: _test\.go + linters: + - bodyclose + - goconst + - gosec + - scopelint + - path: testing[/\\].+\.go + linters: + - gosec + +linters: + enable-all: true + disable: + - dupl + - errcheck + - funlen + - gochecknoglobals + - gocognit + - goconst + - lll + - wsl diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml index 74e54aa778..060f9e6a89 100644 --- a/vendor/github.com/fsouza/go-dockerclient/.travis.yml +++ b/vendor/github.com/fsouza/go-dockerclient/.travis.yml @@ -1,23 +1,21 @@ dist: xenial language: go go: - - 1.11.x - 1.12.x + - 1.13.x os: - linux - osx - windows env: matrix: - - GOARCH=amd64 DEP_TOOL=mod GO111MODULE=on - - GOARCH=386 DEP_TOOL=mod GO111MODULE=on - - GOARCH=amd64 DEP_TOOL=dep GO111MODULE=off - - GOARCH=386 DEP_TOOL=dep GO111MODULE=off + - GOARCH=amd64 + - GOARCH=386 global: - GOPROXY=https://proxy.golang.org + - GO111MODULE=on install: - travis-scripts/win-setup.bash - - make testdeps DEP_TOOL=${DEP_TOOL} 
script: - travis_wait 25 travis-scripts/run-tests.bash services: @@ -26,8 +24,8 @@ matrix: fast_finish: true exclude: - os: osx - env: GOARCH=386 DEP_TOOL=dep GO111MODULE=off + env: GOARCH=386 - os: osx - env: GOARCH=386 DEP_TOOL=mod GO111MODULE=on + env: GOARCH=386 allow_failures: - os: windows diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS index 4e9f7b8e8a..663410f959 100644 --- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS +++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS @@ -15,6 +15,7 @@ Andrews Medina Andrey Sibiryov Andy Goldstein Anirudh Aithal +Antoine Brechon Antonio Murdaca Artem Sidorenko Arthur Rodrigues @@ -51,6 +52,7 @@ Damien Lespiau Damon Wang Dan Williams Daniel, Dao Quang Minh +Daniel Black Daniel Garcia Daniel Hess Daniel Hiltgen @@ -117,6 +119,7 @@ Kevin Xu Kim, Hirokuni Kostas Lekkas Kyle Allan +Kyle Quest Yunhee Lee Liron Levin Lior Yankovich @@ -185,6 +188,7 @@ Tim Schindler Timothy St. Clair Tobi Knaup Tom Wilkie +Tomas Knappek Tonic ttyh061 upccup diff --git a/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml b/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml deleted file mode 100644 index 4be9ee73aa..0000000000 --- a/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml +++ /dev/null @@ -1,23 +0,0 @@ -[[constraint]] - name = "github.com/Microsoft/go-winio" - version = "v0.4.11" - -[[constraint]] - branch = "master" - name = "github.com/docker/docker" - -[[constraint]] - name = "github.com/docker/go-units" - version = "v0.3.3" - -[[constraint]] - name = "github.com/google/go-cmp" - version = "v0.2.0" - -[[constraint]] - name = "github.com/gorilla/mux" - version = "v1.6.2 - v1.7.0" - -[[override]] - name = "github.com/docker/libnetwork" - revision = "19279f0492417475b6bfbd0aa529f73e8f178fb5" diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile index be71a3c26b..e0880ff67b 100644 --- a/vendor/github.com/fsouza/go-dockerclient/Makefile +++ b/vendor/github.com/fsouza/go-dockerclient/Makefile @@ -1,41 +1,27 @@ .PHONY: \ all \ - staticcheck \ + lint \ fmt \ - fmtcheck \ pretest \ test \ integration -DEP_TOOL ?= mod - all: test -staticcheck: - GO111MODULE=off go get honnef.co/go/tools/cmd/staticcheck - staticcheck ./... - -fmtcheck: - if [ -z "$${SKIP_FMT_CHECK}" ]; then [ -z "$$(gofumpt -s -d . | tee /dev/stderr)" ]; fi +lint: + cd /tmp && GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@latest + golangci-lint run fmt: GO111MODULE=off go get mvdan.cc/gofumpt gofumpt -s -w . -testdeps: -ifeq ($(DEP_TOOL), dep) - GO111MODULE=off go get -u github.com/golang/dep/cmd/dep - dep ensure -v -else - go mod download -endif - -pretest: staticcheck fmtcheck +pretest: lint gotest: go test -race -vet all ./... -test: testdeps pretest gotest +test: pretest gotest integration: go test -tags docker_integration -run TestIntegration -v diff --git a/vendor/github.com/fsouza/go-dockerclient/README.md b/vendor/github.com/fsouza/go-dockerclient/README.md index 501b967f7e..b7af7d0b9f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/README.md +++ b/vendor/github.com/fsouza/go-dockerclient/README.md @@ -26,6 +26,12 @@ feature may get implemented/merged. For new projects, using the official SDK is probably more appropriate as go-dockerclient lags behind the official SDK. 
+When using the official SDK, keep in mind that because of how its +dependencies are organized, you may need some extra steps in order to be able +to import it in your projects (see +[#784](https://github.com/fsouza/go-dockerclient/issues/784) and +[moby/moby#28269](https://github.com/moby/moby/issues/28269)). + ## Example ```go @@ -85,7 +91,8 @@ func main() { If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables `DOCKER_HOST`, -`DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH`, you can use NewClientFromEnv. +`DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH`, and `DOCKER_API_VERSION`, you can use +NewClientFromEnv. ```go @@ -111,19 +118,20 @@ All development commands can be seen in the [Makefile](Makefile). Committed code must pass: -* [staticcheck](https://staticcheck.io/) +* [golangci-lint](https://github.com/golangci/golangci-lint) * [gofumpt](https://github.com/mvdan/gofumpt) * [go test](https://golang.org/cmd/go/#hdr-Test_packages) Running ``make test`` will check all of these. You can reformat the code with ``make fmt``. -## Vendoring / Modules +## Modules + +go-dockerclient supports Go modules. -go-dockerclient supports [dep](https://github.com/golang/dep/) for vendoring -and can also be installed as a module. If you're using dep or Go modules, you -should be able to pick go-dockerclient releases and get the proper -dependencies. +If you're using dep, you can check the [releases +page](https://github.com/fsouza/go-dockerclient/releases) for the latest +release fully compatible with dep. With other vendoring tools, users might need to specify go-dockerclient's dependencies manually. diff --git a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml index 6f49a8b841..d9f374f50a 100644 --- a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml +++ b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml @@ -5,20 +5,10 @@ clone_folder: c:\gopath\src\github.com\fsouza\go-dockerclient environment: GOPATH: c:\gopath GOPROXY: https://proxy.golang.org - SKIP_FMT_CHECK: 1 + GO111MODULE: on matrix: - - GOVERSION: &go111 "1.11.10" - DEP_TOOL: dep - GO111MODULE: off - - GOVERSION: &go112 "1.12.5" - DEP_TOOL: dep - GO111MODULE: off - - GOVERSION: *go111 - DEP_TOOL: mod - GO111MODULE: on - - GOVERSION: *go112 - DEP_TOOL: mod - GO111MODULE: on + - GOVERSION: "1.12.10" + - GOVERSION: "1.13.1" install: - choco install make - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% @@ -26,8 +16,8 @@ install: - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip - 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL build_script: - - make testdeps DEP_TOOL=%DEP_TOOL% + - make pretest test_script: - - make pretest gotest DEP_TOOL=%DEP_TOOL% + - make gotest matrix: fast_finish: true diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go index e7de977012..eb1a317163 100644 --- a/vendor/github.com/fsouza/go-dockerclient/auth.go +++ b/vendor/github.com/fsouza/go-dockerclient/auth.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "io/ioutil" + "net/http" "os" "path" "strings" @@ -93,9 +94,11 @@ func NewAuthConfigurationsFromFile(path string) (*AuthConfigurations, error) { func cfgPaths(dockerConfigEnv string, homeEnv string) []string { var paths []string if dockerConfigEnv != "" { + paths = append(paths, path.Join(dockerConfigEnv, "plaintext-passwords.json")) paths = append(paths, path.Join(dockerConfigEnv, "config.json")) } if homeEnv != "" { + paths =
append(paths, path.Join(homeEnv, ".docker", "plaintext-passwords.json")) paths = append(paths, path.Join(homeEnv, ".docker", "config.json")) paths = append(paths, path.Join(homeEnv, ".dockercfg")) } @@ -217,7 +220,7 @@ func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) { if conf == nil { return authStatus, errors.New("conf is nil") } - resp, err := c.do("POST", "/auth", doOptions{data: conf}) + resp, err := c.do(http.MethodPost, "/auth", doOptions{data: conf}) if err != nil { return authStatus, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go index 9953e32537..825ba38abb 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client.go +++ b/vendor/github.com/fsouza/go-dockerclient/client.go @@ -32,8 +32,8 @@ import ( "time" "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/stdcopy" - "github.com/fsouza/go-dockerclient/internal/jsonmessage" ) const ( @@ -54,7 +54,9 @@ var ( ErrInactivityTimeout = errors.New("inactivity time exceeded timeout") apiVersion112, _ = NewAPIVersion("1.12") + apiVersion118, _ = NewAPIVersion("1.18") apiVersion119, _ = NewAPIVersion("1.19") + apiVersion121, _ = NewAPIVersion("1.21") apiVersion124, _ = NewAPIVersion("1.24") apiVersion125, _ = NewAPIVersion("1.25") apiVersion135, _ = NewAPIVersion("1.35") @@ -262,16 +264,19 @@ func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString stri } // NewClientFromEnv returns a Client instance ready for communication created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH. +// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH, +// and DOCKER_API_VERSION. // // See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. // See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. +// See https://github.com/moby/moby/blob/28d7dba41d0c0d9c7f0dafcc79d3c59f2b3f5dc3/client/options.go#L51 func NewClientFromEnv() (*Client, error) { - client, err := NewVersionedClientFromEnv("") + apiVersionString := os.Getenv("DOCKER_API_VERSION") + client, err := NewVersionedClientFromEnv(apiVersionString) if err != nil { return nil, err } - client.SkipServerVersionCheck = true + client.SkipServerVersionCheck = apiVersionString == "" return client, nil } @@ -395,7 +400,7 @@ func (c *Client) Ping() error { // See https://goo.gl/wYfgY1 for more details. 
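// [Editor's sketch — not part of the upstream diff] The NewClientFromEnv
// change above means the client now honors DOCKER_API_VERSION, and only skips
// the server version check when that variable is unset. A minimal, runnable
// illustration (endpoint and version values are hypothetical):
//
//	package main
//
//	import (
//		"log"
//		"os"
//
//		docker "github.com/fsouza/go-dockerclient"
//	)
//
//	func main() {
//		os.Setenv("DOCKER_HOST", "unix:///var/run/docker.sock")
//		// Pinning the version also re-enables the server version check.
//		os.Setenv("DOCKER_API_VERSION", "1.24")
//		client, err := docker.NewClientFromEnv()
//		if err != nil {
//			log.Fatal(err)
//		}
//		if err := client.Ping(); err != nil {
//			log.Fatal(err)
//		}
//	}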
func (c *Client) PingWithContext(ctx context.Context) error { path := "/_ping" - resp, err := c.do("GET", path, doOptions{context: ctx}) + resp, err := c.do(http.MethodGet, path, doOptions{context: ctx}) if err != nil { return err } @@ -407,7 +412,7 @@ func (c *Client) PingWithContext(ctx context.Context) error { } func (c *Client) getServerAPIVersionString() (version string, err error) { - resp, err := c.do("GET", "/version", doOptions{}) + resp, err := c.do(http.MethodGet, "/version", doOptions{}) if err != nil { return "", err } @@ -463,7 +468,7 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e req.Header.Set("User-Agent", userAgent) if doOptions.data != nil { req.Header.Set("Content-Type", "application/json") - } else if method == "POST" { + } else if method == http.MethodPost { req.Header.Set("Content-Type", "plain/text") } @@ -518,7 +523,7 @@ func chooseError(ctx context.Context, err error) error { } func (c *Client) stream(method, path string, streamOptions streamOptions) error { - if (method == "POST" || method == "PUT") && streamOptions.in == nil { + if (method == http.MethodPost || method == http.MethodPut) && streamOptions.in == nil { streamOptions.in = bytes.NewReader(nil) } if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { @@ -527,12 +532,25 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error return err } } - req, err := http.NewRequest(method, c.getURL(path), streamOptions.in) + return c.streamURL(method, c.getURL(path), streamOptions) +} + +func (c *Client) streamURL(method, url string, streamOptions streamOptions) error { + if (method == http.MethodPost || method == http.MethodPut) && streamOptions.in == nil { + streamOptions.in = bytes.NewReader(nil) + } + if !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { + err := c.checkAPIVersion() + if err != nil { + return err + } + } + req, err := http.NewRequest(method, url, streamOptions.in) if err != nil { return err } req.Header.Set("User-Agent", userAgent) - if method == "POST" { + if method == http.MethodPost { req.Header.Set("Content-Type", "plain/text") } for key, val := range streamOptions.headers { @@ -591,6 +609,7 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error return chooseError(subCtx, err) } + defer resp.Body.Close() } else { if resp, err = c.HTTPClient.Do(req.WithContext(subCtx)); err != nil { if strings.Contains(err.Error(), "connection refused") { @@ -598,11 +617,11 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error } return chooseError(subCtx, err) } + defer resp.Body.Close() if streamOptions.reqSent != nil { close(streamOptions.reqSent) } } - defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode >= 400 { return newError(resp) } @@ -761,9 +780,10 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close errs := make(chan error, 1) quit := make(chan struct{}) go func() { - //lint:ignore SA1019 this is needed here + //nolint:staticcheck clientconn := httputil.NewClientConn(dial, nil) defer clientconn.Close() + //nolint:bodyclose clientconn.Do(req) if hijackOptions.success != nil { hijackOptions.success <- struct{}{} @@ -858,6 +878,29 @@ func (c *Client) getURL(path string) string { return fmt.Sprintf("%s%s", urlStr, path) } +func (c *Client) getPath(basepath string, opts interface{}) (string, error) { + queryStr, requiredAPIVersion := queryStringVersion(opts) + return c.pathVersionCheck(basepath, 
queryStr, requiredAPIVersion) +} + +func (c *Client) pathVersionCheck(basepath, queryStr string, requiredAPIVersion APIVersion) (string, error) { + urlStr := strings.TrimRight(c.endpointURL.String(), "/") + if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol { + urlStr = "" + } + if c.requestedAPIVersion != nil { + if c.requestedAPIVersion.GreaterThanOrEqualTo(requiredAPIVersion) { + return fmt.Sprintf("%s/v%s%s?%s", urlStr, c.requestedAPIVersion, basepath, queryStr), nil + } + return "", fmt.Errorf("API %s requires version %s, requested version %s is insufficient", + basepath, requiredAPIVersion, c.requestedAPIVersion) + } + if requiredAPIVersion != nil { + return fmt.Sprintf("%s/v%s%s?%s", urlStr, requiredAPIVersion, basepath, queryStr), nil + } + return fmt.Sprintf("%s%s?%s", urlStr, basepath, queryStr), nil +} + // getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX // domain socket to the given path. func (c *Client) getFakeNativeURL(path string) string { @@ -874,17 +917,18 @@ func (c *Client) getFakeNativeURL(path string) string { return fmt.Sprintf("%s%s", urlStr, path) } -func queryString(opts interface{}) string { +func queryStringVersion(opts interface{}) (string, APIVersion) { if opts == nil { - return "" + return "", nil } value := reflect.ValueOf(opts) if value.Kind() == reflect.Ptr { value = value.Elem() } if value.Kind() != reflect.Struct { - return "" + return "", nil } + var apiVersion APIVersion items := url.Values(map[string][]string{}) for i := 0; i < value.NumField(); i++ { field := value.Type().Field(i) @@ -897,53 +941,80 @@ func queryString(opts interface{}) string { } else if key == "-" { continue } - addQueryStringValue(items, key, value.Field(i)) + if addQueryStringValue(items, key, value.Field(i)) { + verstr := field.Tag.Get("ver") + if verstr != "" { + ver, _ := NewAPIVersion(verstr) + if apiVersion == nil { + apiVersion = ver + } else if ver.GreaterThan(apiVersion) { + apiVersion = ver + } + } + } } - return items.Encode() + return items.Encode(), apiVersion } -func addQueryStringValue(items url.Values, key string, v reflect.Value) { +func queryString(opts interface{}) string { + s, _ := queryStringVersion(opts) + return s +} + +func addQueryStringValue(items url.Values, key string, v reflect.Value) bool { switch v.Kind() { case reflect.Bool: if v.Bool() { items.Add(key, "1") + return true } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if v.Int() > 0 { items.Add(key, strconv.FormatInt(v.Int(), 10)) + return true } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: if v.Uint() > 0 { items.Add(key, strconv.FormatUint(v.Uint(), 10)) + return true } case reflect.Float32, reflect.Float64: if v.Float() > 0 { items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) + return true } case reflect.String: if v.String() != "" { items.Add(key, v.String()) + return true } case reflect.Ptr: if !v.IsNil() { if b, err := json.Marshal(v.Interface()); err == nil { items.Add(key, string(b)) + return true } } case reflect.Map: if len(v.MapKeys()) > 0 { if b, err := json.Marshal(v.Interface()); err == nil { items.Add(key, string(b)) + return true } } case reflect.Array, reflect.Slice: vLen := v.Len() + var valuesAdded int if vLen > 0 { for i := 0; i < vLen; i++ { - addQueryStringValue(items, key, v.Index(i)) + if addQueryStringValue(items, key, v.Index(i)) { + valuesAdded++ + } } } + return valuesAdded > 0 } + return false } // Error represents failures in the 
API. It represents a failure from the API. diff --git a/vendor/github.com/fsouza/go-dockerclient/client_windows.go b/vendor/github.com/fsouza/go-dockerclient/client_windows.go index 63d97ec653..731d5c9628 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client_windows.go +++ b/vendor/github.com/fsouza/go-dockerclient/client_windows.go @@ -32,7 +32,8 @@ func (c *Client) initializeNativeClient(trFunc func() *http.Transport) { return } namedPipePath := c.endpointURL.Path - dialFunc := func(network, addr string) (net.Conn, error) { + //nolint:unparam + dialFunc := func(_, addr string) (net.Conn, error) { timeout := namedPipeConnectTimeout return winio.DialPipe(namedPipePath, &timeout) } diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go index e40c9c2e44..0a8ab361cb 100644 --- a/vendor/github.com/fsouza/go-dockerclient/container.go +++ b/vendor/github.com/fsouza/go-dockerclient/container.go @@ -53,6 +53,7 @@ type APIMount struct { Mode string `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"` RW bool `json:"RW,omitempty" yaml:"RW,omitempty" toml:"RW,omitempty"` Propagation string `json:"Propagation,omitempty" yaml:"Propagation,omitempty" toml:"Propagation,omitempty"` + Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` } // APIContainers represents each container in the list returned by @@ -84,7 +85,7 @@ type NetworkList struct { // See https://goo.gl/kaOHGw for more details. func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) { path := "/containers/json?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } @@ -473,6 +474,12 @@ type Container struct { RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty" toml:"RestartCount,omitempty"` AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty" toml:"AppArmorProfile,omitempty"` + + MountLabel string `json:"MountLabel,omitempty" yaml:"MountLabel,omitempty" toml:"MountLabel,omitempty"` + ProcessLabel string `json:"ProcessLabel,omitempty" yaml:"ProcessLabel,omitempty" toml:"ProcessLabel,omitempty"` + Platform string `json:"Platform,omitempty" yaml:"Platform,omitempty" toml:"Platform,omitempty"` + SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty" toml:"SizeRw,omitempty"` + SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty" toml:"SizeRootFs,omitempty"` } // UpdateContainerOptions specify parameters to the UpdateContainer function. @@ -499,7 +506,7 @@ type UpdateContainerOptions struct { // // See https://goo.gl/Y6fXUy for more details. func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error { - resp, err := c.do("POST", fmt.Sprintf("/containers/"+id+"/update"), doOptions{ + resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+id+"/update"), doOptions{ data: opts, forceJSON: true, context: opts.Context, @@ -527,7 +534,7 @@ type RenameContainerOptions struct { // // See https://goo.gl/46inai for more details. 
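// [Editor's sketch — not part of the upstream diff] The Container struct now
// surfaces MountLabel, ProcessLabel, Platform, SizeRw and SizeRootFs from the
// inspect payload. A short usage sketch (assumes `client` and a container ID
// `id` as in the earlier example; the size fields are only populated when the
// daemon reports them):
//
//	container, err := client.InspectContainer(id)
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("platform=%s mountLabel=%q sizeRw=%d",
//		container.Platform, container.MountLabel, container.SizeRw)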
func (c *Client) RenameContainer(opts RenameContainerOptions) error { - resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{ + resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{ context: opts.Context, }) if err != nil { @@ -548,13 +555,14 @@ func (c *Client) InspectContainer(id string) (*Container, error) { // The context object can be used to cancel the inspect request. // // See https://goo.gl/FaI5JT for more details. +//nolint:golint func (c *Client) InspectContainerWithContext(id string, ctx context.Context) (*Container, error) { return c.inspectContainer(id, doOptions{context: ctx}) } func (c *Client) inspectContainer(id string, opts doOptions) (*Container, error) { path := "/containers/" + id + "/json" - resp, err := c.do("GET", path, opts) + resp, err := c.do(http.MethodGet, path, opts) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchContainer{ID: id} @@ -574,7 +582,7 @@ func (c *Client) inspectContainer(id string, opts doOptions) (*Container, error) // See https://goo.gl/15KKzh for more details. func (c *Client) ContainerChanges(id string) ([]Change, error) { path := "/containers/" + id + "/changes" - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchContainer{ID: id} @@ -610,7 +618,7 @@ type CreateContainerOptions struct { func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { path := "/containers/create?" + queryString(opts) resp, err := c.do( - "POST", + http.MethodPost, path, doOptions{ data: struct { @@ -728,6 +736,7 @@ type HostConfig struct { Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty" toml:"Binds,omitempty"` CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty" toml:"CapAdd,omitempty"` CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty" toml:"CapDrop,omitempty"` + Capabilities []string `json:"Capabilities,omitempty" yaml:"Capabilities,omitempty" toml:"Capabilities,omitempty"` // Mutually exclusive w.r.t. 
CapAdd and CapDrop API v1.40 GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty" toml:"GroupAdd,omitempty"` ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty" toml:"ContainerIDFile,omitempty"` LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty" toml:"LxcConf,omitempty"` @@ -741,6 +750,8 @@ type HostConfig struct { UsernsMode string `json:"UsernsMode,omitempty" yaml:"UsernsMode,omitempty" toml:"UsernsMode,omitempty"` NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty" toml:"NetworkMode,omitempty"` IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty" toml:"IpcMode,omitempty"` + Isolation string `json:"Isolation,omitempty" yaml:"Isolation,omitempty" toml:"Isolation,omitempty"` // Windows only + ConsoleSize [2]int `json:"ConsoleSize,omitempty" yaml:"ConsoleSize,omitempty" toml:"ConsoleSize,omitempty"` // Windows only height x width PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty" toml:"PidMode,omitempty"` UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty" toml:"UTSMode,omitempty"` RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty" toml:"RestartPolicy,omitempty"` @@ -748,6 +759,7 @@ type HostConfig struct { DeviceCgroupRules []string `json:"DeviceCgroupRules,omitempty" yaml:"DeviceCgroupRules,omitempty" toml:"DeviceCgroupRules,omitempty"` LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty" toml:"LogConfig,omitempty"` SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty" toml:"SecurityOpt,omitempty"` + CgroupnsMode string `json:"CgroupnsMode,omitempty" yaml:"CgroupnsMode,omitempty" toml:"CgroupnsMode,omitempty"` // v1.40+ Cgroup string `json:"Cgroup,omitempty" yaml:"Cgroup,omitempty" toml:"Cgroup,omitempty"` CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty" toml:"CgroupParent,omitempty"` Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"` @@ -783,6 +795,8 @@ type HostConfig struct { IOMaximumBandwidth int64 `json:"IOMaximumBandwidth,omitempty" yaml:"IOMaximumBandwidth,omitempty"` IOMaximumIOps int64 `json:"IOMaximumIOps,omitempty" yaml:"IOMaximumIOps,omitempty"` Mounts []HostMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` + MaskedPaths []string `json:"MaskedPaths,omitempty" yaml:"MaskedPaths,omitempty" toml:"MaskedPaths,omitempty"` + ReadonlyPaths []string `json:"ReadonlyPaths,omitempty" yaml:"ReadonlyPaths,omitempty" toml:"ReadonlyPaths,omitempty"` Runtime string `json:"Runtime,omitempty" yaml:"Runtime,omitempty" toml:"Runtime,omitempty"` Init bool `json:",omitempty" yaml:",omitempty"` Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"` @@ -819,6 +833,7 @@ func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { // API 1.24 or greater. // // See https://goo.gl/fbOSZy for more details. 
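// [Editor's sketch — not part of the upstream diff] The HostConfig additions
// above (Capabilities, CgroupnsMode, MaskedPaths, ReadonlyPaths) map to API
// v1.40 request fields. A hedged sketch of a caller using them; the image
// name and values are illustrative only:
//
//	container, err := client.CreateContainer(docker.CreateContainerOptions{
//		Name:   "demo",
//		Config: &docker.Config{Image: "busybox", Cmd: []string{"sleep", "60"}},
//		HostConfig: &docker.HostConfig{
//			// Per the comment above, Capabilities is mutually
//			// exclusive with CapAdd/CapDrop.
//			Capabilities: []string{"CAP_NET_BIND_SERVICE"},
//			CgroupnsMode: "private",
//			MaskedPaths:  []string{"/proc/kcore"},
//		},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = container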
+//nolint:golint func (c *Client) StartContainerWithContext(id string, hostConfig *HostConfig, ctx context.Context) error { return c.startContainer(id, hostConfig, doOptions{context: ctx}) } @@ -832,7 +847,7 @@ func (c *Client) startContainer(id string, hostConfig *HostConfig, opts doOption opts.data = hostConfig opts.forceJSON = true } - resp, err := c.do("POST", path, opts) + resp, err := c.do(http.MethodPost, path, opts) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: id, Err: err} @@ -859,13 +874,14 @@ func (c *Client) StopContainer(id string, timeout uint) error { // container request. // // See https://goo.gl/R9dZcV for more details. +//nolint:golint func (c *Client) StopContainerWithContext(id string, timeout uint, ctx context.Context) error { return c.stopContainer(id, timeout, doOptions{context: ctx}) } func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error { path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) - resp, err := c.do("POST", path, opts) + resp, err := c.do(http.MethodPost, path, opts) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: id} @@ -885,7 +901,7 @@ func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error { // See https://goo.gl/MrAKQ5 for more details. func (c *Client) RestartContainer(id string, timeout uint) error { path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) - resp, err := c.do("POST", path, doOptions{}) + resp, err := c.do(http.MethodPost, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: id} @@ -901,7 +917,7 @@ func (c *Client) RestartContainer(id string, timeout uint) error { // See https://goo.gl/D1Yaii for more details. func (c *Client) PauseContainer(id string) error { path := fmt.Sprintf("/containers/%s/pause", id) - resp, err := c.do("POST", path, doOptions{}) + resp, err := c.do(http.MethodPost, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: id} @@ -917,7 +933,7 @@ func (c *Client) PauseContainer(id string) error { // See https://goo.gl/sZ2faO for more details. 
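// [Editor's sketch — not part of the upstream diff] The WithContext variants
// shown here let callers bound how long a request may block. For example,
// stopping a container with a hypothetical 30-second client-side deadline:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	// The second argument (10) is the in-container grace period, in
//	// seconds, before the daemon sends SIGKILL.
//	if err := client.StopContainerWithContext(id, 10, ctx); err != nil {
//		log.Fatal(err)
//	}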
func (c *Client) UnpauseContainer(id string) error { path := fmt.Sprintf("/containers/%s/unpause", id) - resp, err := c.do("POST", path, doOptions{}) + resp, err := c.do(http.MethodPost, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: id} @@ -947,7 +963,7 @@ func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { args = fmt.Sprintf("?ps_args=%s", psArgs) } path := fmt.Sprintf("/containers/%s/top%s", id, args) - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return result, &NoSuchContainer{ID: id} @@ -1115,7 +1131,7 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) { reqSent := make(chan struct{}) go func() { defer close(errC) - err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{ + err := c.stream(http.MethodGet, fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{ rawJSONStream: true, useJSONDecoder: true, stdout: writeCloser, @@ -1183,7 +1199,7 @@ type KillContainerOptions struct { // See https://goo.gl/JnTxXZ for more details. func (c *Client) KillContainer(opts KillContainerOptions) error { path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { e, ok := err.(*Error) if !ok { @@ -1224,7 +1240,7 @@ type RemoveContainerOptions struct { // See https://goo.gl/hL5IPC for more details. func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { path := "/containers/" + opts.ID + "?" + queryString(opts) - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchContainer{ID: opts.ID} @@ -1253,7 +1269,7 @@ type UploadToContainerOptions struct { func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error { url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - return c.stream("PUT", url, streamOptions{ + return c.stream(http.MethodPut, url, streamOptions{ in: opts.InputStream, context: opts.Context, }) @@ -1276,7 +1292,7 @@ type DownloadFromContainerOptions struct { func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error { url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - return c.stream("GET", url, streamOptions{ + return c.stream(http.MethodGet, url, streamOptions{ setRawTerminal: true, stdout: opts.OutputStream, inactivityTimeout: opts.InactivityTimeout, @@ -1309,7 +1325,7 @@ func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { return errors.New("go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead") } url := fmt.Sprintf("/containers/%s/copy", opts.Container) - resp, err := c.do("POST", url, doOptions{ + resp, err := c.do(http.MethodPost, url, doOptions{ data: opts, context: opts.Context, }) @@ -1337,12 +1353,13 @@ func (c *Client) WaitContainer(id string) (int, error) { // inspect request. // // See https://goo.gl/4AGweZ for more details. 
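// [Editor's sketch — not part of the upstream diff] DownloadFromContainer,
// shown above, streams a tar archive of the requested path rather than raw
// file bytes. A short sketch (the path and container ID are illustrative):
//
//	var buf bytes.Buffer
//	err := client.DownloadFromContainer(id, docker.DownloadFromContainerOptions{
//		Path:         "/etc/hostname", // path inside the container
//		OutputStream: &buf,            // receives a tar stream
//	})
//	if err != nil {
//		log.Fatal(err)
//	}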
+//nolint:golint func (c *Client) WaitContainerWithContext(id string, ctx context.Context) (int, error) { return c.waitContainer(id, doOptions{context: ctx}) } func (c *Client) waitContainer(id string, opts doOptions) (int, error) { - resp, err := c.do("POST", "/containers/"+id+"/wait", opts) + resp, err := c.do(http.MethodPost, "/containers/"+id+"/wait", opts) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return 0, &NoSuchContainer{ID: id} @@ -1376,7 +1393,7 @@ type CommitContainerOptions struct { // See https://goo.gl/CzIguf for more details. func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { path := "/commit?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.Run, context: opts.Context, }) @@ -1411,6 +1428,9 @@ type AttachToContainerOptions struct { // to unexpected behavior. Success chan struct{} + // Override the key sequence for detaching a container. + DetachKeys string + // Use raw terminal? Usually true when the container contains a TTY. RawTerminal bool `qs:"-"` @@ -1450,7 +1470,7 @@ func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (Cl return nil, &NoSuchContainer{ID: opts.Container} } path := "/containers/" + opts.Container + "/attach?" + queryString(opts) - return c.hijack("POST", path, hijackOptions{ + return c.hijack(http.MethodPost, path, hijackOptions{ success: opts.Success, setRawTerminal: opts.RawTerminal, in: opts.InputStream, @@ -1500,7 +1520,7 @@ func (c *Client) Logs(opts LogsOptions) error { opts.Tail = "all" } path := "/containers/" + opts.Container + "/logs?" + queryString(opts) - return c.stream("GET", path, streamOptions{ + return c.stream(http.MethodGet, path, streamOptions{ setRawTerminal: opts.RawTerminal, stdout: opts.OutputStream, stderr: opts.ErrorStream, @@ -1516,7 +1536,7 @@ func (c *Client) ResizeContainerTTY(id string, height, width int) error { params := make(url.Values) params.Set("h", strconv.Itoa(height)) params.Set("w", strconv.Itoa(width)) - resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{}) + resp, err := c.do(http.MethodPost, "/containers/"+id+"/resize?"+params.Encode(), doOptions{}) if err != nil { return err } @@ -1544,7 +1564,7 @@ func (c *Client) ExportContainer(opts ExportContainerOptions) error { return &NoSuchContainer{ID: opts.ID} } url := fmt.Sprintf("/containers/%s/export", opts.ID) - return c.stream("GET", url, streamOptions{ + return c.stream(http.MethodGet, url, streamOptions{ setRawTerminal: true, stdout: opts.OutputStream, inactivityTimeout: opts.InactivityTimeout, @@ -1573,7 +1593,7 @@ type PruneContainersResults struct { // See https://goo.gl/wnkgDT for more details. func (c *Client) PruneContainers(opts PruneContainersOptions) (*PruneContainersResults, error) { path := "/containers/prune?" 
+ queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/distribution.go b/vendor/github.com/fsouza/go-dockerclient/distribution.go index d0f8ce74cc..6e5e12f7dd 100644 --- a/vendor/github.com/fsouza/go-dockerclient/distribution.go +++ b/vendor/github.com/fsouza/go-dockerclient/distribution.go @@ -6,6 +6,7 @@ package docker import ( "encoding/json" + "net/http" "github.com/docker/docker/api/types/registry" ) @@ -13,7 +14,7 @@ import ( // InspectDistribution returns image digest and platform information by contacting the registry func (c *Client) InspectDistribution(name string) (*registry.DistributionInspect, error) { path := "/distribution/" + name + "/json" - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go index 3a3364d9d1..6de7c55357 100644 --- a/vendor/github.com/fsouza/go-dockerclient/event.go +++ b/vendor/github.com/fsouza/go-dockerclient/event.go @@ -178,7 +178,7 @@ func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error { return nil } -func (eventState *eventMonitoringState) disableEventMonitoring() error { +func (eventState *eventMonitoringState) disableEventMonitoring() { eventState.Lock() defer eventState.Unlock() @@ -191,7 +191,6 @@ func (eventState *eventMonitoringState) disableEventMonitoring() error { close(eventState.C) close(eventState.errC) } - return nil } func (eventState *eventMonitoringState) monitorEvents(c *Client) { @@ -330,17 +329,18 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan if err != nil { return err } - //lint:ignore SA1019 this is needed here + //nolint:staticcheck conn := httputil.NewClientConn(dial, nil) - req, err := http.NewRequest("GET", uri, nil) + req, err := http.NewRequest(http.MethodGet, uri, nil) if err != nil { return err } + //nolint:bodyclose res, err := conn.Do(req) if err != nil { return err } - //lint:ignore SA1019 ClientConn is needed here + //nolint:staticcheck go func(res *http.Response, conn *httputil.ClientConn) { defer conn.Close() defer res.Body.Close() diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go index d804b10b88..48d1ad349f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/exec.go +++ b/vendor/github.com/fsouza/go-dockerclient/exec.go @@ -30,6 +30,7 @@ type CreateExecOptions struct { Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"` User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"` WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty" toml:"WorkingDir,omitempty"` + DetachKeys string `json:"DetachKeys,omitempty" yaml:"DetachKeys,omitempty" toml:"DetachKeys,omitempty"` Context context.Context `json:"-"` AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"` AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"` @@ -50,7 +51,7 @@ func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { return nil, errors.New("exec configuration WorkingDir is only supported in API#1.35 and above") } path := 
fmt.Sprintf("/containers/%s/exec", opts.Container) - resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{data: opts, context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchContainer{ID: opts.Container} @@ -119,7 +120,7 @@ func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWa path := fmt.Sprintf("/exec/%s/start", id) if opts.Detach { - resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{data: opts, context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchExec{ID: id} @@ -130,7 +131,7 @@ func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWa return nil, nil } - return c.hijack("POST", path, hijackOptions{ + return c.hijack(http.MethodPost, path, hijackOptions{ success: opts.Success, setRawTerminal: opts.RawTerminal, in: opts.InputStream, @@ -151,7 +152,7 @@ func (c *Client) ResizeExecTTY(id string, height, width int) error { params.Set("w", strconv.Itoa(width)) path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode()) - resp, err := c.do("POST", path, doOptions{}) + resp, err := c.do(http.MethodPost, path, doOptions{}) if err != nil { return err } @@ -192,7 +193,7 @@ type ExecInspect struct { // See https://goo.gl/ctMUiW for more details func (c *Client) InspectExec(id string) (*ExecInspect, error) { path := fmt.Sprintf("/exec/%s/json", id) - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchExec{ID: id} diff --git a/vendor/github.com/fsouza/go-dockerclient/go.mod b/vendor/github.com/fsouza/go-dockerclient/go.mod index b4e75afc8e..cb0ef690c4 100644 --- a/vendor/github.com/fsouza/go-dockerclient/go.mod +++ b/vendor/github.com/fsouza/go-dockerclient/go.mod @@ -1,26 +1,26 @@ module github.com/fsouza/go-dockerclient -go 1.11 +go 1.12 require ( - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 - github.com/Microsoft/go-winio v0.4.12 + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect + github.com/Microsoft/go-winio v0.4.14 + github.com/Microsoft/hcsshim v0.8.6 // indirect + github.com/containerd/containerd v1.3.0 // indirect github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect - github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16 + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 github.com/gogo/protobuf v1.2.1 // indirect github.com/golang/protobuf v1.3.0 // indirect - github.com/google/go-cmp v0.3.0 - github.com/gorilla/mux v1.7.2 - github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd + github.com/google/go-cmp v0.3.1 + github.com/gorilla/mux v1.7.3 + github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v0.1.1 // indirect - github.com/pkg/errors v0.8.1 // indirect - github.com/sirupsen/logrus v1.3.0 // indirect - golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 - golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4 // indirect - golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa + golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad + google.golang.org/grpc v1.22.0 // indirect gotest.tools v2.2.0+incompatible // indirect ) diff --git a/vendor/github.com/fsouza/go-dockerclient/go.sum b/vendor/github.com/fsouza/go-dockerclient/go.sum index bc93f2daed..c4df2e9569 100644 --- a/vendor/github.com/fsouza/go-dockerclient/go.sum +++ b/vendor/github.com/fsouza/go-dockerclient/go.sum @@ -1,33 +1,44 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= -github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16 h1:dmUn0SuGx7unKFwxyeQ/oLUHhEfZosEDrpmYM+6MTuc= -github.com/docker/docker v0.7.3-0.20190309235953-33c3200e0d16/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce h1:H3csZuxZESJeeEiOxq4YXPNmLFbjl7u2qVBrAAGX/sA= +github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/gorilla/mux v1.7.1 h1:Dw4jY2nghMMRsh1ol8dv1axHkDwMQK2DHerMNJsIpJU= -github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM= -github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= @@ -38,23 +49,38 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME= -github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa h1:lqti/xP+yD/6zH5TqEwx2MilNIJY5Vbc6Qr8J3qyPIQ= -golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go index f9e1c6f045..5f72d66457 100644 --- 
a/vendor/github.com/fsouza/go-dockerclient/image.go +++ b/vendor/github.com/fsouza/go-dockerclient/image.go @@ -88,7 +88,7 @@ var ( // InputStream are provided in BuildImageOptions ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream") - // ErrMustSpecifyNames is the error rreturned when the Names field on + // ErrMustSpecifyNames is the error returned when the Names field on // ExportImagesOptions is nil or empty ErrMustSpecifyNames = errors.New("must specify at least one name to export") ) @@ -109,7 +109,7 @@ type ListImagesOptions struct { // See https://goo.gl/BVzauZ for more details. func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { path := "/images/json?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } @@ -129,13 +129,14 @@ type ImageHistory struct { Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Tags,omitempty"` CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty" toml:"CreatedBy,omitempty"` Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` + Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty" toml:"Comment,omitempty"` } // ImageHistory returns the history of the image by its name or ID. // // See https://goo.gl/fYtxQa for more details. func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { - resp, err := c.do("GET", "/images/"+name+"/history", doOptions{}) + resp, err := c.do(http.MethodGet, "/images/"+name+"/history", doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, ErrNoSuchImage @@ -154,7 +155,7 @@ func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { // // See https://goo.gl/Vd2Pck for more details. func (c *Client) RemoveImage(name string) error { - resp, err := c.do("DELETE", "/images/"+name, doOptions{}) + resp, err := c.do(http.MethodDelete, "/images/"+name, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return ErrNoSuchImage @@ -181,7 +182,7 @@ type RemoveImageOptions struct { // See https://goo.gl/Vd2Pck for more details. func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) - resp, err := c.do("DELETE", uri, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, uri, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return ErrNoSuchImage @@ -196,7 +197,7 @@ func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error // // See https://goo.gl/ncLTG8 for more details. func (c *Client) InspectImage(name string) (*Image, error) { - resp, err := c.do("GET", "/images/"+name+"/json", doOptions{}) + resp, err := c.do(http.MethodGet, "/images/"+name+"/json", doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, ErrNoSuchImage @@ -271,7 +272,7 @@ func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error name := opts.Name opts.Name = "" path := "/images/" + name + "/push?" 
+ queryString(&opts) - return c.stream("POST", path, streamOptions{ + return c.stream(http.MethodPost, path, streamOptions{ setRawTerminal: true, rawJSONStream: opts.RawJSONStream, headers: headers, @@ -288,6 +289,7 @@ func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error type PullImageOptions struct { Repository string `qs:"fromImage"` Tag string + Platform string `ver:"1.32"` // Only required for Docker Engine 1.9 or 1.10 w/ Remote API < 1.21 // and Docker Engine < 1.9 @@ -318,12 +320,16 @@ func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error opts.Repository = parts[0] opts.Tag = parts[1] } - return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) + return c.createImage(&opts, headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) } -func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error { - path := "/images/create?" + qs - return c.stream("POST", path, streamOptions{ +//nolint:golint +func (c *Client) createImage(opts interface{}, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error { + url, err := c.getPath("/images/create", opts) + if err != nil { + return err + } + return c.streamURL(http.MethodPost, url, streamOptions{ setRawTerminal: true, headers: headers, in: in, @@ -347,7 +353,7 @@ type LoadImageOptions struct { // // See https://goo.gl/rEsBV3 for more details. func (c *Client) LoadImage(opts LoadImageOptions) error { - return c.stream("POST", "/images/load", streamOptions{ + return c.stream(http.MethodPost, "/images/load", streamOptions{ setRawTerminal: true, in: opts.InputStream, stdout: opts.OutputStream, @@ -369,7 +375,7 @@ type ExportImageOptions struct { // // See https://goo.gl/AuySaA for more details. 
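// [Editor's sketch — not part of the upstream diff] The new `ver:"1.32"` tag
// on PullImageOptions.Platform means that setting Platform makes createImage
// build a version-prefixed /v1.32/images/create URL (or return an error if
// the client pinned an older API version). Illustrative usage:
//
//	err := client.PullImage(docker.PullImageOptions{
//		Repository: "alpine",
//		Tag:        "3.10",
//		Platform:   "linux/arm64", // only honored by API v1.32+
//	}, docker.AuthConfiguration{})
//	if err != nil {
//		log.Fatal(err)
//	}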
func (c *Client) ExportImage(opts ExportImageOptions) error { - return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ + return c.stream(http.MethodGet, fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ setRawTerminal: true, stdout: opts.OutputStream, inactivityTimeout: opts.InactivityTimeout, @@ -394,7 +400,28 @@ func (c *Client) ExportImages(opts ExportImagesOptions) error { if opts.Names == nil || len(opts.Names) == 0 { return ErrMustSpecifyNames } - return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{ + // API < 1.25 allows multiple name values + // 1.25 says name must be a comma separated list + var err error + var exporturl string + if c.requestedAPIVersion.GreaterThanOrEqualTo(apiVersion125) { + str := opts.Names[0] + for _, val := range opts.Names[1:] { + str += "," + val + } + exporturl, err = c.getPath("/images/get", ExportImagesOptions{ + Names: []string{str}, + OutputStream: opts.OutputStream, + InactivityTimeout: opts.InactivityTimeout, + Context: opts.Context, + }) + } else { + exporturl, err = c.getPath("/images/get", &opts) + } + if err != nil { + return err + } + return c.streamURL(http.MethodGet, exporturl, streamOptions{ setRawTerminal: true, stdout: opts.OutputStream, inactivityTimeout: opts.InactivityTimeout, @@ -435,7 +462,7 @@ func (c *Client) ImportImage(opts ImportImageOptions) error { opts.InputStream = f opts.Source = "-" } - return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) + return c.createImage(&opts, nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) } // BuildImageOptions presents the set of information available for building an @@ -445,35 +472,39 @@ func (c *Client) ImportImage(opts ImportImageOptions) error { // https://goo.gl/4nYHwV.
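// [Editor's sketch — not part of the upstream diff] Note how ExportImages
// above now joins Names into one comma-separated value for API >= 1.25, while
// older daemons still get repeated name parameters. Caller-side usage is
// unchanged (the output file name is illustrative):
//
//	f, err := os.Create("images.tar")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	err = client.ExportImages(docker.ExportImagesOptions{
//		Names:        []string{"alpine:3.10", "busybox:latest"},
//		OutputStream: f, // receives a tar archive containing both images
//	})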
type BuildImageOptions struct { Context context.Context - Name string `qs:"t"` - Dockerfile string `qs:"dockerfile"` - CacheFrom []string `qs:"-"` - Memory int64 `qs:"memory"` - Memswap int64 `qs:"memswap"` - CPUShares int64 `qs:"cpushares"` - CPUQuota int64 `qs:"cpuquota"` - CPUPeriod int64 `qs:"cpuperiod"` - CPUSetCPUs string `qs:"cpusetcpus"` - Labels map[string]string `qs:"labels"` - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - Remote string `qs:"remote"` + Name string `qs:"t"` + Dockerfile string `ver:"1.25"` + ExtraHosts string `ver:"1.28"` + CacheFrom []string `qs:"-" ver:"1.25"` + Memory int64 + Memswap int64 + ShmSize int64 + CPUShares int64 + CPUQuota int64 `ver:"1.21"` + CPUPeriod int64 `ver:"1.21"` + CPUSetCPUs string + Labels map[string]string + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + Remote string Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header ContextDir string `qs:"-"` - Ulimits []ULimit `qs:"-"` - BuildArgs []BuildArg `qs:"-"` - NetworkMode string `qs:"networkmode"` + Ulimits []ULimit `qs:"-" ver:"1.18"` + BuildArgs []BuildArg `qs:"-" ver:"1.21"` + NetworkMode string `ver:"1.25"` + Platform string `ver:"1.32"` InactivityTimeout time.Duration `qs:"-"` - CgroupParent string `qs:"cgroupparent"` - SecurityOpt []string `qs:"securityopt"` - Target string `gs:"target"` - NoCache bool `qs:"nocache"` - SuppressOutput bool `qs:"q"` - Pull bool `qs:"pull"` - RmTmpContainer bool `qs:"rm"` - ForceRmTmpContainer bool `qs:"forcerm"` - RawJSONStream bool `qs:"-"` + CgroupParent string + SecurityOpt []string + Target string + Outputs string `ver:"1.40"` + NoCache bool + SuppressOutput bool `qs:"q"` + Pull bool `ver:"1.16"` + RmTmpContainer bool `qs:"rm"` + ForceRmTmpContainer bool `qs:"forcerm" ver:"1.12"` + RawJSONStream bool `qs:"-"` } // BuildArg represents arguments that can be passed to the image when building @@ -516,13 +547,16 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { return err } } - qs := queryString(&opts) + qs, ver := queryStringVersion(&opts) - if c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion125) && len(opts.CacheFrom) > 0 { + if len(opts.CacheFrom) > 0 { if b, err := json.Marshal(opts.CacheFrom); err == nil { item := url.Values(map[string][]string{}) item.Add("cachefrom", string(b)) qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion125.GreaterThan(ver) { + ver = apiVersion125 + } } } @@ -531,6 +565,9 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { item := url.Values(map[string][]string{}) item.Add("ulimits", string(b)) qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion118.GreaterThan(ver) { + ver = apiVersion118 + } } } @@ -543,10 +580,18 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { item := url.Values(map[string][]string{}) item.Add("buildargs", string(b)) qs = fmt.Sprintf("%s&%s", qs, item.Encode()) + if ver == nil || apiVersion121.GreaterThan(ver) { + ver = apiVersion121 + } } } - return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{ + buildURL, err := c.pathVersionCheck("/build", qs, ver) + if err != nil { + return err + } + + return c.streamURL(http.MethodPost, buildURL, streamOptions{ setRawTerminal: true, rawJSONStream: opts.RawJSONStream, headers: headers, @@ -584,7 +629,7 @@ func (c *Client) TagImage(name string, opts TagImageOptions) error { if name == "" { return ErrNoSuchImage } - resp, 
err := c.do("POST", "/images/"+name+"/tag?"+queryString(&opts), doOptions{ + resp, err := c.do(http.MethodPost, "/images/"+name+"/tag?"+queryString(&opts), doOptions{ context: opts.Context, }) if err != nil { @@ -609,7 +654,7 @@ func isURL(u string) bool { } func headersWithAuth(auths ...registryAuth) (map[string]string, error) { - var headers = make(map[string]string) + headers := make(map[string]string) for _, auth := range auths { if auth.isEmpty() { @@ -640,7 +685,7 @@ type APIImageSearch struct { // // See https://goo.gl/KLO9IZ for more details. func (c *Client) SearchImages(term string) ([]APIImageSearch, error) { - resp, err := c.do("GET", "/images/search?term="+term, doOptions{}) + resp, err := c.do(http.MethodGet, "/images/search?term="+term, doOptions{}) if err != nil { return nil, err } @@ -661,7 +706,7 @@ func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImage return nil, err } - resp, err := c.do("GET", "/images/search?term="+term, doOptions{ + resp, err := c.do(http.MethodGet, "/images/search?term="+term, doOptions{ headers: headers, }) if err != nil { @@ -699,7 +744,7 @@ type PruneImagesResults struct { // See https://goo.gl/qfZlbZ for more details. func (c *Client) PruneImages(opts PruneImagesOptions) (*PruneImagesResults, error) { path := "/images/prune?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go deleted file mode 100644 index f11ee0ee37..0000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go +++ /dev/null @@ -1,509 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package archive - -import ( - "archive/tar" - "bufio" - "compress/gzip" - "fmt" - "io" - "log" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz -) - -const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket -) - -// Compression is the state represents if compressed or not. -type Compression int - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -// WhiteoutFormat is the format of whiteouts unpacked -type WhiteoutFormat int - -// TarOptions wraps the tar options. 
-type TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *idtools.Identity - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - NoLchown bool - InUserNS bool - IncludeSourceDir bool - NoOverwriteDirNonDir bool -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), - compressWriter, - options.ChownOpts, - ) - ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - log.Printf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - log.Printf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - log.Printf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. - if len(options.IncludeFiles) > 0 { - log.Print("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - include := include - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - log.Printf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = pm.Matches(relFilePath) - if err != nil { - log.Printf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // Its not a dir then so we can just return/skip. - if !f.IsDir() { - return nil - } - - // No exceptions (!...) in patterns so just skip dir - if !pm.Exclusions() { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range pm.Patterns() { - if !pat.Exclusion() { - continue - } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - log.Printf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// CompressStream compresses the dest with specified compression algorithm. 
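This vendored internal/archive package is deleted because tar.go (in a later hunk) switches its import to github.com/docker/docker/pkg/archive, which exposes the same TarWithOptions entry point with the same include/exclude semantics. A sketch of the equivalent call against the upstream package, with an illustrative context directory and pattern:

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Same entry point as the deleted vendored copy, now taken from the
	// upstream docker/docker package.
	rc, err := archive.TarWithOptions("./context", &archive.TarOptions{
		ExcludePatterns: []string{"*.log"}, // illustrative pattern
		Compression:     archive.Gzip,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}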
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - //lint:ignore ST1005 this is vendored/copied code - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - //lint:ignore ST1005 this is vendored/copied code - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - IdentityMapping *idtools.IdentityMapping - ChownOpts *idtools.Identity - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { - return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - IdentityMapping: idMapping, - ChownOpts: chownOpts, - } -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return err - } - } - - hdr, err := FileInfoHeader(name, fi, link) - if err != nil { - return err - } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - // check whether the file is overlayfs whiteout - // if yes, skip re-mapping container ID mappings. - isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 - - // handle re-mapping container ID mappings back to host ID mappings before - // writing tar headers/files. 
We skip whiteout files because they were written - // by the kernel and already have proper ownership relative to the host - if !isOverlayWhiteout && - !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && - !ta.IdentityMapping.Empty() { - fileIdentity, err := getFileIdentity(fi.Sys()) - if err != nil { - return err - } - hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIdentity) - if err != nil { - return err - } - } - - // explicitly override with ChownOpts - if ta.ChownOpts != nil { - hdr.Uid = ta.ChownOpts.UID - hdr.Gid = ta.ChownOpts.GID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) - if err != nil { - return err - } - - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open. - file, err := system.OpenSequential(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - //lint:ignore SA1019 this is vendored/copied code - hdr.Xattrs = make(map[string]string) - //lint:ignore SA1019 this is vendored/copied code - hdr.Xattrs["security.capability"] = string(capability) - } - return nil -} - -// FileInfoHeader creates a populated Header from fi. -// Compared to archive pkg this function fills in more information. -// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), -// which have been deleted since Go 1.9 archive/tar. 
-func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return nil, err - } - hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { - return nil, err - } - return hdr, nil -} - -// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar -// https://github.com/golang/go/commit/66b5a2f -func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { - fm := fi.Mode() - switch { - case fm.IsRegular(): - mode |= modeISREG - case fi.IsDir(): - mode |= modeISDIR - case fm&os.ModeSymlink != 0: - mode |= modeISLNK - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - mode |= modeISCHR - } else { - mode |= modeISBLK - } - case fm&os.ModeNamedPipe != 0: - mode |= modeISFIFO - case fm&os.ModeSocket != 0: - mode |= modeISSOCK - } - return mode -} - -// canonicalTarName provides a platform-independent and consistent posix-style -// path for files and directories to be archived regardless of the platform. -func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go deleted file mode 100644 index e2059e489b..0000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_linux.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package archive - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -const ( - // AUFSWhiteoutFormat is the default format for whiteouts - AUFSWhiteoutFormat WhiteoutFormat = iota - // OverlayWhiteoutFormat formats whiteout according to the overlay - // standard. 
- OverlayWhiteoutFormat -) - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - if format == OverlayWhiteoutFormat { - return overlayWhiteoutConverter{} - } - return nil -} - -type overlayWhiteoutConverter struct{} - -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { - // convert whiteouts to AUFS format - if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { - // we just rename the file and make it normal - dir, filename := filepath.Split(hdr.Name) - hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 - hdr.Typeflag = tar.TypeReg - hdr.Size = 0 - } - - if fi.Mode()&os.ModeDir != 0 { - // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - if err != nil { - return nil, err - } - if len(opaque) == 1 && opaque[0] == 'y' { - //lint:ignore SA1019 this is vendored/copied code - if hdr.Xattrs != nil { - //lint:ignore SA1019 this is vendored/copied code - delete(hdr.Xattrs, "trusted.overlay.opaque") - } - - // create a header for the whiteout file - // it should inherit some properties from the parent, but be a regular file - wo = &tar.Header{ - Typeflag: tar.TypeReg, - Mode: hdr.Mode & int64(os.ModePerm), - Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), - Size: 0, - Uid: hdr.Uid, - Uname: hdr.Uname, - Gid: hdr.Gid, - Gname: hdr.Gname, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - } - } - - return -} - -func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay - if base == WhiteoutOpaqueDir { - err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) - // don't write the file itself - return false, err - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { - return false, err - } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go deleted file mode 100644 index 72822c8578..0000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/archive_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -// +build !linux - -package archive - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - return nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go deleted file mode 100644 index 39ea287bfe..0000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_unix.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -// +build !windows - -package archive - -import ( - "os" - "syscall" -) - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go deleted file mode 100644 index a93130474a..0000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/changes_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package archive - -import "os" - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go b/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go deleted file mode 100644 index 45d45f20ed..0000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/archive/copy.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package archive - -import ( - "os" - "path/filepath" -) - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(filepath.FromSlash(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(os.PathSeparator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go deleted file mode 100644 index 7d3c113585..0000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/term/term.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package term - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go deleted file mode 100644 index 92a80a3082..0000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -// +build !windows - -package term - -import "golang.org/x/sys/unix" - -// GetWinsize returns the window size based on the specified file descriptor. 
-func GetWinsize(fd uintptr) (*Winsize, error) { - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - ws := &Winsize{Height: uws.Row, Width: uws.Col} - return ws, err -} diff --git a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go b/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go deleted file mode 100644 index 4a07a5d19b..0000000000 --- a/vendor/github.com/fsouza/go-dockerclient/internal/term/winsize_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package term - -import "github.com/Azure/go-ansiterm/winterm" - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/fsouza/go-dockerclient/misc.go index 01fd1f687e..d42a66df67 100644 --- a/vendor/github.com/fsouza/go-dockerclient/misc.go +++ b/vendor/github.com/fsouza/go-dockerclient/misc.go @@ -8,6 +8,7 @@ import ( "context" "encoding/json" "net" + "net/http" "strings" "github.com/docker/docker/api/types/swarm" @@ -22,7 +23,7 @@ func (c *Client) Version() (*Env, error) { // VersionWithContext returns version information about the docker server. func (c *Client) VersionWithContext(ctx context.Context) (*Env, error) { - resp, err := c.do("GET", "/version", doOptions{context: ctx}) + resp, err := c.do(http.MethodGet, "/version", doOptions{context: ctx}) if err != nil { return nil, err } @@ -37,6 +38,7 @@ func (c *Client) VersionWithContext(ctx context.Context) (*Env, error) { // DockerInfo contains information about the Docker server // // See https://goo.gl/bHUoz9 for more details. +//nolint:golint type DockerInfo struct { ID string Containers int @@ -162,7 +164,7 @@ type IndexInfo struct { // // See https://goo.gl/ElTHi2 for more details. func (c *Client) Info() (*DockerInfo, error) { - resp, err := c.do("GET", "/info", doOptions{}) + resp, err := c.do(http.MethodGet, "/info", doOptions{}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go index 8c03b9ae6e..3a06a52d52 100644 --- a/vendor/github.com/fsouza/go-dockerclient/network.go +++ b/vendor/github.com/fsouza/go-dockerclient/network.go @@ -48,7 +48,7 @@ type Endpoint struct { // // See https://goo.gl/6GugX3 for more details. func (c *Client) ListNetworks() ([]Network, error) { - resp, err := c.do("GET", "/networks", doOptions{}) + resp, err := c.do(http.MethodGet, "/networks", doOptions{}) if err != nil { return nil, err } @@ -75,7 +75,7 @@ func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) qs := make(url.Values) qs.Add("filters", string(params)) path := "/networks?" + qs.Encode() - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { return nil, err } @@ -92,7 +92,7 @@ func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) // See https://goo.gl/6GugX3 for more details. 
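FilteredListNetworks, shown above, JSON-encodes its options into the "filters" query parameter. Assuming the package's map-of-maps NetworkFilterOpts type (not shown in this hunk), a sketch that lists bridge networks:

package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// The opts map is marshaled as-is into the "filters" parameter.
	nets, err := client.FilteredListNetworks(docker.NetworkFilterOpts{
		"driver": {"bridge": true},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nets {
		fmt.Println(n.ID, n.Name)
	}
}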
func (c *Client) NetworkInfo(id string) (*Network, error) { path := "/networks/" + id - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchNetwork{ID: id} @@ -114,15 +114,26 @@ func (c *Client) NetworkInfo(id string) (*Network, error) { type CreateNetworkOptions struct { Name string `json:"Name" yaml:"Name" toml:"Name"` Driver string `json:"Driver" yaml:"Driver" toml:"Driver"` + Scope string `json:"Scope" yaml:"Scope" toml:"Scope"` IPAM *IPAMOptions `json:"IPAM,omitempty" yaml:"IPAM" toml:"IPAM"` + ConfigFrom *NetworkConfigFrom `json:"ConfigFrom,omitempty" yaml:"ConfigFrom" toml:"ConfigFrom"` Options map[string]interface{} `json:"Options" yaml:"Options" toml:"Options"` Labels map[string]string `json:"Labels" yaml:"Labels" toml:"Labels"` CheckDuplicate bool `json:"CheckDuplicate" yaml:"CheckDuplicate" toml:"CheckDuplicate"` Internal bool `json:"Internal" yaml:"Internal" toml:"Internal"` EnableIPv6 bool `json:"EnableIPv6" yaml:"EnableIPv6" toml:"EnableIPv6"` + Attachable bool `json:"Attachable" yaml:"Attachable" toml:"Attachable"` + ConfigOnly bool `json:"ConfigOnly" yaml:"ConfigOnly" toml:"ConfigOnly"` + Ingress bool `json:"Ingress" yaml:"Ingress" toml:"Ingress"` Context context.Context `json:"-"` } +// NetworkConfigFrom is used in network creation for specifying the source of a +// network configuration. +type NetworkConfigFrom struct { + Network string `json:"Network" yaml:"Network" toml:"Network"` +} + // IPAMOptions controls IP Address Management when creating a network // // See https://goo.gl/T8kRVH for more details. @@ -148,7 +159,7 @@ type IPAMConfig struct { // See https://goo.gl/6GugX3 for more details. func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { resp, err := c.do( - "POST", + http.MethodPost, "/networks/create", doOptions{ data: opts, @@ -182,7 +193,7 @@ func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { // // See https://goo.gl/6GugX3 for more details. func (c *Client) RemoveNetwork(id string) error { - resp, err := c.do("DELETE", "/networks/"+id, doOptions{}) + resp, err := c.do(http.MethodDelete, "/networks/"+id, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchNetwork{ID: id} @@ -225,6 +236,7 @@ type EndpointConfig struct { GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` + DriverOpts map[string]string `json:"DriverOpts,omitempty" yaml:"DriverOpts,omitempty" toml:"DriverOpts,omitempty"` } // EndpointIPAMConfig represents IPAM configurations for an @@ -241,7 +253,7 @@ type EndpointIPAMConfig struct { // // See https://goo.gl/6GugX3 for more details. func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error { - resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{ + resp, err := c.do(http.MethodPost, "/networks/"+id+"/connect", doOptions{ data: opts, context: opts.Context, }) @@ -260,7 +272,7 @@ func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error // // See https://goo.gl/6GugX3 for more details. 
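The new CreateNetworkOptions fields (Scope, ConfigFrom, Attachable, ConfigOnly, Ingress) and the per-endpoint DriverOpts map above are plain pass-throughs to the engine; whether they take effect depends on the daemon's API version. A sketch with illustrative names and an illustrative driver-option key:

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	net, err := client.CreateNetwork(docker.CreateNetworkOptions{
		Name:       "demo-net", // illustrative
		Driver:     "bridge",
		Attachable: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// DriverOpts keys are driver-specific; this one is purely illustrative.
	err = client.ConnectNetwork(net.ID, docker.NetworkConnectionOptions{
		Container: "demo-container", // illustrative
		EndpointConfig: &docker.EndpointConfig{
			DriverOpts: map[string]string{"example.option": "value"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}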
func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error { - resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts}) + resp, err := c.do(http.MethodPost, "/networks/"+id+"/disconnect", doOptions{data: opts}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} @@ -291,7 +303,7 @@ type PruneNetworksResults struct { // See https://goo.gl/kX0S9h for more details. func (c *Client) PruneNetworks(opts PruneNetworksOptions) (*PruneNetworksResults, error) { path := "/networks/prune?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/plugin.go b/vendor/github.com/fsouza/go-dockerclient/plugin.go index 0887903131..9cec415123 100644 --- a/vendor/github.com/fsouza/go-dockerclient/plugin.go +++ b/vendor/github.com/fsouza/go-dockerclient/plugin.go @@ -35,15 +35,26 @@ type InstallPluginOptions struct { // // See https://goo.gl/C4t7Tz for more details. func (c *Client) InstallPlugins(opts InstallPluginOptions) error { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return err + } + path := "/plugins/pull?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.Plugins, context: opts.Context, + headers: headers, }) if err != nil { return err } - resp.Body.Close() + defer resp.Body.Close() + // PullPlugin streams back the progress of the pull, we must consume the whole body + // otherwise the pull will be canceled on the engine. + if _, err := ioutil.ReadAll(resp.Body); err != nil { + return err + } return nil } @@ -152,7 +163,7 @@ type PluginDetail struct { // // See https://goo.gl/C4t7Tz for more details. func (c *Client) ListPlugins(ctx context.Context) ([]PluginDetail, error) { - resp, err := c.do("GET", "/plugins", doOptions{ + resp, err := c.do(http.MethodGet, "/plugins", doOptions{ context: ctx, }) if err != nil { @@ -179,7 +190,7 @@ type ListFilteredPluginsOptions struct { // See https://goo.gl/rmdmWg for more details. func (c *Client) ListFilteredPlugins(opts ListFilteredPluginsOptions) ([]PluginDetail, error) { path := "/plugins/json?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{ + resp, err := c.do(http.MethodGet, path, doOptions{ context: opts.Context, }) if err != nil { @@ -193,12 +204,41 @@ func (c *Client) ListFilteredPlugins(opts ListFilteredPluginsOptions) ([]PluginD return pluginDetails, nil } -// GetPluginPrivileges returns pulginPrivileges or an error. +// GetPluginPrivileges returns pluginPrivileges or an error. // // See https://goo.gl/C4t7Tz for more details. -func (c *Client) GetPluginPrivileges(name string, ctx context.Context) ([]PluginPrivilege, error) { - resp, err := c.do("GET", "/plugins/privileges?remote="+name, doOptions{ - context: ctx, +//nolint:golint +func (c *Client) GetPluginPrivileges(remote string, ctx context.Context) ([]PluginPrivilege, error) { + return c.GetPluginPrivilegesWithOptions( + GetPluginPrivilegesOptions{ + Remote: remote, + Context: ctx, + }) +} + +// GetPluginPrivilegesOptions specify parameters to the GetPluginPrivilegesWithOptions function. +// +// See https://goo.gl/C4t7Tz for more details. 
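InstallPlugins above now sends registry auth headers and drains the progress stream, so returning early no longer cancels the pull on the engine side. Paired with the options-based privileges call defined just below, a typical install flow looks like this (the plugin reference is illustrative, and the InstallPluginOptions field names are assumed from the package, as this hunk does not show the struct):

package main

import (
	"context"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	// Fetch the privileges the plugin requests before installing it.
	privs, err := client.GetPluginPrivilegesWithOptions(docker.GetPluginPrivilegesOptions{
		Remote:  "vieux/sshfs:latest", // illustrative plugin reference
		Context: ctx,
	})
	if err != nil {
		log.Fatal(err)
	}
	// InstallPlugins consumes the whole progress body before returning.
	err = client.InstallPlugins(docker.InstallPluginOptions{
		Remote:  "vieux/sshfs:latest",
		Plugins: privs,
		Context: ctx,
	})
	if err != nil {
		log.Fatal(err)
	}
}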
+type GetPluginPrivilegesOptions struct { + Remote string + Auth AuthConfiguration + Context context.Context +} + +// GetPluginPrivilegesWithOptions returns pluginPrivileges or an error. +// +// See https://goo.gl/C4t7Tz for more details. +//nolint:golint +func (c *Client) GetPluginPrivilegesWithOptions(opts GetPluginPrivilegesOptions) ([]PluginPrivilege, error) { + headers, err := headersWithAuth(opts.Auth) + if err != nil { + return nil, err + } + + path := "/plugins/privileges?" + queryString(opts) + resp, err := c.do(http.MethodGet, path, doOptions{ + context: opts.Context, + headers: headers, }) if err != nil { return nil, err @@ -214,21 +254,18 @@ func (c *Client) GetPluginPrivileges(name string, ctx context.Context) ([]Plugin // InspectPlugins returns a pluginDetail or an error. // // See https://goo.gl/C4t7Tz for more details. +//nolint:golint func (c *Client) InspectPlugins(name string, ctx context.Context) (*PluginDetail, error) { - resp, err := c.do("GET", "/plugins/"+name+"/json", doOptions{ + resp, err := c.do(http.MethodGet, "/plugins/"+name+"/json", doOptions{ context: ctx, }) - if err != nil { - return nil, err - } - defer resp.Body.Close() if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchPlugin{ID: name} } return nil, err } - resp.Body.Close() + defer resp.Body.Close() var pluginDetail PluginDetail if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil { return nil, err @@ -252,20 +289,26 @@ type RemovePluginOptions struct { // See https://goo.gl/C4t7Tz for more details. func (c *Client) RemovePlugin(opts RemovePluginOptions) (*PluginDetail, error) { path := "/plugins/" + opts.Name + "?" + queryString(opts) - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchPlugin{ID: opts.Name} + } return nil, err } defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchPlugin{ID: opts.Name} - } return nil, err } - resp.Body.Close() + + if len(body) == 0 { + // Seems like newer docker versions won't return the plugindetail after removal + return nil, nil + } + var pluginDetail PluginDetail - if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil { + if err := json.Unmarshal(body, &pluginDetail); err != nil { return nil, err } return &pluginDetail, nil @@ -287,7 +330,7 @@ type EnablePluginOptions struct { // See https://goo.gl/C4t7Tz for more details. func (c *Client) EnablePlugin(opts EnablePluginOptions) error { path := "/plugins/" + opts.Name + "/enable?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return err } @@ -310,7 +353,7 @@ type DisablePluginOptions struct { // See https://goo.gl/C4t7Tz for more details. func (c *Client) DisablePlugin(opts DisablePluginOptions) error { path := "/plugins/" + opts.Name + "/disable" - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return err } @@ -335,7 +378,7 @@ type CreatePluginOptions struct { // See https://goo.gl/C4t7Tz for more details. 
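RemovePlugin now reads the whole response body and returns a nil PluginDetail when the engine sends an empty body, so callers must nil-check the result; a sketch (plugin reference is illustrative):

package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	detail, err := client.RemovePlugin(docker.RemovePluginOptions{
		Name: "vieux/sshfs:latest", // illustrative plugin reference
	})
	if err != nil {
		log.Fatal(err)
	}
	// Newer engines return an empty body on removal, so detail can be nil.
	if detail != nil {
		fmt.Println("removed", detail.Name)
	}
}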
func (c *Client) CreatePlugin(opts CreatePluginOptions) (string, error) { path := "/plugins/create?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.Path, context: opts.Context, }) @@ -365,7 +408,7 @@ type PushPluginOptions struct { // See https://goo.gl/C4t7Tz for more details. func (c *Client) PushPlugin(opts PushPluginOptions) error { path := "/plugins/" + opts.Name + "/push" - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return err } @@ -389,7 +432,7 @@ type ConfigurePluginOptions struct { // See https://goo.gl/C4t7Tz for more details. func (c *Client) ConfigurePlugin(opts ConfigurePluginOptions) error { path := "/plugins/" + opts.Name + "/set" - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.Envs, context: opts.Context, }) diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm.go b/vendor/github.com/fsouza/go-dockerclient/swarm.go index a257758fc2..c1bbce7632 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm.go @@ -36,7 +36,7 @@ type InitSwarmOptions struct { // See https://goo.gl/ZWyG1M for more details. func (c *Client) InitSwarm(opts InitSwarmOptions) (string, error) { path := "/swarm/init" - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.InitRequest, forceJSON: true, context: opts.Context, @@ -66,7 +66,7 @@ type JoinSwarmOptions struct { // See https://goo.gl/N59IP1 for more details. func (c *Client) JoinSwarm(opts JoinSwarmOptions) error { path := "/swarm/join" - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.JoinRequest, forceJSON: true, context: opts.Context, @@ -93,7 +93,7 @@ func (c *Client) LeaveSwarm(opts LeaveSwarmOptions) error { params := make(url.Values) params.Set("force", strconv.FormatBool(opts.Force)) path := "/swarm/leave?" + params.Encode() - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ context: opts.Context, }) if err != nil { @@ -123,7 +123,7 @@ func (c *Client) UpdateSwarm(opts UpdateSwarmOptions) error { params.Set("rotateWorkerToken", strconv.FormatBool(opts.RotateWorkerToken)) params.Set("rotateManagerToken", strconv.FormatBool(opts.RotateManagerToken)) path := "/swarm/update?" + params.Encode() - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ data: opts.Swarm, forceJSON: true, context: opts.Context, @@ -141,7 +141,7 @@ func (c *Client) UpdateSwarm(opts UpdateSwarmOptions) error { // See https://goo.gl/MFwgX9 for more details. func (c *Client) InspectSwarm(ctx context.Context) (swarm.Swarm, error) { response := swarm.Swarm{} - resp, err := c.do("GET", "/swarm", doOptions{ + resp, err := c.do(http.MethodGet, "/swarm", doOptions{ context: ctx, }) if err != nil { diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go b/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go index fb73ab2efb..399aa1dcea 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go @@ -46,7 +46,7 @@ func (c *Client) CreateConfig(opts CreateConfigOptions) (*swarm.Config, error) { return nil, err } path := "/configs/create?" 
+ queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ headers: headers, data: opts.ConfigSpec, forceJSON: true, @@ -76,7 +76,7 @@ type RemoveConfigOptions struct { // See https://goo.gl/Tqrtya for more details. func (c *Client) RemoveConfig(opts RemoveConfigOptions) error { path := "/configs/" + opts.ID - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchConfig{ID: opts.ID} @@ -109,7 +109,7 @@ func (c *Client) UpdateConfig(id string, opts UpdateConfigOptions) error { } params := make(url.Values) params.Set("version", strconv.FormatUint(opts.Version, 10)) - resp, err := c.do("POST", "/configs/"+id+"/update?"+params.Encode(), doOptions{ + resp, err := c.do(http.MethodPost, "/configs/"+id+"/update?"+params.Encode(), doOptions{ headers: headers, data: opts.ConfigSpec, forceJSON: true, @@ -130,7 +130,7 @@ func (c *Client) UpdateConfig(id string, opts UpdateConfigOptions) error { // See https://goo.gl/dHmr75 for more details. func (c *Client) InspectConfig(id string) (*swarm.Config, error) { path := "/configs/" + id - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchConfig{ID: id} @@ -158,7 +158,7 @@ type ListConfigsOptions struct { // See https://goo.gl/DwvNMd for more details. func (c *Client) ListConfigs(opts ListConfigsOptions) ([]swarm.Config, error) { path := "/configs?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_node.go b/vendor/github.com/fsouza/go-dockerclient/swarm_node.go index 095653cd94..c149db287b 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_node.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_node.go @@ -40,7 +40,7 @@ type ListNodesOptions struct { // See http://goo.gl/3K4GwU for more details. func (c *Client) ListNodes(opts ListNodesOptions) ([]swarm.Node, error) { path := "/nodes?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } @@ -56,7 +56,7 @@ func (c *Client) ListNodes(opts ListNodesOptions) ([]swarm.Node, error) { // // See http://goo.gl/WjkTOk for more details. func (c *Client) InspectNode(id string) (*swarm.Node, error) { - resp, err := c.do("GET", "/nodes/"+id, doOptions{}) + resp, err := c.do(http.MethodGet, "/nodes/"+id, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchNode{ID: id} @@ -87,7 +87,7 @@ func (c *Client) UpdateNode(id string, opts UpdateNodeOptions) error { params := make(url.Values) params.Set("version", strconv.FormatUint(opts.Version, 10)) path := "/nodes/" + id + "/update?" 
+ params.Encode() - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ context: opts.Context, forceJSON: true, data: opts.NodeSpec, @@ -118,7 +118,7 @@ func (c *Client) RemoveNode(opts RemoveNodeOptions) error { params := make(url.Values) params.Set("force", strconv.FormatBool(opts.Force)) path := "/nodes/" + opts.ID + "?" + params.Encode() - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchNode{ID: opts.ID} diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go index 5a3b82ca5d..058c4a4af4 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go @@ -46,7 +46,7 @@ func (c *Client) CreateSecret(opts CreateSecretOptions) (*swarm.Secret, error) { return nil, err } path := "/secrets/create?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ headers: headers, data: opts.SecretSpec, forceJSON: true, @@ -76,7 +76,7 @@ type RemoveSecretOptions struct { // See https://goo.gl/Tqrtya for more details. func (c *Client) RemoveSecret(opts RemoveSecretOptions) error { path := "/secrets/" + opts.ID - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchSecret{ID: opts.ID} @@ -109,7 +109,7 @@ func (c *Client) UpdateSecret(id string, opts UpdateSecretOptions) error { } params := make(url.Values) params.Set("version", strconv.FormatUint(opts.Version, 10)) - resp, err := c.do("POST", "/secrets/"+id+"/update?"+params.Encode(), doOptions{ + resp, err := c.do(http.MethodPost, "/secrets/"+id+"/update?"+params.Encode(), doOptions{ headers: headers, data: opts.SecretSpec, forceJSON: true, @@ -130,7 +130,7 @@ func (c *Client) UpdateSecret(id string, opts UpdateSecretOptions) error { // See https://goo.gl/dHmr75 for more details. func (c *Client) InspectSecret(id string) (*swarm.Secret, error) { path := "/secrets/" + id - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchSecret{ID: id} @@ -158,7 +158,7 @@ type ListSecretsOptions struct { // See https://goo.gl/DwvNMd for more details. func (c *Client) ListSecrets(opts ListSecretsOptions) ([]swarm.Secret, error) { path := "/secrets?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_service.go b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go index d9c4b2acc8..cedbe41e32 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_service.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go @@ -46,7 +46,7 @@ func (c *Client) CreateService(opts CreateServiceOptions) (*swarm.Service, error return nil, err } path := "/services/create?" 
+ queryString(opts) - resp, err := c.do("POST", path, doOptions{ + resp, err := c.do(http.MethodPost, path, doOptions{ headers: headers, data: opts.ServiceSpec, forceJSON: true, @@ -76,7 +76,7 @@ type RemoveServiceOptions struct { // See https://goo.gl/Tqrtya for more details. func (c *Client) RemoveService(opts RemoveServiceOptions) error { path := "/services/" + opts.ID - resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return &NoSuchService{ID: opts.ID} @@ -106,7 +106,7 @@ func (c *Client) UpdateService(id string, opts UpdateServiceOptions) error { if err != nil { return err } - resp, err := c.do("POST", "/services/"+id+"/update?"+queryString(opts), doOptions{ + resp, err := c.do(http.MethodPost, "/services/"+id+"/update?"+queryString(opts), doOptions{ headers: headers, data: opts.ServiceSpec, forceJSON: true, @@ -127,7 +127,7 @@ func (c *Client) UpdateService(id string, opts UpdateServiceOptions) error { // See https://goo.gl/dHmr75 for more details. func (c *Client) InspectService(id string) (*swarm.Service, error) { path := "/services/" + id - resp, err := c.do("GET", path, doOptions{}) + resp, err := c.do(http.MethodGet, path, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchService{ID: id} @@ -155,7 +155,7 @@ type ListServicesOptions struct { // See https://goo.gl/DwvNMd for more details. func (c *Client) ListServices(opts ListServicesOptions) ([]swarm.Service, error) { path := "/services?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } @@ -203,7 +203,7 @@ func (c *Client) GetServiceLogs(opts LogsServiceOptions) error { opts.Tail = "all" } path := "/services/" + opts.Service + "/logs?" + queryString(opts) - return c.stream("GET", path, streamOptions{ + return c.stream(http.MethodGet, path, streamOptions{ setRawTerminal: opts.RawTerminal, stdout: opts.OutputStream, stderr: opts.ErrorStream, diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_task.go b/vendor/github.com/fsouza/go-dockerclient/swarm_task.go index 3b1161ab98..547642f5e3 100644 --- a/vendor/github.com/fsouza/go-dockerclient/swarm_task.go +++ b/vendor/github.com/fsouza/go-dockerclient/swarm_task.go @@ -38,7 +38,7 @@ type ListTasksOptions struct { // See http://goo.gl/rByLzw for more details. func (c *Client) ListTasks(opts ListTasksOptions) ([]swarm.Task, error) { path := "/tasks?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } @@ -54,7 +54,7 @@ func (c *Client) ListTasks(opts ListTasksOptions) ([]swarm.Task, error) { // // See http://goo.gl/kyziuq for more details. 
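GetServiceLogs above streams over the same helper as the image endpoints and defaults Tail to "all" when unset. A caller-side sketch with an illustrative service name (the LogsServiceOptions field names beyond those visible here are assumed from the package):

package main

import (
	"log"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// Streams until the connection or context ends; Tail defaults to "all".
	err = client.GetServiceLogs(docker.LogsServiceOptions{
		Service:      "demo-service", // illustrative service name
		OutputStream: os.Stdout,
		ErrorStream:  os.Stderr,
		Stdout:       true,
		Stderr:       true,
	})
	if err != nil {
		log.Fatal(err)
	}
}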
func (c *Client) InspectTask(id string) (*swarm.Task, error) { - resp, err := c.do("GET", "/tasks/"+id, doOptions{}) + resp, err := c.do(http.MethodGet, "/tasks/"+id, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, &NoSuchTask{ID: id} diff --git a/vendor/github.com/fsouza/go-dockerclient/system.go b/vendor/github.com/fsouza/go-dockerclient/system.go index a43dfb5a26..46b9faf00e 100644 --- a/vendor/github.com/fsouza/go-dockerclient/system.go +++ b/vendor/github.com/fsouza/go-dockerclient/system.go @@ -3,6 +3,7 @@ package docker import ( "context" "encoding/json" + "net/http" ) // VolumeUsageData represents usage data from the docker system api @@ -59,7 +60,7 @@ type DiskUsageOptions struct { // More Info Here https://dockr.ly/2PNzQyO func (c *Client) DiskUsage(opts DiskUsageOptions) (*DiskUsage, error) { path := "/system/df" - resp, err := c.do("GET", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go index 611da8c9e0..9716a77128 100644 --- a/vendor/github.com/fsouza/go-dockerclient/tar.go +++ b/vendor/github.com/fsouza/go-dockerclient/tar.go @@ -13,8 +13,8 @@ import ( "path/filepath" "strings" + "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" - "github.com/fsouza/go-dockerclient/internal/archive" ) func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) { diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go index 07661f3d1b..08e7f8ec2c 100644 --- a/vendor/github.com/fsouza/go-dockerclient/tls.go +++ b/vendor/github.com/fsouza/go-dockerclient/tls.go @@ -103,7 +103,7 @@ func copyTLSConfig(cfg *tls.Config) *tls.Config { ClientCAs: cfg.ClientCAs, ClientSessionCache: cfg.ClientSessionCache, CurvePreferences: cfg.CurvePreferences, - InsecureSkipVerify: cfg.InsecureSkipVerify, + InsecureSkipVerify: cfg.InsecureSkipVerify, //nolint:gosec MaxVersion: cfg.MaxVersion, MinVersion: cfg.MinVersion, NameToCertificate: cfg.NameToCertificate, diff --git a/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/fsouza/go-dockerclient/volume.go index c8f50469ed..c39a273bff 100644 --- a/vendor/github.com/fsouza/go-dockerclient/volume.go +++ b/vendor/github.com/fsouza/go-dockerclient/volume.go @@ -44,7 +44,7 @@ type ListVolumesOptions struct { // // See https://goo.gl/3wgTsd for more details. func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) { - resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{ + resp, err := c.do(http.MethodGet, "/volumes?"+queryString(opts), doOptions{ context: opts.Context, }) if err != nil { @@ -85,7 +85,7 @@ type CreateVolumeOptions struct { // // See https://goo.gl/qEhmEC for more details. func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { - resp, err := c.do("POST", "/volumes/create", doOptions{ + resp, err := c.do(http.MethodPost, "/volumes/create", doOptions{ data: opts, context: opts.Context, }) @@ -104,7 +104,7 @@ func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { // // See https://goo.gl/GMjsMc for more details. 
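DiskUsage above is a thin GET against /system/df; a sketch, assuming the result struct exposes a LayersSize field (the DiskUsage struct body is not visible in this hunk):

package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	du, err := client.DiskUsage(docker.DiskUsageOptions{})
	if err != nil {
		log.Fatal(err)
	}
	// LayersSize is assumed from the package's DiskUsage struct.
	fmt.Println("image layers use", du.LayersSize, "bytes")
}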
func (c *Client) InspectVolume(name string) (*Volume, error) { - resp, err := c.do("GET", "/volumes/"+name, doOptions{}) + resp, err := c.do(http.MethodGet, "/volumes/"+name, doOptions{}) if err != nil { if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { return nil, ErrNoSuchVolume @@ -142,7 +142,7 @@ type RemoveVolumeOptions struct { // See https://goo.gl/nvd6qj for more details. func (c *Client) RemoveVolumeWithOptions(opts RemoveVolumeOptions) error { path := "/volumes/" + opts.Name - resp, err := c.do("DELETE", path+"?"+queryString(opts), doOptions{context: opts.Context}) + resp, err := c.do(http.MethodDelete, path+"?"+queryString(opts), doOptions{context: opts.Context}) if err != nil { if e, ok := err.(*Error); ok { if e.Status == http.StatusNotFound { @@ -179,7 +179,7 @@ type PruneVolumesResults struct { // See https://goo.gl/f9XDem for more details. func (c *Client) PruneVolumes(opts PruneVolumesOptions) (*PruneVolumesResults, error) { path := "/volumes/prune?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{context: opts.Context}) + resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context}) if err != nil { return nil, err } diff --git a/vendor/github.com/ijc/Gotty/LICENSE b/vendor/github.com/ijc/Gotty/LICENSE deleted file mode 100644 index 0b71c97360..0000000000 --- a/vendor/github.com/ijc/Gotty/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com) -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation are those -of the authors and should not be interpreted as representing official policies, -either expressed or implied, of the FreeBSD Project. diff --git a/vendor/github.com/ijc/Gotty/README b/vendor/github.com/ijc/Gotty/README deleted file mode 100644 index a6b0d9a8fe..0000000000 --- a/vendor/github.com/ijc/Gotty/README +++ /dev/null @@ -1,5 +0,0 @@ -Gotty is a library written in Go that determines and reads termcap database -files to produce an interface for interacting with the capabilities of a -terminal. -See the godoc documentation or the source code for more information about -function usage. 
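The go-dockerclient hunks above all make the same mechanical change: bare "GET"/"POST"/"DELETE" strings passed to c.do and c.stream become the net/http method constants, with the missing "net/http" import added where needed (as in system.go). A minimal sketch of the pattern outside this vendor tree; the methodFor helper is hypothetical and only illustrates why the constants are preferred (the compiler catches a misspelled identifier, while a misspelled string literal fails at runtime):

```go
package main

import (
	"fmt"
	"net/http"
)

// methodFor maps an action name to an HTTP method using the net/http
// constants instead of string literals, mirroring the hunks above.
func methodFor(action string) string {
	switch action {
	case "create", "update":
		return http.MethodPost // was "POST"
	case "remove":
		return http.MethodDelete // was "DELETE"
	default:
		return http.MethodGet // was "GET"
	}
}

func main() {
	fmt.Println(methodFor("create")) // POST
}
```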
diff --git a/vendor/github.com/ijc/Gotty/TODO b/vendor/github.com/ijc/Gotty/TODO deleted file mode 100644 index 470460531c..0000000000 --- a/vendor/github.com/ijc/Gotty/TODO +++ /dev/null @@ -1,3 +0,0 @@ -gotty.go:// TODO add more concurrency to name lookup, look for more opportunities. -all:// TODO add more documentation, with function usage in a doc.go file. -all:// TODO add more testing/benchmarking with go test. diff --git a/vendor/github.com/ijc/Gotty/attributes.go b/vendor/github.com/ijc/Gotty/attributes.go deleted file mode 100644 index a4c005fae5..0000000000 --- a/vendor/github.com/ijc/Gotty/attributes.go +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -// Boolean capabilities -var BoolAttr = [...]string{ - "auto_left_margin", "bw", - "auto_right_margin", "am", - "no_esc_ctlc", "xsb", - "ceol_standout_glitch", "xhp", - "eat_newline_glitch", "xenl", - "erase_overstrike", "eo", - "generic_type", "gn", - "hard_copy", "hc", - "has_meta_key", "km", - "has_status_line", "hs", - "insert_null_glitch", "in", - "memory_above", "da", - "memory_below", "db", - "move_insert_mode", "mir", - "move_standout_mode", "msgr", - "over_strike", "os", - "status_line_esc_ok", "eslok", - "dest_tabs_magic_smso", "xt", - "tilde_glitch", "hz", - "transparent_underline", "ul", - "xon_xoff", "nxon", - "needs_xon_xoff", "nxon", - "prtr_silent", "mc5i", - "hard_cursor", "chts", - "non_rev_rmcup", "nrrmc", - "no_pad_char", "npc", - "non_dest_scroll_region", "ndscr", - "can_change", "ccc", - "back_color_erase", "bce", - "hue_lightness_saturation", "hls", - "col_addr_glitch", "xhpa", - "cr_cancels_micro_mode", "crxm", - "has_print_wheel", "daisy", - "row_addr_glitch", "xvpa", - "semi_auto_right_margin", "sam", - "cpi_changes_res", "cpix", - "lpi_changes_res", "lpix", - "backspaces_with_bs", "", - "crt_no_scrolling", "", - "no_correctly_working_cr", "", - "gnu_has_meta_key", "", - "linefeed_is_newline", "", - "has_hardware_tabs", "", - "return_does_clr_eol", "", -} - -// Numerical capabilities -var NumAttr = [...]string{ - "columns", "cols", - "init_tabs", "it", - "lines", "lines", - "lines_of_memory", "lm", - "magic_cookie_glitch", "xmc", - "padding_baud_rate", "pb", - "virtual_terminal", "vt", - "width_status_line", "wsl", - "num_labels", "nlab", - "label_height", "lh", - "label_width", "lw", - "max_attributes", "ma", - "maximum_windows", "wnum", - "max_colors", "colors", - "max_pairs", "pairs", - "no_color_video", "ncv", - "buffer_capacity", "bufsz", - "dot_vert_spacing", "spinv", - "dot_horz_spacing", "spinh", - "max_micro_address", "maddr", - "max_micro_jump", "mjump", - "micro_col_size", "mcs", - "micro_line_size", "mls", - "number_of_pins", "npins", - "output_res_char", "orc", - "output_res_line", "orl", - "output_res_horz_inch", "orhi", - "output_res_vert_inch", "orvi", - "print_rate", "cps", - "wide_char_size", "widcs", - "buttons", "btns", - "bit_image_entwining", "bitwin", - "bit_image_type", "bitype", - "magic_cookie_glitch_ul", "", - "carriage_return_delay", "", - "new_line_delay", "", - "backspace_delay", "", - "horizontal_tab_delay", "", - "number_of_function_keys", "", -} - -// String capabilities -var StrAttr = [...]string{ - "back_tab", "cbt", - "bell", "bel", - "carriage_return", "cr", - "change_scroll_region", "csr", - "clear_all_tabs", "tbc", - "clear_screen", "clear", - "clr_eol", "el", - "clr_eos", "ed", - "column_address", "hpa", 
- "command_character", "cmdch", - "cursor_address", "cup", - "cursor_down", "cud1", - "cursor_home", "home", - "cursor_invisible", "civis", - "cursor_left", "cub1", - "cursor_mem_address", "mrcup", - "cursor_normal", "cnorm", - "cursor_right", "cuf1", - "cursor_to_ll", "ll", - "cursor_up", "cuu1", - "cursor_visible", "cvvis", - "delete_character", "dch1", - "delete_line", "dl1", - "dis_status_line", "dsl", - "down_half_line", "hd", - "enter_alt_charset_mode", "smacs", - "enter_blink_mode", "blink", - "enter_bold_mode", "bold", - "enter_ca_mode", "smcup", - "enter_delete_mode", "smdc", - "enter_dim_mode", "dim", - "enter_insert_mode", "smir", - "enter_secure_mode", "invis", - "enter_protected_mode", "prot", - "enter_reverse_mode", "rev", - "enter_standout_mode", "smso", - "enter_underline_mode", "smul", - "erase_chars", "ech", - "exit_alt_charset_mode", "rmacs", - "exit_attribute_mode", "sgr0", - "exit_ca_mode", "rmcup", - "exit_delete_mode", "rmdc", - "exit_insert_mode", "rmir", - "exit_standout_mode", "rmso", - "exit_underline_mode", "rmul", - "flash_screen", "flash", - "form_feed", "ff", - "from_status_line", "fsl", - "init_1string", "is1", - "init_2string", "is2", - "init_3string", "is3", - "init_file", "if", - "insert_character", "ich1", - "insert_line", "il1", - "insert_padding", "ip", - "key_backspace", "kbs", - "key_catab", "ktbc", - "key_clear", "kclr", - "key_ctab", "kctab", - "key_dc", "kdch1", - "key_dl", "kdl1", - "key_down", "kcud1", - "key_eic", "krmir", - "key_eol", "kel", - "key_eos", "ked", - "key_f0", "kf0", - "key_f1", "kf1", - "key_f10", "kf10", - "key_f2", "kf2", - "key_f3", "kf3", - "key_f4", "kf4", - "key_f5", "kf5", - "key_f6", "kf6", - "key_f7", "kf7", - "key_f8", "kf8", - "key_f9", "kf9", - "key_home", "khome", - "key_ic", "kich1", - "key_il", "kil1", - "key_left", "kcub1", - "key_ll", "kll", - "key_npage", "knp", - "key_ppage", "kpp", - "key_right", "kcuf1", - "key_sf", "kind", - "key_sr", "kri", - "key_stab", "khts", - "key_up", "kcuu1", - "keypad_local", "rmkx", - "keypad_xmit", "smkx", - "lab_f0", "lf0", - "lab_f1", "lf1", - "lab_f10", "lf10", - "lab_f2", "lf2", - "lab_f3", "lf3", - "lab_f4", "lf4", - "lab_f5", "lf5", - "lab_f6", "lf6", - "lab_f7", "lf7", - "lab_f8", "lf8", - "lab_f9", "lf9", - "meta_off", "rmm", - "meta_on", "smm", - "newline", "_glitch", - "pad_char", "npc", - "parm_dch", "dch", - "parm_delete_line", "dl", - "parm_down_cursor", "cud", - "parm_ich", "ich", - "parm_index", "indn", - "parm_insert_line", "il", - "parm_left_cursor", "cub", - "parm_right_cursor", "cuf", - "parm_rindex", "rin", - "parm_up_cursor", "cuu", - "pkey_key", "pfkey", - "pkey_local", "pfloc", - "pkey_xmit", "pfx", - "print_screen", "mc0", - "prtr_off", "mc4", - "prtr_on", "mc5", - "repeat_char", "rep", - "reset_1string", "rs1", - "reset_2string", "rs2", - "reset_3string", "rs3", - "reset_file", "rf", - "restore_cursor", "rc", - "row_address", "mvpa", - "save_cursor", "row_address", - "scroll_forward", "ind", - "scroll_reverse", "ri", - "set_attributes", "sgr", - "set_tab", "hts", - "set_window", "wind", - "tab", "s_magic_smso", - "to_status_line", "tsl", - "underline_char", "uc", - "up_half_line", "hu", - "init_prog", "iprog", - "key_a1", "ka1", - "key_a3", "ka3", - "key_b2", "kb2", - "key_c1", "kc1", - "key_c3", "kc3", - "prtr_non", "mc5p", - "char_padding", "rmp", - "acs_chars", "acsc", - "plab_norm", "pln", - "key_btab", "kcbt", - "enter_xon_mode", "smxon", - "exit_xon_mode", "rmxon", - "enter_am_mode", "smam", - "exit_am_mode", "rmam", - "xon_character", "xonc", - 
"xoff_character", "xoffc", - "ena_acs", "enacs", - "label_on", "smln", - "label_off", "rmln", - "key_beg", "kbeg", - "key_cancel", "kcan", - "key_close", "kclo", - "key_command", "kcmd", - "key_copy", "kcpy", - "key_create", "kcrt", - "key_end", "kend", - "key_enter", "kent", - "key_exit", "kext", - "key_find", "kfnd", - "key_help", "khlp", - "key_mark", "kmrk", - "key_message", "kmsg", - "key_move", "kmov", - "key_next", "knxt", - "key_open", "kopn", - "key_options", "kopt", - "key_previous", "kprv", - "key_print", "kprt", - "key_redo", "krdo", - "key_reference", "kref", - "key_refresh", "krfr", - "key_replace", "krpl", - "key_restart", "krst", - "key_resume", "kres", - "key_save", "ksav", - "key_suspend", "kspd", - "key_undo", "kund", - "key_sbeg", "kBEG", - "key_scancel", "kCAN", - "key_scommand", "kCMD", - "key_scopy", "kCPY", - "key_screate", "kCRT", - "key_sdc", "kDC", - "key_sdl", "kDL", - "key_select", "kslt", - "key_send", "kEND", - "key_seol", "kEOL", - "key_sexit", "kEXT", - "key_sfind", "kFND", - "key_shelp", "kHLP", - "key_shome", "kHOM", - "key_sic", "kIC", - "key_sleft", "kLFT", - "key_smessage", "kMSG", - "key_smove", "kMOV", - "key_snext", "kNXT", - "key_soptions", "kOPT", - "key_sprevious", "kPRV", - "key_sprint", "kPRT", - "key_sredo", "kRDO", - "key_sreplace", "kRPL", - "key_sright", "kRIT", - "key_srsume", "kRES", - "key_ssave", "kSAV", - "key_ssuspend", "kSPD", - "key_sundo", "kUND", - "req_for_input", "rfi", - "key_f11", "kf11", - "key_f12", "kf12", - "key_f13", "kf13", - "key_f14", "kf14", - "key_f15", "kf15", - "key_f16", "kf16", - "key_f17", "kf17", - "key_f18", "kf18", - "key_f19", "kf19", - "key_f20", "kf20", - "key_f21", "kf21", - "key_f22", "kf22", - "key_f23", "kf23", - "key_f24", "kf24", - "key_f25", "kf25", - "key_f26", "kf26", - "key_f27", "kf27", - "key_f28", "kf28", - "key_f29", "kf29", - "key_f30", "kf30", - "key_f31", "kf31", - "key_f32", "kf32", - "key_f33", "kf33", - "key_f34", "kf34", - "key_f35", "kf35", - "key_f36", "kf36", - "key_f37", "kf37", - "key_f38", "kf38", - "key_f39", "kf39", - "key_f40", "kf40", - "key_f41", "kf41", - "key_f42", "kf42", - "key_f43", "kf43", - "key_f44", "kf44", - "key_f45", "kf45", - "key_f46", "kf46", - "key_f47", "kf47", - "key_f48", "kf48", - "key_f49", "kf49", - "key_f50", "kf50", - "key_f51", "kf51", - "key_f52", "kf52", - "key_f53", "kf53", - "key_f54", "kf54", - "key_f55", "kf55", - "key_f56", "kf56", - "key_f57", "kf57", - "key_f58", "kf58", - "key_f59", "kf59", - "key_f60", "kf60", - "key_f61", "kf61", - "key_f62", "kf62", - "key_f63", "kf63", - "clr_bol", "el1", - "clear_margins", "mgc", - "set_left_margin", "smgl", - "set_right_margin", "smgr", - "label_format", "fln", - "set_clock", "sclk", - "display_clock", "dclk", - "remove_clock", "rmclk", - "create_window", "cwin", - "goto_window", "wingo", - "hangup", "hup", - "dial_phone", "dial", - "quick_dial", "qdial", - "tone", "tone", - "pulse", "pulse", - "flash_hook", "hook", - "fixed_pause", "pause", - "wait_tone", "wait", - "user0", "u0", - "user1", "u1", - "user2", "u2", - "user3", "u3", - "user4", "u4", - "user5", "u5", - "user6", "u6", - "user7", "u7", - "user8", "u8", - "user9", "u9", - "orig_pair", "op", - "orig_colors", "oc", - "initialize_color", "initc", - "initialize_pair", "initp", - "set_color_pair", "scp", - "set_foreground", "setf", - "set_background", "setb", - "change_char_pitch", "cpi", - "change_line_pitch", "lpi", - "change_res_horz", "chr", - "change_res_vert", "cvr", - "define_char", "defc", - "enter_doublewide_mode", "swidm", - 
"enter_draft_quality", "sdrfq", - "enter_italics_mode", "sitm", - "enter_leftward_mode", "slm", - "enter_micro_mode", "smicm", - "enter_near_letter_quality", "snlq", - "enter_normal_quality", "snrmq", - "enter_shadow_mode", "sshm", - "enter_subscript_mode", "ssubm", - "enter_superscript_mode", "ssupm", - "enter_upward_mode", "sum", - "exit_doublewide_mode", "rwidm", - "exit_italics_mode", "ritm", - "exit_leftward_mode", "rlm", - "exit_micro_mode", "rmicm", - "exit_shadow_mode", "rshm", - "exit_subscript_mode", "rsubm", - "exit_superscript_mode", "rsupm", - "exit_upward_mode", "rum", - "micro_column_address", "mhpa", - "micro_down", "mcud1", - "micro_left", "mcub1", - "micro_right", "mcuf1", - "micro_row_address", "mvpa", - "micro_up", "mcuu1", - "order_of_pins", "porder", - "parm_down_micro", "mcud", - "parm_left_micro", "mcub", - "parm_right_micro", "mcuf", - "parm_up_micro", "mcuu", - "select_char_set", "scs", - "set_bottom_margin", "smgb", - "set_bottom_margin_parm", "smgbp", - "set_left_margin_parm", "smglp", - "set_right_margin_parm", "smgrp", - "set_top_margin", "smgt", - "set_top_margin_parm", "smgtp", - "start_bit_image", "sbim", - "start_char_set_def", "scsd", - "stop_bit_image", "rbim", - "stop_char_set_def", "rcsd", - "subscript_characters", "subcs", - "superscript_characters", "supcs", - "these_cause_cr", "docr", - "zero_motion", "zerom", - "char_set_names", "csnm", - "key_mouse", "kmous", - "mouse_info", "minfo", - "req_mouse_pos", "reqmp", - "get_mouse", "getm", - "set_a_foreground", "setaf", - "set_a_background", "setab", - "pkey_plab", "pfxl", - "device_type", "devt", - "code_set_init", "csin", - "set0_des_seq", "s0ds", - "set1_des_seq", "s1ds", - "set2_des_seq", "s2ds", - "set3_des_seq", "s3ds", - "set_lr_margin", "smglr", - "set_tb_margin", "smgtb", - "bit_image_repeat", "birep", - "bit_image_newline", "binel", - "bit_image_carriage_return", "bicr", - "color_names", "colornm", - "define_bit_image_region", "defbi", - "end_bit_image_region", "endbi", - "set_color_band", "setcolor", - "set_page_length", "slines", - "display_pc_char", "dispc", - "enter_pc_charset_mode", "smpch", - "exit_pc_charset_mode", "rmpch", - "enter_scancode_mode", "smsc", - "exit_scancode_mode", "rmsc", - "pc_term_options", "pctrm", - "scancode_escape", "scesc", - "alt_scancode_esc", "scesa", - "enter_horizontal_hl_mode", "ehhlm", - "enter_left_hl_mode", "elhlm", - "enter_low_hl_mode", "elohlm", - "enter_right_hl_mode", "erhlm", - "enter_top_hl_mode", "ethlm", - "enter_vertical_hl_mode", "evhlm", - "set_a_attributes", "sgr1", - "set_pglen_inch", "slength", - "termcap_init2", "", - "termcap_reset", "", - "linefeed_if_not_lf", "", - "backspace_if_not_bs", "", - "other_non_function_keys", "", - "arrow_key_map", "", - "acs_ulcorner", "", - "acs_llcorner", "", - "acs_urcorner", "", - "acs_lrcorner", "", - "acs_ltee", "", - "acs_rtee", "", - "acs_btee", "", - "acs_ttee", "", - "acs_hline", "", - "acs_vline", "", - "acs_plus", "", - "memory_lock", "", - "memory_unlock", "", - "box_chars_1", "", -} diff --git a/vendor/github.com/ijc/Gotty/gotty.go b/vendor/github.com/ijc/Gotty/gotty.go deleted file mode 100644 index c329778a1d..0000000000 --- a/vendor/github.com/ijc/Gotty/gotty.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// Gotty is a Go-package for reading and parsing the terminfo database -package gotty - -// TODO add more concurrency to name lookup, look for more opportunities. - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "os" - "path" - "reflect" - "strings" - "sync" -) - -// Open a terminfo file by the name given and construct a TermInfo object. -// If something went wrong reading the terminfo database file, an error is -// returned. -func OpenTermInfo(termName string) (*TermInfo, error) { - if len(termName) == 0 { - return nil, errors.New("No termname given") - } - // Find the environment variables - if termloc := os.Getenv("TERMINFO"); len(termloc) > 0 { - return readTermInfo(path.Join(termloc, string(termName[0]), termName)) - } else { - // Search like ncurses - locations := []string{} - if h := os.Getenv("HOME"); len(h) > 0 { - locations = append(locations, path.Join(h, ".terminfo")) - } - locations = append(locations, - "/etc/terminfo/", - "/lib/terminfo/", - "/usr/share/terminfo/") - for _, str := range locations { - term, err := readTermInfo(path.Join(str, string(termName[0]), termName)) - if err == nil { - return term, nil - } - } - return nil, errors.New("No terminfo file(-location) found") - } -} - -// Open a terminfo file from the environment variable containing the current -// terminal name and construct a TermInfo object. If something went wrong -// reading the terminfo database file, an error is returned. -func OpenTermInfoEnv() (*TermInfo, error) { - termenv := os.Getenv("TERM") - return OpenTermInfo(termenv) -} - -// Return an attribute by the name attr provided. If none can be found, -// an error is returned. -func (term *TermInfo) GetAttribute(attr string) (stacker, error) { - // Channel to store the main value in. - var value stacker - // Add a blocking WaitGroup - var block sync.WaitGroup - // Keep track of variable being written. - written := false - // Function to put into goroutine. - f := func(ats interface{}) { - var ok bool - var v stacker - // Switch on type of map to use and assign value to it. - switch reflect.TypeOf(ats).Elem().Kind() { - case reflect.Bool: - v, ok = ats.(map[string]bool)[attr] - case reflect.Int16: - v, ok = ats.(map[string]int16)[attr] - case reflect.String: - v, ok = ats.(map[string]string)[attr] - } - // If ok, a value is found, so we can write. - if ok { - value = v - written = true - } - // Goroutine is done - block.Done() - } - block.Add(3) - // Go for all 3 attribute lists. - go f(term.boolAttributes) - go f(term.numAttributes) - go f(term.strAttributes) - // Wait until every goroutine is done. - block.Wait() - // If a value has been written, return it. - if written { - return value, nil - } - // Otherwise, error. - return nil, fmt.Errorf("Erorr finding attribute") -} - -// Return an attribute by the name attr provided. If none can be found, -// an error is returned. A name is first converted to its termcap value. -func (term *TermInfo) GetAttributeName(name string) (stacker, error) { - tc := GetTermcapName(name) - return term.GetAttribute(tc) -} - -// A utility function that finds and returns the termcap equivalent of a -// variable name. 
-func GetTermcapName(name string) string { - // Termcap name - var tc string - // Blocking group - var wait sync.WaitGroup - // Function to put into a goroutine - f := func(attrs []string) { - // Find the string corresponding to the name - for i, s := range attrs { - if s == name { - tc = attrs[i+1] - } - } - // Goroutine is finished - wait.Done() - } - wait.Add(3) - // Go for all 3 attribute lists - go f(BoolAttr[:]) - go f(NumAttr[:]) - go f(StrAttr[:]) - // Wait until every goroutine is done - wait.Wait() - // Return the termcap name - return tc -} - -// This function takes a path to a terminfo file and reads it in binary -// form to construct the actual TermInfo file. -func readTermInfo(path string) (*TermInfo, error) { - // Open the terminfo file - file, err := os.Open(path) - defer file.Close() - if err != nil { - return nil, err - } - - // magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize - // Header is composed of the magic 0432 octal number, size of the name - // section, size of the boolean section, the amount of number values, - // the number of offsets of strings, and the size of the string section. - var header [6]int16 - // Byte array is used to read in byte values - var byteArray []byte - // Short array is used to read in short values - var shArray []int16 - // TermInfo object to store values - var term TermInfo - - // Read in the header - err = binary.Read(file, binary.LittleEndian, &header) - if err != nil { - return nil, err - } - // If magic number isn't there or isn't correct, we have the wrong filetype - if header[0] != 0432 { - return nil, errors.New(fmt.Sprintf("Wrong filetype")) - } - - // Read in the names - byteArray = make([]byte, header[1]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.Names = strings.Split(string(byteArray), "|") - - // Read in the booleans - byteArray = make([]byte, header[2]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.boolAttributes = make(map[string]bool) - for i, b := range byteArray { - if b == 1 { - term.boolAttributes[BoolAttr[i*2+1]] = true - } - } - // If the number of bytes read is not even, a byte for alignment is added - // We know the header is an even number of bytes so only need to check the - // total of the names and booleans. 
- if (header[1]+header[2])%2 != 0 { - err = binary.Read(file, binary.LittleEndian, make([]byte, 1)) - if err != nil { - return nil, err - } - } - - // Read in shorts - shArray = make([]int16, header[3]) - err = binary.Read(file, binary.LittleEndian, &shArray) - if err != nil { - return nil, err - } - term.numAttributes = make(map[string]int16) - for i, n := range shArray { - if n != 0377 && n > -1 { - term.numAttributes[NumAttr[i*2+1]] = n - } - } - - // Read the offsets into the short array - shArray = make([]int16, header[4]) - err = binary.Read(file, binary.LittleEndian, &shArray) - if err != nil { - return nil, err - } - // Read the actual strings in the byte array - byteArray = make([]byte, header[5]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.strAttributes = make(map[string]string) - // We get an offset, and then iterate until the string is null-terminated - for i, offset := range shArray { - if offset > -1 { - if int(offset) >= len(byteArray) { - return nil, errors.New("array out of bounds reading string section") - } - r := bytes.IndexByte(byteArray[offset:], 0) - if r == -1 { - return nil, errors.New("missing nul byte reading string section") - } - r += int(offset) - term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r]) - } - } - return &term, nil -} diff --git a/vendor/github.com/ijc/Gotty/parser.go b/vendor/github.com/ijc/Gotty/parser.go deleted file mode 100644 index a9d5d23c54..0000000000 --- a/vendor/github.com/ijc/Gotty/parser.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -var exp = [...]string{ - "%%", - "%c", - "%s", - "%p(\\d)", - "%P([A-z])", - "%g([A-z])", - "%'(.)'", - "%{([0-9]+)}", - "%l", - "%\\+|%-|%\\*|%/|%m", - "%&|%\\||%\\^", - "%=|%>|%<", - "%A|%O", - "%!|%~", - "%i", - "%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]", - "%\\?(.*?);", -} - -var regex *regexp.Regexp -var staticVar map[byte]stacker - -// Parses the attribute that is received with name attr and parameters params. -func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) { - // Get the attribute name first. - iface, err := term.GetAttribute(attr) - str, ok := iface.(string) - if err != nil { - return "", err - } - if !ok { - return str, errors.New("Only string capabilities can be parsed.") - } - // Construct the hidden parser struct so we can use a recursive stack based - // parser. - ps := &parser{} - // Dynamic variables only exist in this context. - ps.dynamicVar = make(map[byte]stacker, 26) - ps.parameters = make([]stacker, len(params)) - // Convert the parameters to insert them into the parser struct. - for i, x := range params { - ps.parameters[i] = x - } - // Recursively walk and return. - result, err := ps.walk(str) - return result, err -} - -// Parses the attribute that is received with name attr and parameters params. -// Only works on full name of a capability that is given, which it uses to -// search for the termcap name. -func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) { - tc := GetTermcapName(attr) - return term.Parse(tc, params) -} - -// Identify each token in a stack based manner and do the actual parsing. 
-func (ps *parser) walk(attr string) (string, error) { - // We use a buffer to get the modified string. - var buf bytes.Buffer - // Next, find and identify all tokens by their indices and strings. - tokens := regex.FindAllStringSubmatch(attr, -1) - if len(tokens) == 0 { - return attr, nil - } - indices := regex.FindAllStringIndex(attr, -1) - q := 0 // q counts the matches of one token - // Iterate through the string per character. - for i := 0; i < len(attr); i++ { - // If the current position is an identified token, execute the following - // steps. - if q < len(indices) && i >= indices[q][0] && i < indices[q][1] { - // Switch on token. - switch { - case tokens[q][0][:2] == "%%": - // Literal percentage character. - buf.WriteByte('%') - case tokens[q][0][:2] == "%c": - // Pop a character. - c, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - buf.WriteByte(c.(byte)) - case tokens[q][0][:2] == "%s": - // Pop a string. - str, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - if _, ok := str.(string); !ok { - return buf.String(), errors.New("Stack head is not a string") - } - buf.WriteString(str.(string)) - case tokens[q][0][:2] == "%p": - // Push a parameter on the stack. - index, err := strconv.ParseInt(tokens[q][1], 10, 8) - index-- - if err != nil { - return buf.String(), err - } - if int(index) >= len(ps.parameters) { - return buf.String(), errors.New("Parameters index out of bound") - } - ps.st.push(ps.parameters[index]) - case tokens[q][0][:2] == "%P": - // Pop a variable from the stack as a dynamic or static variable. - val, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - index := tokens[q][2] - if len(index) > 1 { - errorStr := fmt.Sprintf("%s is not a valid dynamic variables index", - index) - return buf.String(), errors.New(errorStr) - } - // Specify either dynamic or static. - if index[0] >= 'a' && index[0] <= 'z' { - ps.dynamicVar[index[0]] = val - } else if index[0] >= 'A' && index[0] <= 'Z' { - staticVar[index[0]] = val - } - case tokens[q][0][:2] == "%g": - // Push a variable from the stack as a dynamic or static variable. - index := tokens[q][3] - if len(index) > 1 { - errorStr := fmt.Sprintf("%s is not a valid static variables index", - index) - return buf.String(), errors.New(errorStr) - } - var val stacker - if index[0] >= 'a' && index[0] <= 'z' { - val = ps.dynamicVar[index[0]] - } else if index[0] >= 'A' && index[0] <= 'Z' { - val = staticVar[index[0]] - } - ps.st.push(val) - case tokens[q][0][:2] == "%'": - // Push a character constant. - con := tokens[q][4] - if len(con) > 1 { - errorStr := fmt.Sprintf("%s is not a valid character constant", con) - return buf.String(), errors.New(errorStr) - } - ps.st.push(con[0]) - case tokens[q][0][:2] == "%{": - // Push an integer constant. - con, err := strconv.ParseInt(tokens[q][5], 10, 32) - if err != nil { - return buf.String(), err - } - ps.st.push(con) - case tokens[q][0][:2] == "%l": - // Push the length of the string that is popped from the stack. - popStr, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - if _, ok := popStr.(string); !ok { - errStr := fmt.Sprintf("Stack head is not a string") - return buf.String(), errors.New(errStr) - } - ps.st.push(len(popStr.(string))) - case tokens[q][0][:2] == "%?": - // If-then-else construct. First, the whole string is identified and - // then inside this substring, we can specify which parts to switch on. 
- ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);") - ifTokens := ifReg.FindStringSubmatch(tokens[q][0]) - var ( - ifStr string - err error - ) - // Parse the if-part to determine if-else. - if len(ifTokens[1]) > 0 { - ifStr, err = ps.walk(ifTokens[1]) - } else { // else - ifStr, err = ps.walk(ifTokens[4]) - } - // Return any errors - if err != nil { - return buf.String(), err - } else if len(ifStr) > 0 { - // Self-defined limitation, not sure if this is correct, but didn't - // seem like it. - return buf.String(), errors.New("If-clause cannot print statements") - } - var thenStr string - // Pop the first value that is set by parsing the if-clause. - choose, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - // Switch to if or else. - if choose.(int) == 0 && len(ifTokens[1]) > 0 { - thenStr, err = ps.walk(ifTokens[3]) - } else if choose.(int) != 0 { - if len(ifTokens[1]) > 0 { - thenStr, err = ps.walk(ifTokens[2]) - } else { - thenStr, err = ps.walk(ifTokens[5]) - } - } - if err != nil { - return buf.String(), err - } - buf.WriteString(thenStr) - case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits. - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'x': - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'X': - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 's': - token := tokens[q][0] - // Remove the : that comes before a flag. - if token[1] == ':' { - token = token[:1] + token[2:] - } - digit, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - // The rest is determined like the normal formatted prints. - digitStr := fmt.Sprintf(token, digit.(int)) - buf.WriteString(digitStr) - case tokens[q][0][:2] == "%i": - // Increment the parameters by one. - if len(ps.parameters) < 2 { - return buf.String(), errors.New("Not enough parameters to increment.") - } - val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int) - val1++ - val2++ - ps.parameters[0], ps.parameters[1] = val1, val2 - default: - // The rest of the tokens is a special case, where two values are - // popped and then operated on by the token that comes after them. - op1, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - op2, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - var result stacker - switch tokens[q][0][:2] { - case "%+": - // Addition - result = op2.(int) + op1.(int) - case "%-": - // Subtraction - result = op2.(int) - op1.(int) - case "%*": - // Multiplication - result = op2.(int) * op1.(int) - case "%/": - // Division - result = op2.(int) / op1.(int) - case "%m": - // Modulo - result = op2.(int) % op1.(int) - case "%&": - // Bitwise AND - result = op2.(int) & op1.(int) - case "%|": - // Bitwise OR - result = op2.(int) | op1.(int) - case "%^": - // Bitwise XOR - result = op2.(int) ^ op1.(int) - case "%=": - // Equals - result = op2 == op1 - case "%>": - // Greater-than - result = op2.(int) > op1.(int) - case "%<": - // Lesser-than - result = op2.(int) < op1.(int) - case "%A": - // Logical AND - result = op2.(bool) && op1.(bool) - case "%O": - // Logical OR - result = op2.(bool) || op1.(bool) - case "%!": - // Logical complement - result = !op1.(bool) - case "%~": - // Bitwise complement - result = ^(op1.(int)) - } - ps.st.push(result) - } - - i = indices[q][1] - 1 - q++ - } else { - // We are not "inside" a token, so just skip until the end or the next - // token, and add all characters to the buffer. 
- j := i - if q != len(indices) { - for !(j >= indices[q][0] && j < indices[q][1]) { - j++ - } - } else { - j = len(attr) - } - buf.WriteString(string(attr[i:j])) - i = j - } - } - // Return the buffer as a string. - return buf.String(), nil -} - -// Push a stacker-value onto the stack. -func (st *stack) push(s stacker) { - *st = append(*st, s) -} - -// Pop a stacker-value from the stack. -func (st *stack) pop() (stacker, error) { - if len(*st) == 0 { - return nil, errors.New("Stack is empty.") - } - newStack := make(stack, len(*st)-1) - val := (*st)[len(*st)-1] - copy(newStack, (*st)[:len(*st)-1]) - *st = newStack - return val, nil -} - -// Initialize regexes and the static vars (that don't get changed between -// calls. -func init() { - // Initialize the main regex. - expStr := strings.Join(exp[:], "|") - regex, _ = regexp.Compile(expStr) - // Initialize the static variables. - staticVar = make(map[byte]stacker, 26) -} diff --git a/vendor/github.com/ijc/Gotty/types.go b/vendor/github.com/ijc/Gotty/types.go deleted file mode 100644 index 9bcc65e9b8..0000000000 --- a/vendor/github.com/ijc/Gotty/types.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -type TermInfo struct { - boolAttributes map[string]bool - numAttributes map[string]int16 - strAttributes map[string]string - // The various names of the TermInfo file. - Names []string -} - -type stacker interface { -} -type stack []stacker - -type parser struct { - st stack - parameters []stacker - dynamicVar map[byte]stacker -} diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE index 7448756763..1eb75ef68e 100644 --- a/vendor/github.com/klauspost/compress/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -1,4 +1,5 @@ Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index 202f36a99b..413ec3b3cd 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -243,7 +243,7 @@ func (s *Scratch) buildDtable() error { nBits := s.actualTableLog - byte(highBits(uint32(nextState))) s.decTable[u].nbBits = nBits newState := (nextState << nBits) - tableSize - if newState > tableSize { + if newState >= tableSize { return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) } if newState == uint16(u) && nBits == 0 { @@ -281,8 +281,12 @@ func (s *Scratch) decompress() error { tmp[off+2] = s1.nextFast() tmp[off+3] = s2.nextFast() off += 4 + // When off is 0, we have overflowed and should write. if off == 0 { s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } } } } else { @@ -296,7 +300,7 @@ func (s *Scratch) decompress() error { off += 4 if off == 0 { s.Out = append(s.Out, tmp...) - off = 0 + // When off is 0, we have overflowed and should write. 
 			if len(s.Out) >= s.DecompressLimit {
 				return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
 			}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 261c54274c..43b4815b37 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -193,14 +193,26 @@ func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
 			tmp[off+3] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
 			off += 4
 			if off == 0 {
+				if len(s.Out)+256 > s.MaxDecodedSize {
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
 				s.Out = append(s.Out, tmp...)
 			}
 		}
+	if len(s.Out)+int(off) > s.MaxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
 	s.Out = append(s.Out, tmp[:off]...)
 	for !br.finished() {
 		br.fill()
+		if len(s.Out) >= s.MaxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
 		s.Out = append(s.Out, decode())
 	}
 	return s.Out, br.close()
@@ -218,6 +230,9 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
 	if len(in) < 6+(4*1) {
 		return nil, errors.New("input too small")
 	}
+	if dstSize > s.MaxDecodedSize {
+		return nil, ErrMaxDecodedSizeExceeded
+	}
 	// TODO: We do not detect when we overrun a buffer, except if the last one does.
 	var br [4]bitReader
@@ -247,9 +262,13 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
 	dstOut := s.Out
 	dstEvery := (dstSize + 3) / 4
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := s.dt.single[:tlSize]
+
 	decode := func(br *bitReader) byte {
 		val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
-		v := s.dt.single[val]
+		v := single[val&tlMask]
 		br.bitsRead += v.nBits
 		return v.byte
 	}
@@ -279,7 +298,7 @@ bigloop:
 			off += 2
 			if off == bufoff {
 				if bufoff > dstEvery {
-					return nil, errors.New("corruption detected: stream overrun")
+					return nil, errors.New("corruption detected: stream overrun 1")
 				}
 				copy(dstOut, tmp[:bufoff])
 				copy(dstOut[dstEvery:], tmp[bufoff:bufoff*2])
@@ -288,15 +307,15 @@ bigloop:
 				off = 0
 				dstOut = dstOut[bufoff:]
 				// There must at least be 3 buffers left.
-				if len(dstOut) < dstEvery*3+3 {
-					return nil, errors.New("corruption detected: stream overrun")
+				if len(dstOut) < dstEvery*3 {
+					return nil, errors.New("corruption detected: stream overrun 2")
 				}
 			}
 		}
 	}
 	if off > 0 {
 		ioff := int(off)
 		if len(dstOut) < dstEvery*3+ioff {
-			return nil, errors.New("corruption detected: stream overrun")
+			return nil, errors.New("corruption detected: stream overrun 3")
 		}
 		copy(dstOut, tmp[:off])
 		copy(dstOut[dstEvery:dstEvery+ioff], tmp[bufoff:bufoff*2])
@@ -311,7 +330,7 @@ bigloop:
 		for !br.finished() {
 			br.fill()
 			if offset >= len(dstOut) {
-				return nil, errors.New("corruption detected: stream overrun")
+				return nil, errors.New("corruption detected: stream overrun 4")
 			}
 			dstOut[offset] = decode(br)
 			offset++
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 50d02e4405..6f823f94d7 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -35,6 +35,9 @@ var (
 	// ErrTooBig is returned if input is too large for a single block.
 	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds the maximum allowed size.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
 )
 
 type ReusePolicy uint8
@@ -86,6 +89,11 @@ type Scratch struct {
 	// Reuse will specify the reuse policy
 	Reuse ReusePolicy
 
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
 	br             byteReader
 	symbolLen      uint16 // Length of active part of the symbol table.
 	maxCount       int    // count of the most probable symbol
@@ -116,6 +124,9 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
 	if s.TableLog > tableLogMax {
 		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax)
 	}
+	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+		s.MaxDecodedSize = BlockSizeMax
+	}
 	if s.clearCount && s.maxCount == 0 {
 		for i := range s.count {
 			s.count[i] = 0
diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s
index e6179f65e3..1c66e37234 100644
--- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s
+++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s
@@ -184,9 +184,7 @@ tagLit60Plus:
 	// checks. In the asm version, we code it once instead of once per switch case.
 	ADDQ CX, SI
 	SUBQ $58, SI
-	MOVQ SI, BX
-	SUBQ R11, BX
-	CMPQ BX, R12
+	CMPQ SI, R13
 	JA errCorrupt
 
 	// case x == 60:
@@ -232,9 +230,7 @@ tagCopy4:
 	ADDQ $5, SI
 
 	// if uint(s) > uint(len(src)) { etc }
-	MOVQ SI, BX
-	SUBQ R11, BX
-	CMPQ BX, R12
+	CMPQ SI, R13
 	JA errCorrupt
 
 	// length = 1 + int(src[s-5])>>2
@@ -251,9 +247,7 @@ tagCopy2:
 	ADDQ $3, SI
 
 	// if uint(s) > uint(len(src)) { etc }
-	MOVQ SI, BX
-	SUBQ R11, BX
-	CMPQ BX, R12
+	CMPQ SI, R13
 	JA errCorrupt
 
 	// length = 1 + int(src[s-3])>>2
@@ -277,9 +271,7 @@ tagCopy:
 	ADDQ $2, SI
 
 	// if uint(s) > uint(len(src)) { etc }
-	MOVQ SI, BX
-	SUBQ R11, BX
-	CMPQ BX, R12
+	CMPQ SI, R13
 	JA errCorrupt
 
 	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go
index 8c9f2049bc..94a96c5d7b 100644
--- a/vendor/github.com/klauspost/compress/snappy/decode_other.go
+++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go
@@ -85,14 +85,28 @@ func decode(dst, src []byte) int {
 		if offset <= 0 || d < offset || length > len(dst)-d {
 			return decodeErrCodeCorrupt
 		}
-		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
-		// the built-in copy function, this byte-by-byte copy always runs
+		// Copy from an earlier sub-slice of dst to a later sub-slice.
+		// If no overlap, use the built-in copy:
+		if offset > length {
+			copy(dst[d:d+length], dst[d-offset:])
+			d += length
+			continue
+		}
+
+		// Unlike the built-in copy function, this byte-by-byte copy always runs
 		// forwards, even if the slices overlap. Conceptually, this is:
 		//
 		//	d += forwardCopy(dst[d:d+length], dst[d-offset:])
+		//
+		// We align the slices into a and b and show the compiler they are the same size.
+		// This allows the loop to run without bounds checks.
+ a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] } + d += length } if d != len(dst) { return decodeErrCodeCorrupt diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 670f98af44..d9d38b23f1 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -34,7 +34,8 @@ For now, a high speed (fastest) and medium-fast (default) compressor has been im The "Fastest" compression ratio is roughly equivalent to zstd level 1. The "Default" compression ration is roughly equivalent to zstd level 3 (default). -In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. The compression ratio compared to stdlib is around level 3, but usually 3x as fast. +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. Compared to cgo zstd, the speed is around level 3 (default), but compression slightly worse, between level 1&2. @@ -217,7 +218,8 @@ silesia.tar zstd 3 211947520 66793301 1377 146.79 As part of the development process a *Snappy* -> *Zstandard* converter was also built. -This can convert a *framed* [Snappy Stream](https://godoc.org/github.com/golang/snappy#Writer) to a zstd stream. Note that a single block is not framed. +This can convert a *framed* [Snappy Stream](https://godoc.org/github.com/golang/snappy#Writer) to a zstd stream. +Note that a single block is not framed. Conversion is done by converting the stream directly from Snappy without intermediate full decoding. Therefore the compression ratio is much less than what can be done by a full decompression diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index aca1cb85d1..3e161ea15e 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -63,7 +63,8 @@ var ( type blockDec struct { // Raw source data of the block. - data []byte + data []byte + dataStorage []byte // Destination of the decoded data. dst []byte @@ -145,18 +146,18 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.data) < cSize { + if cap(b.dataStorage) < cSize { if b.lowMem { - b.data = make([]byte, 0, cSize) + b.dataStorage = make([]byte, 0, cSize) } else { - b.data = make([]byte, 0, maxBlockSize) + b.dataStorage = make([]byte, 0, maxBlockSize) } } if cap(b.dst) <= maxBlockSize { b.dst = make([]byte, 0, maxBlockSize+1) } var err error - b.data, err = br.readBig(cSize, b.data[:0]) + b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debug { println("Reading block:", err) @@ -447,6 +448,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { } // Use our out buffer. huff.Out = b.literalBuf[:0] + huff.MaxDecodedSize = litRegenSize if fourStreams { literals, err = huff.Decompress4X(literals, litRegenSize) } else { @@ -609,6 +611,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Use our out buffer. 
 		huff = hist.huffTree
 		huff.Out = b.literalBuf[:0]
+		huff.MaxDecodedSize = litRegenSize
 		if fourStreams {
 			literals, err = huff.Decompress4X(literals, litRegenSize)
 		} else {
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index cba24c76d1..9d9151a0ef 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -155,14 +155,17 @@ func (h *literalsHeader) setSize(regenLen int) {
 }
 
 // setSizes will set the size of a compressed literals section and the input length.
-func (h *literalsHeader) setSizes(compLen, inLen int) {
+func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
 	compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))
 	// Only retain 2 bits
 	const mask = 3
 	lh := uint64(*h & mask)
 	switch {
 	case compBits <= 10 && inBits <= 10:
-		lh |= (1 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
+		if !single {
+			lh |= 1 << 2
+		}
+		lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
 		if debug {
 			const mmask = (1 << 24) - 1
 			n := (lh >> 4) & mmask
@@ -175,8 +178,14 @@
 		}
 	case compBits <= 14 && inBits <= 14:
 		lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)
+		if single {
+			panic("single stream used with more than 10 bits length.")
+		}
 	case compBits <= 18 && inBits <= 18:
 		lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)
+		if single {
+			panic("single stream used with more than 10 bits length.")
+		}
 	default:
 		panic("internal error: block too big")
 	}
@@ -307,12 +316,30 @@ func (b *blockEnc) encodeLits() error {
 		return nil
 	}
 
-	// TODO: Switch to 1X when less than x bytes.
-	out, reUsed, err := huff0.Compress4X(b.literals, b.litEnc)
-	// Bail out of compression is too little.
-	if len(out) > (len(b.literals) - len(b.literals)>>4) {
+	var (
+		out            []byte
+		reUsed, single bool
+		err            error
+	)
+	if len(b.literals) >= 1024 {
+		// Use 4 Streams.
+		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
+		if len(out) > len(b.literals)-len(b.literals)>>4 {
+			// Bail out if the compression gain is too little.
+			err = huff0.ErrIncompressible
+		}
+	} else if len(b.literals) > 32 {
+		// Use 1 stream
+		single = true
+		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
+		if len(out) > len(b.literals)-len(b.literals)>>4 {
+			// Bail out if the compression gain is too little.
+			err = huff0.ErrIncompressible
+		}
+	} else {
 		err = huff0.ErrIncompressible
 	}
+
 	switch err {
 	case huff0.ErrIncompressible:
 		if debug {
@@ -351,7 +378,7 @@ func (b *blockEnc) encodeLits() error {
 		lh.setType(literalsBlockCompressed)
 	}
 	// Set sizes
-	lh.setSizes(len(out), len(b.literals))
+	lh.setSizes(len(out), len(b.literals), single)
 	bh.setSize(uint32(len(out) + lh.size() + 1))
 
 	// Write block headers.
@@ -381,16 +408,23 @@ func (b *blockEnc) encode() error {
 	b.output = bh.appendTo(b.output)
 
 	var (
-		out    []byte
-		reUsed bool
-		err    error
+		out            []byte
+		reUsed, single bool
+		err            error
 	)
-	if len(b.literals) > 32 {
-		// TODO: Switch to 1X on small blocks.
+	if len(b.literals) >= 1024 {
+		// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) if len(out) > len(b.literals)-len(b.literals)>>4 { err = huff0.ErrIncompressible } + } else if len(b.literals) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + if len(out) > len(b.literals)-len(b.literals)>>4 { + err = huff0.ErrIncompressible + } } else { err = huff0.ErrIncompressible } @@ -435,7 +469,7 @@ func (b *blockEnc) encode() error { } } } - lh.setSizes(len(out), len(b.literals)) + lh.setSizes(len(out), len(b.literals), single) if debug { printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) println("Adding literal header:", lh) diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 4a84604768..3538063f1a 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -116,6 +116,9 @@ func (r *readerWrapper) readByte() (byte, error) { } func (r *readerWrapper) skipN(n int) error { - _, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + if n2 != int64(n) { + err = io.ErrUnexpectedEOF + } return err } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f06bff6f6f..f4db3096ad 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -127,6 +127,9 @@ func (d *Decoder) Read(p []byte) (int, error) { } } if len(d.current.b) > 0 { + if debug { + println("returning", n, "still bytes left:", len(d.current.b)) + } // Only return error at end of block return n, nil } @@ -159,6 +162,9 @@ func (d *Decoder) Reset(r io.Reader) error { // If bytes buffer and < 1MB, do sync decoding anyway. if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 { + if debug { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } b := bb.Bytes() dst, err := d.DecodeAll(b, nil) if err == nil { @@ -167,6 +173,9 @@ func (d *Decoder) Reset(r io.Reader) error { d.current.b = dst d.current.err = err d.current.flushed = true + if debug { + println("sync decode to ", len(dst), "bytes, err:", err) + } return nil } @@ -193,7 +202,9 @@ func (d *Decoder) drainOutput() { d.current.cancel = nil } if d.current.d != nil { - println("re-adding current decoder", d.current.d, len(d.decoders)) + if debug { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } d.decoders <- d.current.d d.current.d = nil d.current.b = nil @@ -206,7 +217,9 @@ func (d *Decoder) drainOutput() { select { case v := <-d.current.output: if v.d != nil { - println("got decoder", v.d) + if debug { + printf("re-adding decoder %p", v.d) + } d.decoders <- v.d } if v.err == errEndOfStream { @@ -259,20 +272,22 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if d.current.err == ErrDecoderClosed { return dst, ErrDecoderClosed } - //println(len(d.frames), len(d.decoders), d.current) + + // Grab a block decoder and frame decoder. block, frame := <-d.decoders, <-d.frames defer func() { + if debug { + printf("re-adding decoder: %p", block) + } d.decoders <- block frame.rawInput = nil + frame.bBuf = nil d.frames <- frame }() - if cap(dst) == 0 { - // Allocate 1MB by default. 
-		dst = make([]byte, 0, 1<<20)
-	}
-	br := byteBuf(input)
+	frame.bBuf = input
+
 	for {
-		err := frame.reset(&br)
+		err := frame.reset(&frame.bBuf)
 		if err == io.EOF {
 			return dst, nil
 		}
@@ -290,11 +305,21 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 				dst = dst2
 			}
 		}
+		if cap(dst) == 0 {
+			// Allocate window size * 2 by default if nothing is provided and we didn't get frame content size.
+			size := frame.WindowSize * 2
+			// Cap to 1 MB.
+			if size > 1<<20 {
+				size = 1 << 20
+			}
+			dst = make([]byte, 0, size)
+		}
+
 		dst, err = frame.runDecoder(dst, block)
 		if err != nil {
 			return dst, err
 		}
-		if len(br) == 0 {
+		if len(frame.bBuf) == 0 {
 			break
 		}
 	}
@@ -305,6 +330,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 // If an error occurs d.err will be set.
 func (d *Decoder) nextBlock() {
 	if d.current.d != nil {
+		if debug {
+			printf("re-adding current decoder %p", d.current.d)
+		}
 		d.decoders <- d.current.d
 		d.current.d = nil
 	}
@@ -377,6 +405,9 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
 	defer d.streamWg.Done()
 	frame := newFrameDec(d.o)
 	for stream := range inStream {
+		if debug {
+			println("got new stream")
+		}
 		br := readerWrapper{r: stream.r}
 	decodeStream:
 		for {
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index 52c1eb0662..2ac9cd2dd3 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -50,15 +50,17 @@ func WithDecoderConcurrency(n int) DOption {
 }
 
 // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
-// (non-streaming) operations.
-// Maxmimum and default is 1 << 63 bytes.
+// non-streaming operations or maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// For streaming operations, the maximum window size is capped at 1<<30 bytes.
+// Maximum and default is 1 << 63 bytes.
 func WithDecoderMaxMemory(n uint64) DOption {
 	return func(o *decoderOptions) error {
 		if n == 0 {
-			return errors.New("WithDecoderMaxmemory must be at least 1")
+			return errors.New("WithDecoderMaxMemory must be at least 1")
 		}
 		if n > 1<<63 {
-			return fmt.Errorf("WithDecoderMaxmemorymust be less than 1 << 63")
+			return fmt.Errorf("WithDecoderMaxMemory must be less than 1 << 63")
 		}
 		o.maxDecodedSize = n
 		return nil
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 02c79814fc..e120625d85 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -82,16 +82,11 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
 		stepSize++
 	}
 
-	// TEMPLATE
-	const kSearchStrength = 8
 
 	// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s cv := load6432(src, s) - // nextHash is the hash at s - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) // Relative offsets offset1 := int32(blk.recentOffsets[0]) @@ -119,8 +114,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS = nextHashS & dFastShortTableMask - nextHashL = nextHashL & dFastLongTableMask + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -172,8 +167,6 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - nextHashS = hash5(cv, dFastShortTableBits) - nextHashL = hash8(cv, dFastLongTableBits) continue } const repOff2 = 1 @@ -221,8 +214,6 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - nextHashS = hash5(cv, dFastShortTableBits) - nextHashL = hash8(cv, dFastLongTableBits) // Swap offsets offset1, offset2 = offset2, offset1 continue @@ -296,8 +287,6 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - nextHashS = hash5(cv, dFastShortTableBits) - nextHashL = hash8(cv, dFastLongTableBits) } // A 4-byte match has been found. Update recent offsets. @@ -345,38 +334,54 @@ encodeLoop: break encodeLoop } - // Index match start + 2 and end - 2 - index0 := s - l + 2 + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) index1 := s - 2 - if l == 4 { - // if l is 4, we would check the same place twice, so index s-1 instead. - index1++ - } cv0 := load6432(src, index0) cv1 := load6432(src, index1) - entry0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - entry1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.table[hash5(cv0, dFastShortTableBits)&dFastShortTableMask] = entry0 - e.longTable[hash8(cv0, dFastLongTableBits)&dFastLongTableMask] = entry0 - e.table[hash5(cv1, dFastShortTableBits)&dFastShortTableMask] = entry1 - e.longTable[hash8(cv1, dFastLongTableBits)&dFastLongTableMask] = entry1 + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 cv = load6432(src, s) - nextHashS = hash5(cv, dFastShortTableBits) - nextHashL = hash8(cv, dFastLongTableBits) + + if !canRepeat { + continue + } // Check offset 2 - if o2 := s - offset2; canRepeat && o2 > 0 && load3232(src, o2) == uint32(cv) { + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv1>>8, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + // We have at least 4 byte match. // No need to check backwards. We come straight from a match l := 4 + e.matchlen(s+4, o2+4, src) - // Store this, since we have it. + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL&dFastLongTableMask] = entry - e.table[nextHashS&dFastShortTableMask] = entry + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 + // Since litlen is always 0, this is offset 1. seq.offset = 1 s += l @@ -389,12 +394,10 @@ encodeLoop: // Swap offset 1 and 2. 
offset1, offset2 = offset2, offset1 if s >= sLimit { + // Finished break encodeLoop } - // Prepare next loop. cv = load6432(src, s) - nextHashS = hash5(cv, dFastShortTableBits) - nextHashL = hash8(cv, dFastLongTableBits) } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index a8edaa8885..6f388de041 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -124,8 +124,6 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { // nextEmit is where in src the next emitLiteral should start from. nextEmit := s cv := load6432(src, s) - // nextHash is the hash at s - nextHash := hash6(cv, hashLog) // Relative offsets offset1 := int32(blk.recentOffsets[0]) @@ -157,8 +155,8 @@ encodeLoop: panic("offset0 was 0") } - nextHash2 := hash6(cv>>8, hashLog) & tableMask - nextHash = nextHash & tableMask + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -207,8 +205,6 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - //nextHash = hashLen(cv, hashLog, mls) - nextHash = hash6(cv, hashLog) continue } coffset0 := s - (candidate.offset - e.cur) @@ -245,7 +241,6 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - nextHash = hash6(cv, hashLog) } // A 4-byte match has been found. We'll later see if more than 4 bytes. offset2 = offset1 @@ -292,15 +287,16 @@ encodeLoop: break encodeLoop } cv = load6432(src, s) - nextHash = hash6(cv, hashLog) // Check offset 2 - if o2 := s - offset2; canRepeat && o2 > 0 && load3232(src, o2) == uint32(cv) { + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match l := 4 + e.matchlen(s+4, o2+4, src) + // Store this, since we have it. - e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: uint32(cv)} + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 // Since litlen is always 0, this is offset 1. @@ -319,7 +315,6 @@ encodeLoop: } // Prepare next loop. cv = load6432(src, s) - nextHash = hash6(cv, hashLog) } } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index ed028f5a79..b7011be29a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -211,6 +211,7 @@ func (e *Encoder) nextBlock(final bool) error { s.wWg.Wait() _, s.err = s.w.Write(blk.output) s.nWritten += int64(len(blk.output)) + s.eofWritten = true } return s.err } @@ -256,7 +257,12 @@ func (e *Encoder) nextBlock(final bool) error { } s.wWg.Done() }() - err := blk.encode() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode() + } switch err { case errIncompressible: if debug { @@ -443,7 +449,13 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(src) == 0 { blk.last = true } - err := blk.encode() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
+ if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode() + } + switch err { case errIncompressible: if debug {
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 6e210c4a05..a8559e900b 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -6,7 +6,7 @@ import ( "strings" ) -// DOption is an option for creating a encoder. +// EOption is an option for creating an encoder. type EOption func(*encoderOptions) error // options retains accumulated state of multiple options.
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 8fa264fc24..839a95fbff 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -39,6 +39,9 @@ type frameDec struct { rawInput byteBuffer + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + // asyncRunning indicates whether the async routine processes input on 'decoding'. asyncRunning bool asyncRunningMu sync.Mutex
@@ -59,6 +62,9 @@ func newFrameDec(o decoderOptions) *frameDec { o: o, maxWindowSize: 1 << 30, } + if d.maxWindowSize > o.maxDecodedSize { + d.maxWindowSize = o.maxDecodedSize + } return &d }
@@ -232,7 +238,9 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { - println("decoding new block") + if debug { + printf("decoding new block %p:%p", block, block.data) + } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err)
@@ -280,13 +288,13 @@ func (d *frameDec) checkCRC() error { if !d.HasCheckSum { return nil } - var tmp [8]byte - gotB := d.crc.Sum(tmp[:0]) + var tmp [4]byte + got := d.crc.Sum64() // Flip to match file order. - gotB[0] = gotB[7] - gotB[1] = gotB[6] - gotB[2] = gotB[5] - gotB[3] = gotB[4] + tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) // We can overwrite upper tmp now want := d.rawInput.readSmall(4) if want == nil { return io.ErrUnexpectedEOF } - if !bytes.Equal(gotB[:4], want) { - println("CRC Check Failed:", gotB[:4], "!=", want) + if !bytes.Equal(tmp[:], want) { + if debug { + println("CRC Check Failed:", tmp[:], "!=", want) + } return ErrCRCMismatch } println("CRC ok")
@@ -423,7 +433,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) { } } -// runDecoder will create a sync decoder that will decodeAsync a block of data. +// runDecoder will create a sync decoder that will decode a block of data. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { // TODO: Init to dictionary d.history.reset()
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index a86d00bc35..9efe34feb3 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -184,29 +184,75 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state.
-type decSymbol struct { - newState uint16 - addBits uint8 // Used for symbols until transformed. - nbBits uint8 - baseline uint32 +// Using a composite uint64 is faster than a struct with separate members. +type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baseline() uint32 { + return uint32(d >> 32) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { + *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setBaseline(baseline uint32) { + const mask = 0xffffffff + *d = (*d & mask) | decSymbol(baseline)<<32 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) } // decSymbolValue returns the transformed decSymbol for the given symbol. func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { if int(symb) >= len(t) { - return decSymbol{}, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) } lu := t[symb] - return decSymbol{ - addBits: lu.addBits, - baseline: lu.baseLine, - }, nil + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil } // setRLE will set the decoder til RLE mode. func (s *fseDecoder) setRLE(symbol decSymbol) { s.actualTableLog = 0 - s.maxBits = symbol.addBits + s.maxBits = symbol.addBits() s.dt[0] = symbol } @@ -220,7 +266,7 @@ func (s *fseDecoder) buildDtable() error { { for i, v := range s.norm[:s.symbolLen] { if v == -1 { - s.dt[highThreshold].addBits = uint8(i) + s.dt[highThreshold].setAddBits(uint8(i)) highThreshold-- symbolNext[i] = 1 } else { @@ -235,7 +281,7 @@ func (s *fseDecoder) buildDtable() error { position := uint32(0) for ss, v := range s.norm[:s.symbolLen] { for i := 0; i < int(v); i++ { - s.dt[position].addBits = uint8(ss) + s.dt[position].setAddBits(uint8(ss)) position = (position + step) & tableMask for position > highThreshold { // lowprob area @@ -253,11 +299,11 @@ func (s *fseDecoder) buildDtable() error { { tableSize := uint16(1 << s.actualTableLog) for u, v := range s.dt[:tableSize] { - symbol := v.addBits + symbol := v.addBits() nextState := symbolNext[symbol] symbolNext[symbol] = nextState + 1 nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].nbBits = nBits + s.dt[u&maxTableMask].setNBits(nBits) newState := (nextState << nBits) - tableSize if newState > tableSize { return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) @@ -266,7 +312,7 @@ func (s *fseDecoder) buildDtable() error { // Seems weird that this is possible with nbits > 0. 
return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) } - s.dt[u&maxTableMask].newState = newState + s.dt[u&maxTableMask].setNewState(newState) } } return nil @@ -279,25 +325,21 @@ func (s *fseDecoder) transform(t []baseOffset) error { tableSize := uint16(1 << s.actualTableLog) s.maxBits = 0 for i, v := range s.dt[:tableSize] { - if int(v.addBits) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits, len(t)) + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) } - lu := t[v.addBits] + lu := t[add] if lu.addBits > s.maxBits { s.maxBits = lu.addBits } - s.dt[i&maxTableMask] = decSymbol{ - newState: v.newState, - nbBits: v.nbBits, - addBits: lu.addBits, - baseline: lu.baseLine, - } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v } return nil } type fseState struct { - // TODO: Check if *[1 << maxTablelog]decSymbol is faster. dt []decSymbol state decSymbol } @@ -312,26 +354,31 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { // next returns the current symbol and sets the next state. // At least tablelog bits must be available in the bit reader. func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits)) - s.state = s.dt[s.state.newState+lowBits] + lowBits := uint16(br.getBits(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] } // finished returns true if all bits have been read from the bitstream // and the next state would require reading bits from the input. func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits > 0 + return br.finished() && s.state.nbBits() > 0 } // final returns the current state symbol without decoding the next. func (s *fseState) final() (int, uint8) { - return int(s.state.baseline), s.state.addBits + return s.state.baselineInt(), s.state.addBits() +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() } // nextFast returns the next symbol and sets the next state. // This can only be used if no symbols are 0 bits. // At least tablelog bits must be available in the bit reader. func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := uint16(br.getBitsFast(s.state.nbBits)) - s.state = s.dt[s.state.newState+lowBits] - return s.state.baseline, s.state.addBits + lowBits := uint16(br.getBitsFast(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] + return s.state.baseline(), s.state.addBits() } diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go index 819d87f881..4a752067fc 100644 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -64,7 +64,7 @@ func hash6(u uint64, h uint8) uint32 { return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) } -// hash6 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <64. 
func hash7(u uint64, h uint8) uint32 { return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index cef69e35b5..15a45f7b50 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -89,6 +89,10 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] // decode sequences from the stream with the provided history. func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + for i := seqs - 1; i >= 0; i-- { if br.overread() { printf("reading sequence %d, exceeded available data\n", seqs-i) @@ -96,10 +100,10 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } var litLen, matchOff, matchLen int if br.off > 4+((maxOffsetBits+16+16)>>3) { - litLen, matchOff, matchLen = s.nextFast(br) + litLen, matchOff, matchLen = s.nextFast(br, llState, mlState, ofState) br.fillFast() } else { - litLen, matchOff, matchLen = s.next(br) + litLen, matchOff, matchLen = s.next(br, llState, mlState, ofState) br.fill() } @@ -175,30 +179,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // This is the last sequence, so we shouldn't update state. break } - if true { - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits + b.nbBits + c.nbBits - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState] - s.offsets.state.state = s.offsets.state.dt[c.newState] - } else { - bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits + b.nbBits) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState+lowBits] - - lowBits = uint16(bits >> (c.nbBits & 31)) - lowBits &= bitMask[b.nbBits&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState+lowBits] - lowBits = uint16(bits) & bitMask[c.nbBits&15] - s.offsets.state.state = s.offsets.state.dt[c.newState+lowBits] - } + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] } else { - s.updateAlt(br) + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] } } @@ -230,55 +229,49 @@ func (s *sequenceDecs) updateAlt(br *bitReader) { // Update all 3 states at once. Approx 20% faster. 
a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - nBits := a.nbBits + b.nbBits + c.nbBits + nBits := a.nbBits() + b.nbBits() + c.nbBits() if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState] - s.offsets.state.state = s.offsets.state.dt[c.newState] + s.litLengths.state.state = s.litLengths.state.dt[a.newState()] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] + s.offsets.state.state = s.offsets.state.dt[c.newState()] return } bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits + b.nbBits) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState+lowBits] + lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) + s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - lowBits = uint16(bits >> (c.nbBits & 31)) - lowBits &= bitMask[b.nbBits&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState+lowBits] + lowBits = uint16(bits >> (c.nbBits() & 31)) + lowBits &= bitMask[b.nbBits()&15] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - lowBits = uint16(bits) & bitMask[c.nbBits&15] - s.offsets.state.state = s.offsets.state.dt[c.newState+lowBits] + lowBits = uint16(bits) & bitMask[c.nbBits()&15] + s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] } // nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader) (ll, mo, ml int) { +func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. - ll, llB := s.litLengths.state.final() - ml, mlB := s.matchLengths.state.final() - mo, moB := s.offsets.state.final() + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() // extra bits are stored in reverse order. br.fillFast() - if s.maxBits <= 32 { - mo += br.getBits(moB) - ml += br.getBits(mlB) - ll += br.getBits(llB) - } else { - mo += br.getBits(moB) + mo += br.getBits(moB) + if s.maxBits > 32 { br.fillFast() - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) } + ml += br.getBits(mlB) + ll += br.getBits(llB) - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup if moB > 1 { s.prevOffset[2] = s.prevOffset[1] s.prevOffset[1] = s.prevOffset[0] s.prevOffset[0] = mo return } - + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup if ll == 0 { // There is an exception though, when current sequence's literals_length = 0. // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, @@ -312,11 +305,11 @@ func (s *sequenceDecs) nextFast(br *bitReader) (ll, mo, ml int) { return } -func (s *sequenceDecs) next(br *bitReader) (ll, mo, ml int) { +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. - ll, llB := s.litLengths.state.final() - ml, mlB := s.matchLengths.state.final() - mo, moB := s.offsets.state.final() + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() // extra bits are stored in reverse order. 
br.fill() diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml index 16d1430aa2..6294d337f6 100644 --- a/vendor/github.com/mattn/go-shellwords/.travis.yml +++ b/vendor/github.com/mattn/go-shellwords/.travis.yml @@ -1,8 +1,14 @@ language: go +sudo: false go: - tip + before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover + - go get -t -v ./... + script: - - $HOME/gopath/bin/goveralls -repotoken 2FMhp57u8LcstKL9B190fLTcEnBtAAiEL + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) + diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md index b1d235c78d..9e1e650457 100644 --- a/vendor/github.com/mattn/go-shellwords/README.md +++ b/vendor/github.com/mattn/go-shellwords/README.md @@ -1,6 +1,6 @@ # go-shellwords -[![Coverage Status](https://coveralls.io/repos/mattn/go-shellwords/badge.png?branch=master)](https://coveralls.io/r/mattn/go-shellwords?branch=master) +[![codecov](https://codecov.io/gh/mattn/go-shellwords/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-shellwords) [![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) Parse line as shell words. diff --git a/vendor/github.com/mattn/go-shellwords/go.test.sh b/vendor/github.com/mattn/go-shellwords/go.test.sh new file mode 100644 index 0000000000..a7deaca96a --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go index 41429d8f26..2dca7f1361 100644 --- a/vendor/github.com/mattn/go-shellwords/shellwords.go +++ b/vendor/github.com/mattn/go-shellwords/shellwords.go @@ -40,6 +40,7 @@ type Parser struct { ParseEnv bool ParseBacktick bool Position int + Dir string // If ParseEnv is true, use this for getenv. // If nil, use os.Getenv. 
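The new `Dir` field above is threaded through to `shellRun` (see the `util_*.go` hunks below), so backtick and `$(...)` substitutions execute with a caller-chosen working directory instead of the process CWD. A minimal sketch of how a consumer of this vendored package might use it; the `/tmp` path and the command line are illustrative only:

```go
package main

import (
	"fmt"

	shellwords "github.com/mattn/go-shellwords"
)

func main() {
	p := shellwords.NewParser()
	p.ParseBacktick = true // enable `...` and $(...) substitution
	p.Dir = "/tmp"         // new field: substituted commands run with this working directory

	// $(pwd) is executed via $SHELL with the command's Dir set to "/tmp",
	// so the resulting argument reflects that directory.
	args, err := p.Parse("ls -l $(pwd)")
	if err != nil {
		panic(err)
	}
	fmt.Println(args) // e.g. [ls -l /tmp]
}
```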
@@ -51,6 +52,7 @@ func NewParser() *Parser { ParseEnv: ParseEnv, ParseBacktick: ParseBacktick, Position: 0, + Dir: "", } } @@ -100,11 +102,11 @@ loop: if !singleQuoted && !doubleQuoted && !dollarQuote { if p.ParseBacktick { if backQuote { - out, err := shellRun(backtick) + out, err := shellRun(backtick, p.Dir) if err != nil { return nil, err } - buf = out + buf = buf[:len(buf)-len(backtick)] + out } backtick = "" backQuote = !backQuote @@ -117,15 +119,11 @@ loop: if !singleQuoted && !doubleQuoted && !backQuote { if p.ParseBacktick { if dollarQuote { - out, err := shellRun(backtick) + out, err := shellRun(backtick, p.Dir) if err != nil { return nil, err } - if r == ')' { - buf = buf[:len(buf)-len(backtick)-2] + out - } else { - buf = buf[:len(buf)-len(backtick)-1] + out - } + buf = buf[:len(buf)-len(backtick)-2] + out } backtick = "" dollarQuote = !dollarQuote @@ -155,7 +153,7 @@ loop: continue } case ';', '&', '|', '<', '>': - if !(escaped || singleQuoted || doubleQuoted || backQuote) { + if !(escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote) { if r == '>' && len(buf) > 0 { if c := buf[0]; '0' <= c && c <= '9' { i -= 1 diff --git a/vendor/github.com/mattn/go-shellwords/util_go15.go b/vendor/github.com/mattn/go-shellwords/util_go15.go index 180f00f0bd..ddcbf229e6 100644 --- a/vendor/github.com/mattn/go-shellwords/util_go15.go +++ b/vendor/github.com/mattn/go-shellwords/util_go15.go @@ -9,14 +9,19 @@ import ( "strings" ) -func shellRun(line string) (string, error) { +func shellRun(line, dir string) (string, error) { var b []byte var err error + var cmd *exec.Cmd if runtime.GOOS == "windows" { - b, err = exec.Command(os.Getenv("COMSPEC"), "/c", line).Output() + cmd = exec.Command(os.Getenv("COMSPEC"), "/c", line) } else { - b, err = exec.Command(os.Getenv("SHELL"), "-c", line).Output() + cmd = exec.Command(os.Getenv("SHELL"), "-c", line) } + if dir != "" { + cmd.Dir = dir + } + b, err = cmd.Output() if err != nil { return "", err } diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go index eaf1011d60..3aef2c4d79 100644 --- a/vendor/github.com/mattn/go-shellwords/util_posix.go +++ b/vendor/github.com/mattn/go-shellwords/util_posix.go @@ -9,9 +9,13 @@ import ( "strings" ) -func shellRun(line string) (string, error) { +func shellRun(line, dir string) (string, error) { shell := os.Getenv("SHELL") - b, err := exec.Command(shell, "-c", line).Output() + cmd := exec.Command(shell, "-c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() if err != nil { if eerr, ok := err.(*exec.ExitError); ok { b = eerr.Stderr diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go index e46f89a1fe..cda6850910 100644 --- a/vendor/github.com/mattn/go-shellwords/util_windows.go +++ b/vendor/github.com/mattn/go-shellwords/util_windows.go @@ -9,9 +9,13 @@ import ( "strings" ) -func shellRun(line string) (string, error) { +func shellRun(line, dir string) (string, error) { shell := os.Getenv("COMSPEC") - b, err := exec.Command(shell, "/c", line).Output() + cmd := exec.Command(shell, "/c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() if err != nil { if eerr, ok := err.(*exec.ExitError); ok { b = eerr.Stderr diff --git a/vendor/github.com/morikuni/aec/LICENSE b/vendor/github.com/morikuni/aec/LICENSE new file mode 100644 index 0000000000..1c26401641 --- /dev/null +++ b/vendor/github.com/morikuni/aec/LICENSE @@ -0,0 +1,21 @@ +The MIT 
License (MIT) + +Copyright (c) 2016 Taihei Morikuni + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/morikuni/aec/README.md b/vendor/github.com/morikuni/aec/README.md new file mode 100644 index 0000000000..3cbc4343ee --- /dev/null +++ b/vendor/github.com/morikuni/aec/README.md @@ -0,0 +1,178 @@ +# aec + +[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec) + +Go wrapper for ANSI escape code. + +## Install + +```bash +go get github.com/morikuni/aec +``` + +## Features + +ANSI escape codes depend on terminal environment. +Some of these features may not work. +Check supported Font-Style/Font-Color features with [checkansi](./checkansi). + +[Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail. + +### Cursor + +- `Up(n)` +- `Down(n)` +- `Right(n)` +- `Left(n)` +- `NextLine(n)` +- `PreviousLine(n)` +- `Column(col)` +- `Position(row, col)` +- `Save` +- `Restore` +- `Hide` +- `Show` +- `Report` + +### Erase + +- `EraseDisplay(mode)` +- `EraseLine(mode)` + +### Scroll + +- `ScrollUp(n)` +- `ScrollDown(n)` + +### Font Style + +- `Bold` +- `Faint` +- `Italic` +- `Underline` +- `BlinkSlow` +- `BlinkRapid` +- `Inverse` +- `Conceal` +- `CrossOut` +- `Frame` +- `Encircle` +- `Overline` + +### Font Color + +Foreground color. + +- `DefaultF` +- `BlackF` +- `RedF` +- `GreenF` +- `YellowF` +- `BlueF` +- `MagentaF` +- `CyanF` +- `WhiteF` +- `LightBlackF` +- `LightRedF` +- `LightGreenF` +- `LightYellowF` +- `LightBlueF` +- `LightMagentaF` +- `LightCyanF` +- `LightWhiteF` +- `Color3BitF(color)` +- `Color8BitF(color)` +- `FullColorF(r, g, b)` + +Background color. + +- `DefaultB` +- `BlackB` +- `RedB` +- `GreenB` +- `YellowB` +- `BlueB` +- `MagentaB` +- `CyanB` +- `WhiteB` +- `LightBlackB` +- `LightRedB` +- `LightGreenB` +- `LightYellowB` +- `LightBlueB` +- `LightMagentaB` +- `LightCyanB` +- `LightWhiteB` +- `Color3BitB(color)` +- `Color8BitB(color)` +- `FullColorB(r, g, b)` + +### Color Converter + +24bit RGB color to ANSI color. + +- `NewRGB3Bit(r, g, b)` +- `NewRGB8Bit(r, g, b)` + +### Builder + +To mix these features. + +```go +custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI +custom.Apply("Hello World") +``` + +## Usage + +1. Create ANSI by `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI` +2. Print ANSI by `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))` + +`aec.Reset` should be added when using font style or font color features. 
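The two usage steps above combine as in the following minimal sketch (not part of the vendored README; the printed strings are arbitrary examples):

```go
package main

import (
	"fmt"

	"github.com/morikuni/aec"
)

func main() {
	// Step 1: create an ANSI value by combining existing ones.
	warn := aec.RedF.With(aec.Bold)

	// Step 2a: Apply wraps the string and appends aec.Reset for you.
	fmt.Println(warn.Apply("something went wrong"))

	// Step 2b: printing an ANSI value directly requires an explicit reset.
	fmt.Print(aec.GreenF, "all good", aec.Reset, "\n")
}
```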
+ +## Example + +Simple progressbar. + +![sample](./sample.gif) + +```go +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/morikuni/aec" +) + +func main() { + const n = 20 + builder := aec.EmptyBuilder + + up2 := aec.Up(2) + col := aec.Column(n + 2) + bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64)) + label := builder.LightRedF().Underline().With(col).Right(1).ANSI + + // for up2 + fmt.Println() + fmt.Println() + + for i := 0; i <= n; i++ { + fmt.Print(up2) + fmt.Println(label.Apply(fmt.Sprint(i, "/", n))) + fmt.Print("[") + fmt.Print(bar.Apply(strings.Repeat("=", i))) + fmt.Println(col.Apply("]")) + time.Sleep(100 * time.Millisecond) + } +} +``` + +## License + +[MIT](./LICENSE) + + diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go new file mode 100644 index 0000000000..566be6eb1e --- /dev/null +++ b/vendor/github.com/morikuni/aec/aec.go @@ -0,0 +1,137 @@ +package aec + +import "fmt" + +// EraseMode is listed in a variable EraseModes. +type EraseMode uint + +var ( + // EraseModes is a list of EraseMode. + EraseModes struct { + // All erase all. + All EraseMode + + // Head erase to head. + Head EraseMode + + // Tail erase to tail. + Tail EraseMode + } + + // Save saves the cursor position. + Save ANSI + + // Restore restores the cursor position. + Restore ANSI + + // Hide hides the cursor. + Hide ANSI + + // Show shows the cursor. + Show ANSI + + // Report reports the cursor position. + Report ANSI +) + +// Up moves up the cursor. +func Up(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dA", n)) +} + +// Down moves down the cursor. +func Down(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dB", n)) +} + +// Right moves right the cursor. +func Right(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dC", n)) +} + +// Left moves left the cursor. +func Left(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dD", n)) +} + +// NextLine moves down the cursor to head of a line. +func NextLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dE", n)) +} + +// PreviousLine moves up the cursor to head of a line. +func PreviousLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dF", n)) +} + +// Column set the cursor position to a given column. +func Column(col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dG", col)) +} + +// Position set the cursor position to a given absolute position. +func Position(row, col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col)) +} + +// EraseDisplay erases display by given EraseMode. +func EraseDisplay(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dJ", m)) +} + +// EraseLine erases lines by given EraseMode. +func EraseLine(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dK", m)) +} + +// ScrollUp scrolls up the page. +func ScrollUp(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dS", n)) +} + +// ScrollDown scrolls down the page. 
+func ScrollDown(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dT", n)) +} + +func init() { + EraseModes = struct { + All EraseMode + Head EraseMode + Tail EraseMode + }{ + Tail: 0, + Head: 1, + All: 2, + } + + Save = newAnsi(esc + "s") + Restore = newAnsi(esc + "u") + Hide = newAnsi(esc + "?25l") + Show = newAnsi(esc + "?25h") + Report = newAnsi(esc + "6n") +} diff --git a/vendor/github.com/morikuni/aec/ansi.go b/vendor/github.com/morikuni/aec/ansi.go new file mode 100644 index 0000000000..e60722e6e6 --- /dev/null +++ b/vendor/github.com/morikuni/aec/ansi.go @@ -0,0 +1,59 @@ +package aec + +import ( + "fmt" + "strings" +) + +const esc = "\x1b[" + +// Reset resets SGR effect. +const Reset string = "\x1b[0m" + +var empty = newAnsi("") + +// ANSI represents ANSI escape code. +type ANSI interface { + fmt.Stringer + + // With adapts given ANSIs. + With(...ANSI) ANSI + + // Apply wraps given string in ANSI. + Apply(string) string +} + +type ansiImpl string + +func newAnsi(s string) *ansiImpl { + r := ansiImpl(s) + return &r +} + +func (a *ansiImpl) With(ansi ...ANSI) ANSI { + return concat(append([]ANSI{a}, ansi...)) +} + +func (a *ansiImpl) Apply(s string) string { + return a.String() + s + Reset +} + +func (a *ansiImpl) String() string { + return string(*a) +} + +// Apply wraps given string in ANSIs. +func Apply(s string, ansi ...ANSI) string { + if len(ansi) == 0 { + return s + } + return concat(ansi).Apply(s) +} + +func concat(ansi []ANSI) ANSI { + strs := make([]string, 0, len(ansi)) + for _, p := range ansi { + strs = append(strs, p.String()) + } + return newAnsi(strings.Join(strs, "")) +} diff --git a/vendor/github.com/morikuni/aec/builder.go b/vendor/github.com/morikuni/aec/builder.go new file mode 100644 index 0000000000..13bd002d4e --- /dev/null +++ b/vendor/github.com/morikuni/aec/builder.go @@ -0,0 +1,388 @@ +package aec + +// Builder is a lightweight syntax to construct customized ANSI. +type Builder struct { + ANSI ANSI +} + +// EmptyBuilder is an initialized Builder. +var EmptyBuilder *Builder + +// NewBuilder creates a Builder from existing ANSI. +func NewBuilder(a ...ANSI) *Builder { + return &Builder{concat(a)} +} + +// With is a syntax for With. +func (builder *Builder) With(a ...ANSI) *Builder { + return NewBuilder(builder.ANSI.With(a...)) +} + +// Up is a syntax for Up. +func (builder *Builder) Up(n uint) *Builder { + return builder.With(Up(n)) +} + +// Down is a syntax for Down. +func (builder *Builder) Down(n uint) *Builder { + return builder.With(Down(n)) +} + +// Right is a syntax for Right. +func (builder *Builder) Right(n uint) *Builder { + return builder.With(Right(n)) +} + +// Left is a syntax for Left. +func (builder *Builder) Left(n uint) *Builder { + return builder.With(Left(n)) +} + +// NextLine is a syntax for NextLine. +func (builder *Builder) NextLine(n uint) *Builder { + return builder.With(NextLine(n)) +} + +// PreviousLine is a syntax for PreviousLine. +func (builder *Builder) PreviousLine(n uint) *Builder { + return builder.With(PreviousLine(n)) +} + +// Column is a syntax for Column. +func (builder *Builder) Column(col uint) *Builder { + return builder.With(Column(col)) +} + +// Position is a syntax for Position. +func (builder *Builder) Position(row, col uint) *Builder { + return builder.With(Position(row, col)) +} + +// EraseDisplay is a syntax for EraseDisplay. +func (builder *Builder) EraseDisplay(m EraseMode) *Builder { + return builder.With(EraseDisplay(m)) +} + +// EraseLine is a syntax for EraseLine. 
+func (builder *Builder) EraseLine(m EraseMode) *Builder { + return builder.With(EraseLine(m)) +} + +// ScrollUp is a syntax for ScrollUp. +func (builder *Builder) ScrollUp(n int) *Builder { + return builder.With(ScrollUp(n)) +} + +// ScrollDown is a syntax for ScrollDown. +func (builder *Builder) ScrollDown(n int) *Builder { + return builder.With(ScrollDown(n)) +} + +// Save is a syntax for Save. +func (builder *Builder) Save() *Builder { + return builder.With(Save) +} + +// Restore is a syntax for Restore. +func (builder *Builder) Restore() *Builder { + return builder.With(Restore) +} + +// Hide is a syntax for Hide. +func (builder *Builder) Hide() *Builder { + return builder.With(Hide) +} + +// Show is a syntax for Show. +func (builder *Builder) Show() *Builder { + return builder.With(Show) +} + +// Report is a syntax for Report. +func (builder *Builder) Report() *Builder { + return builder.With(Report) +} + +// Bold is a syntax for Bold. +func (builder *Builder) Bold() *Builder { + return builder.With(Bold) +} + +// Faint is a syntax for Faint. +func (builder *Builder) Faint() *Builder { + return builder.With(Faint) +} + +// Italic is a syntax for Italic. +func (builder *Builder) Italic() *Builder { + return builder.With(Italic) +} + +// Underline is a syntax for Underline. +func (builder *Builder) Underline() *Builder { + return builder.With(Underline) +} + +// BlinkSlow is a syntax for BlinkSlow. +func (builder *Builder) BlinkSlow() *Builder { + return builder.With(BlinkSlow) +} + +// BlinkRapid is a syntax for BlinkRapid. +func (builder *Builder) BlinkRapid() *Builder { + return builder.With(BlinkRapid) +} + +// Inverse is a syntax for Inverse. +func (builder *Builder) Inverse() *Builder { + return builder.With(Inverse) +} + +// Conceal is a syntax for Conceal. +func (builder *Builder) Conceal() *Builder { + return builder.With(Conceal) +} + +// CrossOut is a syntax for CrossOut. +func (builder *Builder) CrossOut() *Builder { + return builder.With(CrossOut) +} + +// BlackF is a syntax for BlackF. +func (builder *Builder) BlackF() *Builder { + return builder.With(BlackF) +} + +// RedF is a syntax for RedF. +func (builder *Builder) RedF() *Builder { + return builder.With(RedF) +} + +// GreenF is a syntax for GreenF. +func (builder *Builder) GreenF() *Builder { + return builder.With(GreenF) +} + +// YellowF is a syntax for YellowF. +func (builder *Builder) YellowF() *Builder { + return builder.With(YellowF) +} + +// BlueF is a syntax for BlueF. +func (builder *Builder) BlueF() *Builder { + return builder.With(BlueF) +} + +// MagentaF is a syntax for MagentaF. +func (builder *Builder) MagentaF() *Builder { + return builder.With(MagentaF) +} + +// CyanF is a syntax for CyanF. +func (builder *Builder) CyanF() *Builder { + return builder.With(CyanF) +} + +// WhiteF is a syntax for WhiteF. +func (builder *Builder) WhiteF() *Builder { + return builder.With(WhiteF) +} + +// DefaultF is a syntax for DefaultF. +func (builder *Builder) DefaultF() *Builder { + return builder.With(DefaultF) +} + +// BlackB is a syntax for BlackB. +func (builder *Builder) BlackB() *Builder { + return builder.With(BlackB) +} + +// RedB is a syntax for RedB. +func (builder *Builder) RedB() *Builder { + return builder.With(RedB) +} + +// GreenB is a syntax for GreenB. +func (builder *Builder) GreenB() *Builder { + return builder.With(GreenB) +} + +// YellowB is a syntax for YellowB. +func (builder *Builder) YellowB() *Builder { + return builder.With(YellowB) +} + +// BlueB is a syntax for BlueB. 
+func (builder *Builder) BlueB() *Builder { + return builder.With(BlueB) +} + +// MagentaB is a syntax for MagentaB. +func (builder *Builder) MagentaB() *Builder { + return builder.With(MagentaB) +} + +// CyanB is a syntax for CyanB. +func (builder *Builder) CyanB() *Builder { + return builder.With(CyanB) +} + +// WhiteB is a syntax for WhiteB. +func (builder *Builder) WhiteB() *Builder { + return builder.With(WhiteB) +} + +// DefaultB is a syntax for DefaultB. +func (builder *Builder) DefaultB() *Builder { + return builder.With(DefaultB) +} + +// Frame is a syntax for Frame. +func (builder *Builder) Frame() *Builder { + return builder.With(Frame) +} + +// Encircle is a syntax for Encircle. +func (builder *Builder) Encircle() *Builder { + return builder.With(Encircle) +} + +// Overline is a syntax for Overline. +func (builder *Builder) Overline() *Builder { + return builder.With(Overline) +} + +// LightBlackF is a syntax for LightBlackF. +func (builder *Builder) LightBlackF() *Builder { + return builder.With(LightBlackF) +} + +// LightRedF is a syntax for LightRedF. +func (builder *Builder) LightRedF() *Builder { + return builder.With(LightRedF) +} + +// LightGreenF is a syntax for LightGreenF. +func (builder *Builder) LightGreenF() *Builder { + return builder.With(LightGreenF) +} + +// LightYellowF is a syntax for LightYellowF. +func (builder *Builder) LightYellowF() *Builder { + return builder.With(LightYellowF) +} + +// LightBlueF is a syntax for LightBlueF. +func (builder *Builder) LightBlueF() *Builder { + return builder.With(LightBlueF) +} + +// LightMagentaF is a syntax for LightMagentaF. +func (builder *Builder) LightMagentaF() *Builder { + return builder.With(LightMagentaF) +} + +// LightCyanF is a syntax for LightCyanF. +func (builder *Builder) LightCyanF() *Builder { + return builder.With(LightCyanF) +} + +// LightWhiteF is a syntax for LightWhiteF. +func (builder *Builder) LightWhiteF() *Builder { + return builder.With(LightWhiteF) +} + +// LightBlackB is a syntax for LightBlackB. +func (builder *Builder) LightBlackB() *Builder { + return builder.With(LightBlackB) +} + +// LightRedB is a syntax for LightRedB. +func (builder *Builder) LightRedB() *Builder { + return builder.With(LightRedB) +} + +// LightGreenB is a syntax for LightGreenB. +func (builder *Builder) LightGreenB() *Builder { + return builder.With(LightGreenB) +} + +// LightYellowB is a syntax for LightYellowB. +func (builder *Builder) LightYellowB() *Builder { + return builder.With(LightYellowB) +} + +// LightBlueB is a syntax for LightBlueB. +func (builder *Builder) LightBlueB() *Builder { + return builder.With(LightBlueB) +} + +// LightMagentaB is a syntax for LightMagentaB. +func (builder *Builder) LightMagentaB() *Builder { + return builder.With(LightMagentaB) +} + +// LightCyanB is a syntax for LightCyanB. +func (builder *Builder) LightCyanB() *Builder { + return builder.With(LightCyanB) +} + +// LightWhiteB is a syntax for LightWhiteB. +func (builder *Builder) LightWhiteB() *Builder { + return builder.With(LightWhiteB) +} + +// Color3BitF is a syntax for Color3BitF. +func (builder *Builder) Color3BitF(c RGB3Bit) *Builder { + return builder.With(Color3BitF(c)) +} + +// Color3BitB is a syntax for Color3BitB. +func (builder *Builder) Color3BitB(c RGB3Bit) *Builder { + return builder.With(Color3BitB(c)) +} + +// Color8BitF is a syntax for Color8BitF. +func (builder *Builder) Color8BitF(c RGB8Bit) *Builder { + return builder.With(Color8BitF(c)) +} + +// Color8BitB is a syntax for Color8BitB.
+func (builder *Builder) Color8BitB(c RGB8Bit) *Builder { + return builder.With(Color8BitB(c)) +} + +// FullColorF is a syntax for FullColorF. +func (builder *Builder) FullColorF(r, g, b uint8) *Builder { + return builder.With(FullColorF(r, g, b)) +} + +// FullColorB is a syntax for FullColorB. +func (builder *Builder) FullColorB(r, g, b uint8) *Builder { + return builder.With(FullColorB(r, g, b)) +} + +// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit. +func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder { + return builder.Color3BitF(NewRGB3Bit(r, g, b)) +} + +// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit. +func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder { + return builder.Color3BitB(NewRGB3Bit(r, g, b)) +} + +// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit. +func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder { + return builder.Color8BitF(NewRGB8Bit(r, g, b)) +} + +// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit. +func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder { + return builder.Color8BitB(NewRGB8Bit(r, g, b)) +} + +func init() { + EmptyBuilder = &Builder{empty} +}
diff --git a/vendor/github.com/morikuni/aec/sample.gif b/vendor/github.com/morikuni/aec/sample.gif new file mode 100644 index 0000000000..c6c613bb70 Binary files /dev/null and b/vendor/github.com/morikuni/aec/sample.gif differ
diff --git a/vendor/github.com/morikuni/aec/sgr.go b/vendor/github.com/morikuni/aec/sgr.go new file mode 100644 index 0000000000..0ba3464e6d --- /dev/null +++ b/vendor/github.com/morikuni/aec/sgr.go
@@ -0,0 +1,202 @@ +package aec + +import ( + "fmt" +) + +// RGB3Bit is a 3bit RGB color. +type RGB3Bit uint8 + +// RGB8Bit is a 8bit RGB color. +type RGB8Bit uint8 + +func newSGR(n uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", n)) +} + +// NewRGB3Bit create a RGB3Bit from given RGB. +func NewRGB3Bit(r, g, b uint8) RGB3Bit { + return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4)) +} + +// NewRGB8Bit create a RGB8Bit from given RGB. +func NewRGB8Bit(r, g, b uint8) RGB8Bit { + return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43) +} + +// Color3BitF set the foreground color of text. +func Color3BitF(c RGB3Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", c+30)) +} + +// Color3BitB set the background color of text. +func Color3BitB(c RGB3Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", c+40)) +} + +// Color8BitF set the foreground color of text. +func Color8BitF(c RGB8Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c)) +} + +// Color8BitB set the background color of text. +func Color8BitB(c RGB8Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c)) +} + +// FullColorF set the foreground color of text. +func FullColorF(r, g, b uint8) ANSI { + return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b)) +} + +// FullColorB set the background color of text. +func FullColorB(r, g, b uint8) ANSI { + return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b)) +} + +// Style +var ( + // Bold set the text style to bold or increased intensity. + Bold ANSI + + // Faint set the text style to faint. + Faint ANSI + + // Italic set the text style to italic. + Italic ANSI + + // Underline set the text style to underline. + Underline ANSI + + // BlinkSlow set the text style to slow blink. + BlinkSlow ANSI + + // BlinkRapid set the text style to rapid blink. + BlinkRapid ANSI + + // Inverse swap the foreground color and background color. + Inverse ANSI + + // Conceal set the text style to conceal.
+ Conceal ANSI + + // CrossOut set the text style to crossed out. + CrossOut ANSI + + // Frame set the text style to framed. + Frame ANSI + + // Encircle set the text style to encircled. + Encircle ANSI + + // Overline set the text style to overlined. + Overline ANSI +) + +// Foreground color of text. +var ( + // DefaultF is the default color of foreground. + DefaultF ANSI + + // Normal color + BlackF ANSI + RedF ANSI + GreenF ANSI + YellowF ANSI + BlueF ANSI + MagentaF ANSI + CyanF ANSI + WhiteF ANSI + + // Light color + LightBlackF ANSI + LightRedF ANSI + LightGreenF ANSI + LightYellowF ANSI + LightBlueF ANSI + LightMagentaF ANSI + LightCyanF ANSI + LightWhiteF ANSI +) + +// Background color of text. +var ( + // DefaultB is the default color of background. + DefaultB ANSI + + // Normal color + BlackB ANSI + RedB ANSI + GreenB ANSI + YellowB ANSI + BlueB ANSI + MagentaB ANSI + CyanB ANSI + WhiteB ANSI + + // Light color + LightBlackB ANSI + LightRedB ANSI + LightGreenB ANSI + LightYellowB ANSI + LightBlueB ANSI + LightMagentaB ANSI + LightCyanB ANSI + LightWhiteB ANSI +) + +func init() { + Bold = newSGR(1) + Faint = newSGR(2) + Italic = newSGR(3) + Underline = newSGR(4) + BlinkSlow = newSGR(5) + BlinkRapid = newSGR(6) + Inverse = newSGR(7) + Conceal = newSGR(8) + CrossOut = newSGR(9) + + BlackF = newSGR(30) + RedF = newSGR(31) + GreenF = newSGR(32) + YellowF = newSGR(33) + BlueF = newSGR(34) + MagentaF = newSGR(35) + CyanF = newSGR(36) + WhiteF = newSGR(37) + + DefaultF = newSGR(39) + + BlackB = newSGR(40) + RedB = newSGR(41) + GreenB = newSGR(42) + YellowB = newSGR(43) + BlueB = newSGR(44) + MagentaB = newSGR(45) + CyanB = newSGR(46) + WhiteB = newSGR(47) + + DefaultB = newSGR(49) + + Frame = newSGR(51) + Encircle = newSGR(52) + Overline = newSGR(53) + + LightBlackF = newSGR(90) + LightRedF = newSGR(91) + LightGreenF = newSGR(92) + LightYellowF = newSGR(93) + LightBlueF = newSGR(94) + LightMagentaF = newSGR(95) + LightCyanF = newSGR(96) + LightWhiteF = newSGR(97) + + LightBlackB = newSGR(100) + LightRedB = newSGR(101) + LightGreenB = newSGR(102) + LightYellowB = newSGR(103) + LightBlueB = newSGR(104) + LightMagentaB = newSGR(105) + LightCyanB = newSGR(106) + LightWhiteB = newSGR(107) +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index bad7bb97f4..4f35ac134f 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -34,6 +34,10 @@ const ( // referenced by the manifest. MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + // MediaTypeImageLayerZstd is the media type used for zstd compressed + // layers referenced by the manifest. + MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" @@ -43,6 +47,11 @@ const ( // restrictions. MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + // MediaTypeImageLayerNonDistributableZstd is the media type for zstd + // compressed layers referenced by the manifest but with distribution + // restrictions. 
+ MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" + // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" )
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 5d493df233..58f1095ab0 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -25,7 +25,7 @@ const ( VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support.
diff --git a/vendor/github.com/openshift/imagebuilder/OWNERS b/vendor/github.com/openshift/imagebuilder/OWNERS index 6a900fd07a..db859b7bdf 100644 --- a/vendor/github.com/openshift/imagebuilder/OWNERS +++ b/vendor/github.com/openshift/imagebuilder/OWNERS
@@ -1,5 +1,3 @@ -reviewers: -- bparees approvers: - TomSweeneyRedHat - mrunalp
diff --git a/vendor/github.com/openshift/imagebuilder/README.md b/vendor/github.com/openshift/imagebuilder/README.md index fd96ed9401..772747bce3 100644 --- a/vendor/github.com/openshift/imagebuilder/README.md +++ b/vendor/github.com/openshift/imagebuilder/README.md
@@ -64,6 +64,11 @@ $ imagebuilder -f Dockerfile:Dockerfile.extra . will build the current directory and combine the first Dockerfile with the second. The FROM in the second image is ignored. +Note that imagebuilder adds the built image to the `docker` daemon's internal storage. If you use `podman` you must first pull the image into its local registry: + +``` +$ podman pull docker-daemon:<image>:<tag> # must contain either a tag or a digest +``` ## Code Example
diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go index 86b139b653..5a2d0d5396 100644 --- a/vendor/github.com/openshift/imagebuilder/builder.go +++ b/vendor/github.com/openshift/imagebuilder/builder.go
@@ -212,6 +212,7 @@ func NewStages(node *parser.Node, b *Builder) (Stages, error) { Builder: &Builder{ Args: b.Args, AllowedArgs: b.AllowedArgs, + Env: b.Env, }, Node: root, })
@@ -436,7 +437,7 @@ func (b *Builder) FromImage(image *docker.Image, node *parser.Node) error { SplitChildren(node, command.From) b.RunConfig = *image.Config - b.Env = b.RunConfig.Env + b.Env = append(b.Env, b.RunConfig.Env...)
 	b.RunConfig.Env = nil
 	// Check to see if we have a default PATH, note that windows won't
diff --git a/vendor/github.com/openshift/imagebuilder/constants.go b/vendor/github.com/openshift/imagebuilder/constants.go
index 86cd2e5e27..7b41e5a491 100644
--- a/vendor/github.com/openshift/imagebuilder/constants.go
+++ b/vendor/github.com/openshift/imagebuilder/constants.go
@@ -4,10 +4,6 @@ const (
 	// in docker/system
 	NoBaseImageSpecifier = "scratch"
 
-	// not yet part of our import
-	commandArg        = "arg"
-	commandStopSignal = "stopsignal"
-
 	// in docker/system
 	defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
 )
diff --git a/vendor/github.com/openshift/imagebuilder/evaluator.go b/vendor/github.com/openshift/imagebuilder/evaluator.go
index 1ea3584518..1bbb25f2bd 100644
--- a/vendor/github.com/openshift/imagebuilder/evaluator.go
+++ b/vendor/github.com/openshift/imagebuilder/evaluator.go
@@ -20,16 +20,16 @@ func ParseDockerfile(r io.Reader) (*parser.Node, error) {
 
 // Environment variable interpolation will happen on these statements only.
 var replaceEnvAllowed = map[string]bool{
-	command.Env:       true,
-	command.Label:     true,
-	command.Add:       true,
-	command.Copy:      true,
-	command.Workdir:   true,
-	command.Expose:    true,
-	command.Volume:    true,
-	command.User:      true,
-	commandStopSignal: true,
-	commandArg:        true,
+	command.Env:        true,
+	command.Label:      true,
+	command.Add:        true,
+	command.Copy:       true,
+	command.Workdir:    true,
+	command.Expose:     true,
+	command.Volume:     true,
+	command.User:       true,
+	command.StopSignal: true,
+	command.Arg:        true,
 }
 
 // Certain commands are allowed to have their args split into more
diff --git a/vendor/github.com/openshift/imagebuilder/vendor.conf b/vendor/github.com/openshift/imagebuilder/vendor.conf
index e437b79c31..c3f7d1a6b7 100644
--- a/vendor/github.com/openshift/imagebuilder/vendor.conf
+++ b/vendor/github.com/openshift/imagebuilder/vendor.conf
@@ -5,7 +5,6 @@ github.com/docker/go-connections 97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e
 github.com/docker/go-units 2fb04c6466a548a03cb009c5569ee1ab1e35398e
 github.com/fsouza/go-dockerclient openshift-4.0 https://github.com/openshift/go-dockerclient.git
 github.com/gogo/protobuf c5a62797aee0054613cc578653a16c6237fef080
-github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
 github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e
 github.com/Microsoft/go-winio 1a8911d1ed007260465c3bfbbc785ac6915a0bb8
 github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
@@ -18,3 +17,4 @@ github.com/sirupsen/logrus d7b6bf5e4d26448fd977d07d745a2a66097ddecb
 golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908
 golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83
 golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba
+k8s.io/klog 8e90cee79f823779174776412c13478955131846
diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore
deleted file mode 100644
index 7b5883475d..0000000000
--- a/vendor/github.com/pelletier/go-toml/.dockerignore
+++ /dev/null
@@ -1,2 +0,0 @@
-cmd/tomll/tomll
-cmd/tomljson/tomljson
diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore
index e6ba63a5c5..99e38bbc53 100644
--- a/vendor/github.com/pelletier/go-toml/.gitignore
+++ b/vendor/github.com/pelletier/go-toml/.gitignore
@@ -1,5 +1,2 @@
 test_program/test_program_bin
 fuzz/
-cmd/tomll/tomll
-cmd/tomljson/tomljson
-cmd/tomltestgen/tomltestgen
diff --git a/vendor/github.com/pelletier/go-toml/.travis.yml b/vendor/github.com/pelletier/go-toml/.travis.yml
index abb03e997b..c9fbf304bf 100644
--- a/vendor/github.com/pelletier/go-toml/.travis.yml
+++ b/vendor/github.com/pelletier/go-toml/.travis.yml
@@ -1,22 +1,23 @@
 sudo: false
 language: go
 go:
-  - 1.11.x
-  - 1.12.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
   - tip
 matrix:
   allow_failures:
     - go: tip
   fast_finish: true
-env:
-  - GO111MODULE=on
 script:
   - if [ -n "$(go fmt ./...)" ]; then exit 1; fi
-  - go test github.com/pelletier/go-toml -race -coverprofile=coverage.txt -covermode=atomic
-  - go test github.com/pelletier/go-toml/cmd/tomljson
-  - go test github.com/pelletier/go-toml/cmd/tomll
-  - go test github.com/pelletier/go-toml/query
+  - ./test.sh
   - ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git
-
+before_install:
+  - go get github.com/axw/gocov/gocov
+  - go get github.com/mattn/goveralls
+  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+branches:
+  only: [master]
 after_success:
-  - bash <(curl -s https://codecov.io/bash)
+  - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=coverage.out -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
deleted file mode 100644
index 405c911c90..0000000000
--- a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
+++ /dev/null
@@ -1,132 +0,0 @@
-## Contributing
-
-Thank you for your interest in go-toml! We appreciate you considering
-contributing to go-toml!
-
-The main goal is the project is to provide an easy-to-use TOML
-implementation for Go that gets the job done and gets out of your way –
-dealing with TOML is probably not the central piece of your project.
-
-As the single maintainer of go-toml, time is scarce. All help, big or
-small, is more than welcomed!
-
-### Ask questions
-
-Any question you may have, somebody else might have it too. Always feel
-free to ask them on the [issues tracker][issues-tracker]. We will try to
-answer them as clearly and quickly as possible, time permitting.
-
-Asking questions also helps us identify areas where the documentation needs
-improvement, or new features that weren't envisioned before. Sometimes, a
-seemingly innocent question leads to the fix of a bug. Don't hesitate and
-ask away!
-
-### Improve the documentation
-
-The best way to share your knowledge and experience with go-toml is to
-improve the documentation. Fix a typo, clarify an interface, add an
-example, anything goes!
-
-The documentation is present in the [README][readme] and thorough the
-source code. On release, it gets updated on [GoDoc][godoc]. To make a
-change to the documentation, create a pull request with your proposed
-changes. For simple changes like that, the easiest way to go is probably
-the "Fork this project and edit the file" button on Github, displayed at
-the top right of the file. Unless it's a trivial change (for example a
-typo), provide a little bit of context in your pull request description or
-commit message.
-
-### Report a bug
-
-Found a bug! Sorry to hear that :(. Help us and other track them down and
-fix by reporting it. [File a new bug report][bug-report] on the [issues
-tracker][issues-tracker]. The template should provide enough guidance on
-what to include. When in doubt: add more details! By reducing ambiguity and
-providing more information, it decreases back and forth and saves everyone
-time.
-
-### Code changes
-
-Want to contribute a patch? Very happy to hear that!
-
-First, some high-level rules:
-
-* A short proposal with some POC code is better than a lengthy piece of
-  text with no code. Code speaks louder than words.
-* No backward-incompatible patch will be accepted unless discussed.
-  Sometimes it's hard, and Go's lack of versioning by default does not
-  help, but we try not to break people's programs unless we absolutely have
-  to.
-* If you are writing a new feature or extending an existing one, make sure
-  to write some documentation.
-* Bug fixes need to be accompanied with regression tests.
-* New code needs to be tested.
-* Your commit messages need to explain why the change is needed, even if
-  already included in the PR description.
-
-It does sound like a lot, but those best practices are here to save time
-overall and continuously improve the quality of the project, which is
-something everyone benefits from.
-
-#### Get started
-
-The fairly standard code contribution process looks like that:
-
-1. [Fork the project][fork].
-2. Make your changes, commit on any branch you like.
-3. [Open up a pull request][pull-request]
-4. Review, potential ask for changes.
-5. Merge. You're in!
-
-Feel free to ask for help! You can create draft pull requests to gather
-some early feedback!
-
-#### Run the tests
-
-You can run tests for go-toml using Go's test tool: `go test ./...`.
-When creating a pull requests, all tests will be ran on Linux on a few Go
-versions (Travis CI), and on Windows using the latest Go version
-(AppVeyor).
-
-#### Style
-
-Try to look around and follow the same format and structure as the rest of
-the code. We enforce using `go fmt` on the whole code base.
-
----
-
-### Maintainers-only
-
-#### Merge pull request
-
-Checklist:
-
-* Passing CI.
-* Does not introduce backward-incompatible changes (unless discussed).
-* Has relevant doc changes.
-* Has relevant unit tests.
-
-1. Merge using "squash and merge".
-2. Make sure to edit the commit message to keep all the useful information
-   nice and clean.
-3. Make sure the commit title is clear and contains the PR number (#123).
-
-#### New release
-
-1. Go to [releases][releases]. Click on "X commits to master since this
-   release".
-2. Make note of all the changes. Look for backward incompatible changes,
-   new features, and bug fixes.
-3. Pick the new version using the above and semver.
-4. Create a [new release][new-release].
-5. Follow the same format as [1.1.0][release-110].
-
-[issues-tracker]: https://github.com/pelletier/go-toml/issues
-[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
-[godoc]: https://godoc.org/github.com/pelletier/go-toml
-[readme]: ./README.md
-[fork]: https://help.github.com/articles/fork-a-repo
-[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
-[releases]: https://github.com/pelletier/go-toml/releases
-[new-release]: https://github.com/pelletier/go-toml/releases/new
-[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0
diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile
deleted file mode 100644
index 8f439d4791..0000000000
--- a/vendor/github.com/pelletier/go-toml/Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-FROM golang:1.12-alpine3.9 as builder
-WORKDIR /go/src/github.com/pelletier/go-toml
-COPY . .
-ENV CGO_ENABLED=0
-ENV GOOS=linux
-RUN go install ./...
-
-FROM scratch
-COPY --from=builder /go/bin/tomll /usr/bin/tomll
-COPY --from=builder /go/bin/tomljson /usr/bin/tomljson
diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 041cdc4a2f..0000000000
--- a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,5 +0,0 @@
-**Issue:** add link to pelletier/go-toml issue here
-
-Explanation of what this pull request does.
-
-More detailed description of the decisions being made and the reasons why (if the patch is non-trivial).
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md
index f0311b99c4..0d357acf35 100644
--- a/vendor/github.com/pelletier/go-toml/README.md
+++ b/vendor/github.com/pelletier/go-toml/README.md
@@ -8,10 +8,8 @@ This library supports TOML version
 [![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml)
 [![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
 [![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml)
-[![Windows Build status](https://ci.appveyor.com/api/projects/status/4aepwwjori266hkt/branch/master?svg=true)](https://ci.appveyor.com/project/pelletier/go-toml/branch/master)
-[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml)
+[![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master)
 [![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
-[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield)
 
 ## Features
 
@@ -101,23 +99,6 @@ Go-toml provides two handy command line tools:
     tomljson --help
     ```
 
-### Docker image
-
-Those tools are also availble as a Docker image from
-[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to
-use `tomljson`:
-
-```
-docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml
-```
-
-Only master (`latest`) and tagged versions are published to dockerhub. You
-can build your own image as usual:
-
-```
-docker build -t go-toml .
-```
-
 ## Contribute
 
 Feel free to report bugs and patches using GitHub's pull requests system on
@@ -126,7 +107,12 @@ much appreciated!
 
 ### Run tests
 
-`go test ./...`
+You have to make sure two kind of tests run:
+
+1. The Go unit tests
+2. The TOML examples base
+
+You can run both of them using `./test.sh`.
 
 ### Fuzzing
diff --git a/vendor/github.com/pelletier/go-toml/appveyor.yml b/vendor/github.com/pelletier/go-toml/appveyor.yml
deleted file mode 100644
index 40e8a41596..0000000000
--- a/vendor/github.com/pelletier/go-toml/appveyor.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-version: "{build}"
-
-# Source Config
-clone_folder: c:\gopath\src\github.com\pelletier\go-toml
-
-# Build host
-environment:
-  GOPATH: c:\gopath
-  DEPTESTBYPASS501: 1
-  GOVERSION: 1.12
-  GO111MODULE: on
-
-init:
-  - git config --global core.autocrlf input
-
-# Build
-install:
-  # Install the specific Go version.
-  - rmdir c:\go /s /q
-  - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
-  - msiexec /i go%GOVERSION%.windows-amd64.msi /q
-  - choco install bzr
-  - set Path=c:\go\bin;c:\gopath\bin;C:\Program Files (x86)\Bazaar\;C:\Program Files\Mercurial\%Path%
-  - go version
-  - go env
-
-build: false
-deploy: false
-
-test_script:
-  - go test github.com/pelletier/go-toml
-  - go test github.com/pelletier/go-toml/cmd/tomljson
-  - go test github.com/pelletier/go-toml/cmd/tomll
-  - go test github.com/pelletier/go-toml/query
diff --git a/vendor/github.com/pelletier/go-toml/go.mod b/vendor/github.com/pelletier/go-toml/go.mod
deleted file mode 100644
index f4690e19d3..0000000000
--- a/vendor/github.com/pelletier/go-toml/go.mod
+++ /dev/null
@@ -1,9 +0,0 @@
-module github.com/pelletier/go-toml
-
-go 1.12
-
-require (
-	github.com/BurntSushi/toml v0.3.1
-	github.com/davecgh/go-spew v1.1.1
-	gopkg.in/yaml.v2 v2.2.2
-)
diff --git a/vendor/github.com/pelletier/go-toml/go.sum b/vendor/github.com/pelletier/go-toml/go.sum
deleted file mode 100644
index 8d91a47853..0000000000
--- a/vendor/github.com/pelletier/go-toml/go.sum
+++ /dev/null
@@ -1,7 +0,0 @@
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go
index e923bc4f9b..284db64678 100644
--- a/vendor/github.com/pelletier/go-toml/keysparsing.go
+++ b/vendor/github.com/pelletier/go-toml/keysparsing.go
@@ -3,107 +3,79 @@
 package toml
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"unicode"
 )
 
 // Convert the bare key group string to an array.
-// The input supports double quotation and single quotation,
+// The input supports double quotation to allow "." inside the key name,
 // but escape sequences are not supported. Lexers must unescape them beforehand.
 func parseKey(key string) ([]string, error) {
-	runes := []rune(key)
-	var groups []string
+	groups := []string{}
+	var buffer bytes.Buffer
+	inQuotes := false
+	wasInQuotes := false
+	ignoreSpace := true
+	expectDot := false
 
-	if len(key) == 0 {
-		return nil, errors.New("empty key")
-	}
-
-	idx := 0
-	for idx < len(runes) {
-		for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
-			// skip leading whitespace
-		}
-		if idx >= len(runes) {
-			break
-		}
-		r := runes[idx]
-		if isValidBareChar(r) {
-			// parse bare key
-			startIdx := idx
-			endIdx := -1
-			idx++
-			for idx < len(runes) {
-				r = runes[idx]
-				if isValidBareChar(r) {
-					idx++
-				} else if r == '.' {
-					endIdx = idx
-					break
-				} else if isSpace(r) {
-					endIdx = idx
-					for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
-						// skip trailing whitespace
-					}
-					if idx < len(runes) && runes[idx] != '.' {
-						return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx])
-					}
-					break
-				} else {
-					return nil, fmt.Errorf("invalid bare key character: %c", r)
-				}
+	for _, char := range key {
+		if ignoreSpace {
+			if char == ' ' {
+				continue
 			}
-			if endIdx == -1 {
-				endIdx = idx
+			ignoreSpace = false
+		}
+		switch char {
+		case '"':
+			if inQuotes {
+				groups = append(groups, buffer.String())
+				buffer.Reset()
+				wasInQuotes = true
 			}
-			groups = append(groups, string(runes[startIdx:endIdx]))
-		} else if r == '\'' {
-			// parse single quoted key
-			idx++
-			startIdx := idx
-			for {
-				if idx >= len(runes) {
-					return nil, fmt.Errorf("unclosed single-quoted key")
-				}
-				r = runes[idx]
-				if r == '\'' {
-					groups = append(groups, string(runes[startIdx:idx]))
-					idx++
-					break
+			inQuotes = !inQuotes
+			expectDot = false
+		case '.':
+			if inQuotes {
+				buffer.WriteRune(char)
+			} else {
+				if !wasInQuotes {
+					if buffer.Len() == 0 {
+						return nil, errors.New("empty table key")
+					}
+					groups = append(groups, buffer.String())
+					buffer.Reset()
 				}
-				idx++
+				ignoreSpace = true
+				expectDot = false
+				wasInQuotes = false
 			}
-		} else if r == '"' {
-			// parse double quoted key
-			idx++
-			startIdx := idx
-			for {
-				if idx >= len(runes) {
-					return nil, fmt.Errorf("unclosed double-quoted key")
-				}
-				r = runes[idx]
-				if r == '"' {
-					groups = append(groups, string(runes[startIdx:idx]))
-					idx++
-					break
-				}
-				idx++
+		case ' ':
+			if inQuotes {
+				buffer.WriteRune(char)
+			} else {
+				expectDot = true
 			}
-		} else if r == '.' {
-			idx++
-			if idx >= len(runes) {
-				return nil, fmt.Errorf("unexpected end of key")
+		default:
+			if !inQuotes && !isValidBareChar(char) {
+				return nil, fmt.Errorf("invalid bare character: %c", char)
 			}
-			r = runes[idx]
-			if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' {
-				return nil, fmt.Errorf("expecting key part after dot")
+			if !inQuotes && expectDot {
+				return nil, errors.New("what?")
 			}
-		} else {
-			return nil, fmt.Errorf("invalid key character: %c", r)
+			buffer.WriteRune(char)
+			expectDot = false
 		}
 	}
+	if inQuotes {
+		return nil, errors.New("mismatched quotes")
+	}
+	if buffer.Len() > 0 {
+		groups = append(groups, buffer.String())
+	}
 	if len(groups) == 0 {
-		return nil, fmt.Errorf("empty key")
+		return nil, errors.New("empty key")
 	}
 	return groups, nil
 }
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
index 6254d390dc..d11de42859 100644
--- a/vendor/github.com/pelletier/go-toml/lexer.go
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -309,7 +309,7 @@ func (l *tomlLexer) lexKey() tomlLexStateFn {
 			if err != nil {
 				return l.errorf(err.Error())
 			}
-			growingString += "\"" + str + "\""
+			growingString += str
 			l.next()
 			continue
 		} else if r == '\'' {
@@ -318,15 +318,13 @@ func (l *tomlLexer) lexKey() tomlLexStateFn {
 			if err != nil {
 				return l.errorf(err.Error())
 			}
-			growingString += "'" + str + "'"
+			growingString += str
 			l.next()
 			continue
 		} else if r == '\n' {
 			return l.errorf("keys cannot contain new lines")
 		} else if isSpace(r) {
 			break
-		} else if r == '.' {
-			// skip
 		} else if !isValidBareChar(r) {
 			return l.errorf("keys cannot contain %c character", r)
 		}
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
index 0e1c57e80b..671da5564c 100644
--- a/vendor/github.com/pelletier/go-toml/marshal.go
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -6,28 +6,20 @@ import (
 	"fmt"
 	"io"
 	"reflect"
-	"sort"
 	"strconv"
 	"strings"
 	"time"
 )
 
-const (
-	tagFieldName    = "toml"
-	tagFieldComment = "comment"
-	tagCommented    = "commented"
-	tagMultiline    = "multiline"
-	tagDefault      = "default"
-)
+const tagKeyMultiline = "multiline"
 
 type tomlOpts struct {
-	name         string
-	comment      string
-	commented    bool
-	multiline    bool
-	include      bool
-	omitempty    bool
-	defaultValue string
+	name      string
+	comment   string
+	commented bool
+	multiline bool
+	include   bool
+	omitempty bool
 }
 
 type encOpts struct {
@@ -39,37 +31,10 @@ var encOptsDefaults = encOpts{
 	quoteMapKeys: false,
 }
 
-type annotation struct {
-	tag          string
-	comment      string
-	commented    string
-	multiline    string
-	defaultValue string
-}
-
-var annotationDefault = annotation{
-	tag:          tagFieldName,
-	comment:      tagFieldComment,
-	commented:    tagCommented,
-	multiline:    tagMultiline,
-	defaultValue: tagDefault,
-}
-
-type marshalOrder int
-
-// Orders the Encoder can write the fields to the output stream.
-const (
-	// Sort fields alphabetically.
-	OrderAlphabetical marshalOrder = iota + 1
-	// Preserve the order the fields are encountered. For example, the order of fields in
-	// a struct.
-	OrderPreserve
-)
-
 var timeType = reflect.TypeOf(time.Time{})
 var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
 
-// Check if the given marshal type maps to a Tree primitive
+// Check if the given marshall type maps to a Tree primitive
 func isPrimitive(mtype reflect.Type) bool {
 	switch mtype.Kind() {
 	case reflect.Ptr:
@@ -91,7 +56,7 @@ func isPrimitive(mtype reflect.Type) bool {
 	}
 }
 
-// Check if the given marshal type maps to a Tree slice
+// Check if the given marshall type maps to a Tree slice
 func isTreeSlice(mtype reflect.Type) bool {
 	switch mtype.Kind() {
 	case reflect.Slice:
@@ -101,7 +66,7 @@ func isTreeSlice(mtype reflect.Type) bool {
 	}
 }
 
-// Check if the given marshal type maps to a non-Tree slice
+// Check if the given marshall type maps to a non-Tree slice
 func isOtherSlice(mtype reflect.Type) bool {
 	switch mtype.Kind() {
 	case reflect.Ptr:
@@ -113,7 +78,7 @@ func isOtherSlice(mtype reflect.Type) bool {
 	}
 }
 
-// Check if the given marshal type maps to a Tree
+// Check if the given marshall type maps to a Tree
 func isTree(mtype reflect.Type) bool {
 	switch mtype.Kind() {
 	case reflect.Map:
@@ -171,8 +136,6 @@ Tree primitive types and corresponding marshal types:
   string     string, pointers to same
   bool       bool, pointers to same
   time.Time  time.Time{}, pointers to same
-
-For additional flexibility, use the Encoder API.
 */
 func Marshal(v interface{}) ([]byte, error) {
 	return NewEncoder(nil).marshal(v)
@@ -182,21 +145,13 @@ func Marshal(v interface{}) ([]byte, error) {
 type Encoder struct {
 	w io.Writer
 	encOpts
-	annotation
-	line  int
-	col   int
-	order marshalOrder
 }
 
 // NewEncoder returns a new encoder that writes to w.
 func NewEncoder(w io.Writer) *Encoder {
 	return &Encoder{
-		w:          w,
-		encOpts:    encOptsDefaults,
-		annotation: annotationDefault,
-		line:       0,
-		col:        1,
-		order:      OrderAlphabetical,
+		w:       w,
+		encOpts: encOptsDefaults,
 	}
 }
 
@@ -242,49 +197,11 @@ func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
 	return e
 }
 
-// Order allows to change in which order fields will be written to the output stream.
-func (e *Encoder) Order(ord marshalOrder) *Encoder {
-	e.order = ord
-	return e
-}
-
-// SetTagName allows changing default tag "toml"
-func (e *Encoder) SetTagName(v string) *Encoder {
-	e.tag = v
-	return e
-}
-
-// SetTagComment allows changing default tag "comment"
-func (e *Encoder) SetTagComment(v string) *Encoder {
-	e.comment = v
-	return e
-}
-
-// SetTagCommented allows changing default tag "commented"
-func (e *Encoder) SetTagCommented(v string) *Encoder {
-	e.commented = v
-	return e
-}
-
-// SetTagMultiline allows changing default tag "multiline"
-func (e *Encoder) SetTagMultiline(v string) *Encoder {
-	e.multiline = v
-	return e
-}
-
 func (e *Encoder) marshal(v interface{}) ([]byte, error) {
 	mtype := reflect.TypeOf(v)
-
-	switch mtype.Kind() {
-	case reflect.Struct, reflect.Map:
-	case reflect.Ptr:
-		if mtype.Elem().Kind() != reflect.Struct {
-			return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML")
-		}
-	default:
-		return []byte{}, errors.New("Only a struct or map can be marshaled to TOML")
+	if mtype.Kind() != reflect.Struct {
+		return []byte{}, errors.New("Only a struct can be marshaled to TOML")
 	}
-
 	sval := reflect.ValueOf(v)
 	if isCustomMarshaler(mtype) {
 		return callCustomMarshaler(sval)
@@ -295,27 +212,22 @@ func (e *Encoder) marshal(v interface{}) ([]byte, error) {
 	}
 
 	var buf bytes.Buffer
-	_, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order)
+	_, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine)
 
 	return buf.Bytes(), err
 }
 
-// Create next tree with a position based on Encoder.line
-func (e *Encoder) nextTree() *Tree {
-	return newTreeWithPosition(Position{Line: e.line, Col: 1})
-}
-
 // Convert given marshal struct or map value to toml tree
 func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
 	if mtype.Kind() == reflect.Ptr {
 		return e.valueToTree(mtype.Elem(), mval.Elem())
 	}
-	tval := e.nextTree()
+	tval := newTree()
 	switch mtype.Kind() {
 	case reflect.Struct:
 		for i := 0; i < mtype.NumField(); i++ {
 			mtypef, mvalf := mtype.Field(i), mval.Field(i)
-			opts := tomlOptions(mtypef, e.annotation)
+			opts := tomlOptions(mtypef)
 			if opts.include && (!opts.omitempty || !isZero(mvalf)) {
 				val, err := e.valueToToml(mtypef.Type, mvalf)
 				if err != nil {
@@ -330,26 +242,7 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er
 			}
 		}
 	case reflect.Map:
-		keys := mval.MapKeys()
-		if e.order == OrderPreserve && len(keys) > 0 {
-			// Sorting []reflect.Value is not straight forward.
-			//
-			// OrderPreserve will support deterministic results when string is used
-			// as the key to maps.
-			typ := keys[0].Type()
-			kind := keys[0].Kind()
-			if kind == reflect.String {
-				ikeys := make([]string, len(keys))
-				for i := range keys {
-					ikeys[i] = keys[i].Interface().(string)
-				}
-				sort.Strings(ikeys)
-				for i := range ikeys {
-					keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ)
-				}
-			}
-		}
-		for _, key := range keys {
+		for _, key := range mval.MapKeys() {
 			mvalf := mval.MapIndex(key)
 			val, err := e.valueToToml(mtype.Elem(), mvalf)
 			if err != nil {
@@ -397,7 +290,6 @@ func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (int
 
 // Convert given marshal value to toml value
 func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
-	e.line++
 	if mtype.Kind() == reflect.Ptr {
 		return e.valueToToml(mtype.Elem(), mval.Elem())
 	}
@@ -415,9 +307,6 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface
 		case reflect.Bool:
 			return mval.Bool(), nil
 		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-			if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) {
-				return fmt.Sprint(mval), nil
-			}
 			return mval.Int(), nil
 		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
 			return mval.Uint(), nil
@@ -437,7 +326,7 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface
 // Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
 // sub-structs, and only definite types can be unmarshaled.
 func (t *Tree) Unmarshal(v interface{}) error {
-	d := Decoder{tval: t, tagName: tagFieldName}
+	d := Decoder{tval: t}
 	return d.unmarshal(v)
 }
 
@@ -458,14 +347,6 @@ func (t *Tree) Marshal() ([]byte, error) {
 // The following struct annotations are supported:
 //
 //   toml:"Field"      Overrides the field's name to map to.
-//   default:"foo"     Provides a default value.
-//
-// For default values, only fields of the following types are supported:
-//   * string
-//   * bool
-//   * int
-//   * int64
-//   * float64
 //
 // See Marshal() documentation for types mapping table.
 func Unmarshal(data []byte, v interface{}) error {
@@ -481,7 +362,6 @@ type Decoder struct {
 	r    io.Reader
 	tval *Tree
 	encOpts
-	tagName string
 }
 
 // NewDecoder returns a new decoder that reads from r.
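Note on the marshal.go hunks above: this pin to go-toml v1.2.0 removes the configurable annotation machinery (Encoder.Order, SetTagName and friends), the default:"..." tag, and the time.Duration special-casing, leaving only the fixed tag names that tomlOptions reads. A minimal sketch of the struct-tag surface that still works after this downgrade, assuming only the vendored v1.2.0 API visible in these hunks (toml.Marshal plus the "toml"/"comment"/"commented"/"multiline" tags); the Config type is illustrative, not part of this repository:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

// Config uses only the tag names that v1.2.0's tomlOptions still reads.
type Config struct {
	Host  string `toml:"host" comment:"address the service binds to"`
	Port  int    `toml:"port"`
	Debug bool   `toml:"debug" commented:"true"` // emitted as a commented-out key
	Note  string `toml:"note,omitempty"`         // omitted while empty
}

func main() {
	// v1.2.0's Encoder.marshal rejects anything that is not a struct,
	// per the "Only a struct can be marshaled to TOML" branch above.
	out, err := toml.Marshal(Config{Host: "127.0.0.1", Port: 8080})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}

Callers that relied on default:"..." values or custom tag names need to apply those themselves before/after (un)marshaling once this vendored copy is in place.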
@@ -489,7 +369,6 @@ func NewDecoder(r io.Reader) *Decoder {
 	return &Decoder{
 		r:       r,
 		encOpts: encOptsDefaults,
-		tagName: tagFieldName,
 	}
 }
 
@@ -506,27 +385,13 @@ func (d *Decoder) Decode(v interface{}) error {
 	return d.unmarshal(v)
 }
 
-// SetTagName allows changing default tag "toml"
-func (d *Decoder) SetTagName(v string) *Decoder {
-	d.tagName = v
-	return d
-}
-
 func (d *Decoder) unmarshal(v interface{}) error {
 	mtype := reflect.TypeOf(v)
-	if mtype.Kind() != reflect.Ptr {
-		return errors.New("only a pointer to struct or map can be unmarshaled from TOML")
+	if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
+		return errors.New("Only a pointer to struct can be unmarshaled from TOML")
 	}
-
-	elem := mtype.Elem()
-
-	switch elem.Kind() {
-	case reflect.Struct, reflect.Map:
-	default:
-		return errors.New("only a pointer to struct or map can be unmarshaled from TOML")
-	}
-
-	sval, err := d.valueFromTree(elem, d.tval)
+	sval, err := d.valueFromTree(mtype.Elem(), d.tval)
 	if err != nil {
 		return err
 	}
@@ -545,18 +410,10 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value,
 		mval = reflect.New(mtype).Elem()
 		for i := 0; i < mtype.NumField(); i++ {
 			mtypef := mtype.Field(i)
-			an := annotation{tag: d.tagName}
-			opts := tomlOptions(mtypef, an)
+			opts := tomlOptions(mtypef)
 			if opts.include {
 				baseKey := opts.name
-				keysToTry := []string{
-					baseKey,
-					strings.ToLower(baseKey),
-					strings.ToTitle(baseKey),
-					strings.ToLower(string(baseKey[0])) + baseKey[1:],
-				}
-
-				found := false
+				keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)}
 				for _, key := range keysToTry {
 					exists := tval.Has(key)
 					if !exists {
@@ -568,42 +425,8 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value,
 						return mval, formatError(err, tval.GetPosition(key))
 					}
 					mval.Field(i).Set(mvalf)
-					found = true
 					break
 				}
-
-				if !found && opts.defaultValue != "" {
-					mvalf := mval.Field(i)
-					var val interface{}
-					var err error
-					switch mvalf.Kind() {
-					case reflect.Bool:
-						val, err = strconv.ParseBool(opts.defaultValue)
-						if err != nil {
-							return mval.Field(i), err
-						}
-					case reflect.Int:
-						val, err = strconv.Atoi(opts.defaultValue)
-						if err != nil {
-							return mval.Field(i), err
-						}
-					case reflect.String:
-						val = opts.defaultValue
-					case reflect.Int64:
-						val, err = strconv.ParseInt(opts.defaultValue, 10, 64)
-						if err != nil {
-							return mval.Field(i), err
-						}
-					case reflect.Float64:
-						val, err = strconv.ParseFloat(opts.defaultValue, 64)
-						if err != nil {
-							return mval.Field(i), err
-						}
-					default:
-						return mval.Field(i), fmt.Errorf("unsuported field type for default option")
-					}
-					mval.Field(i).Set(reflect.ValueOf(val))
-				}
 			}
 		}
 	case reflect.Map:
@@ -615,7 +438,7 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value,
 			if err != nil {
 				return mval, formatError(err, tval.GetPosition(key))
 			}
-			mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf)
+			mval.SetMapIndex(reflect.ValueOf(key), mvalf)
 		}
 	}
 	return mval, nil
@@ -653,20 +476,20 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.V
 		return d.unwrapPointer(mtype, tval)
 	}
 
-	switch t := tval.(type) {
+	switch tval.(type) {
 	case *Tree:
 		if isTree(mtype) {
-			return d.valueFromTree(mtype, t)
+			return d.valueFromTree(mtype, tval.(*Tree))
 		}
 		return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval)
 	case []*Tree:
 		if isTreeSlice(mtype) {
-			return d.valueFromTreeSlice(mtype, t)
+			return d.valueFromTreeSlice(mtype, tval.([]*Tree))
 		}
 		return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval)
 	case []interface{}:
 		if isOtherSlice(mtype) {
-			return d.valueFromOtherSlice(mtype, t)
+			return d.valueFromOtherSlice(mtype, tval.([]interface{}))
 		}
 		return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval)
 	default:
@@ -689,17 +512,10 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.V
 			return val.Convert(mtype), nil
 		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 			val := reflect.ValueOf(tval)
-			if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String {
-				d, err := time.ParseDuration(val.String())
-				if err != nil {
-					return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err)
-				}
-				return reflect.ValueOf(d), nil
-			}
 			if !val.Type().ConvertibleTo(mtype) {
 				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
 			}
-			if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(mtype).Int()) {
+			if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) {
 				return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
 			}
 
@@ -709,11 +525,10 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.V
 			if !val.Type().ConvertibleTo(mtype) {
 				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
 			}
-
-			if val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
+			if val.Int() < 0 {
 				return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String())
 			}
-			if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Convert(mtype).Uint())) {
+			if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) {
 				return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
 			}
 
@@ -723,7 +538,7 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.V
 			if !val.Type().ConvertibleTo(mtype) {
 				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
 			}
-			if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(mtype).Float()) {
+			if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) {
 				return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
 			}
 
@@ -744,25 +559,16 @@ func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.V
 	return mval, nil
 }
 
-func tomlOptions(vf reflect.StructField, an annotation) tomlOpts {
-	tag := vf.Tag.Get(an.tag)
+func tomlOptions(vf reflect.StructField) tomlOpts {
+	tag := vf.Tag.Get("toml")
 	parse := strings.Split(tag, ",")
 	var comment string
-	if c := vf.Tag.Get(an.comment); c != "" {
+	if c := vf.Tag.Get("comment"); c != "" {
 		comment = c
 	}
-	commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented))
-	multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline))
-	defaultValue := vf.Tag.Get(tagDefault)
-	result := tomlOpts{
-		name:         vf.Name,
-		comment:      comment,
-		commented:    commented,
-		multiline:    multiline,
-		include:      true,
-		omitempty:    false,
-		defaultValue: defaultValue,
-	}
+	commented, _ := strconv.ParseBool(vf.Tag.Get("commented"))
+	multiline, _ := strconv.ParseBool(vf.Tag.Get(tagKeyMultiline))
+	result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false}
 	if parse[0] != "" {
 		if parse[0] == "-" && len(parse) == 1 {
 			result.include = false
diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_Map_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_Map_test.toml
deleted file mode 100644
index a3bd5130d9..0000000000
--- a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_Map_test.toml
+++ /dev/null
@@ -1,17 +0,0 @@
-title = "TOML Marshal Testing"
-
-[basic_map]
-  one = "one"
-  two = "two"
-
-[long_map]
-  a7 = "1"
-  b3 = "2"
-  c8 = "3"
-  d4 = "4"
-  e6 = "5"
-  f5 = "6"
-  g10 = "7"
-  h1 = "8"
-  i2 = "9"
-  j9 = "10"
diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
deleted file mode 100644
index 9d68b59996..0000000000
--- a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
+++ /dev/null
@@ -1,38 +0,0 @@
-title = "TOML Marshal Testing"
-
-[basic_lists]
-  floats = [12.3,45.6,78.9]
-  bools = [true,false,true]
-  dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
-  ints = [8001,8001,8002]
-  uints = [5002,5003]
-  strings = ["One","Two","Three"]
-
-[[subdocptrs]]
-  name = "Second"
-
-[basic_map]
-  one = "one"
-  two = "two"
-
-[subdoc]
-
-  [subdoc.second]
-    name = "Second"
-
-  [subdoc.first]
-    name = "First"
-
-[basic]
-  uint = 5001
-  bool = true
-  float = 123.4
-  int = 5000
-  string = "Bite me"
-  date = 1979-05-27T07:32:00Z
-
-[[subdoclist]]
-  name = "List.First"
-
-[[subdoclist]]
-  name = "List.Second"
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
index a7498e49b3..2d27599a99 100644
--- a/vendor/github.com/pelletier/go-toml/parser.go
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -77,10 +77,8 @@ func (p *tomlParser) parseStart() tomlParserStateFn {
 		return p.parseAssign
 	case tokenEOF:
 		return nil
-	case tokenError:
-		p.raiseError(tok, "parsing error: %s", tok.String())
 	default:
-		p.raiseError(tok, "unexpected token %s", tok.typ)
+		p.raiseError(tok, "unexpected token")
 	}
 	return nil
 }
@@ -167,11 +165,6 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
 	key := p.getToken()
 	p.assume(tokenEqual)
 
-	parsedKey, err := parseKey(key.val)
-	if err != nil {
-		p.raiseError(key, "invalid key: %s", err.Error())
-	}
-
 	value := p.parseRvalue()
 	var tableKey []string
 	if len(p.currentTable) > 0 {
@@ -180,9 +173,6 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
 		tableKey = []string{}
 	}
 
-	prefixKey := parsedKey[0 : len(parsedKey)-1]
-	tableKey = append(tableKey, prefixKey...)
-
 	// find the table to assign, looking out for arrays of tables
 	var targetNode *Tree
 	switch node := p.tree.GetPath(tableKey).(type) {
@@ -190,19 +180,17 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
 		targetNode = node[len(node)-1]
 	case *Tree:
 		targetNode = node
-	case nil:
-		// create intermediate
-		if err := p.tree.createSubTree(tableKey, key.Position); err != nil {
-			p.raiseError(key, "could not create intermediate group: %s", err)
-		}
-		targetNode = p.tree.GetPath(tableKey).(*Tree)
 	default:
 		p.raiseError(key, "Unknown table type for path: %s", strings.Join(tableKey, "."))
 	}
 
 	// assign value to the found table
-	keyVal := parsedKey[len(parsedKey)-1]
+	keyVals := []string{key.val}
+	if len(keyVals) != 1 {
+		p.raiseError(key, "Invalid key")
+	}
+	keyVal := keyVals[0]
 	localKey := []string{keyVal}
 	finalKey := append(tableKey, keyVal)
 	if targetNode.GetPath(localKey) != nil {
@@ -350,7 +338,7 @@ Loop:
 		case tokenRightCurlyBrace:
 			p.getToken()
 			break Loop
-		case tokenKey, tokenInteger, tokenString:
+		case tokenKey:
 			if !tokenIsComma(previous) && previous != nil {
 				p.raiseError(follow, "comma expected between fields in inline table")
 			}
diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh
new file mode 100644
index 0000000000..ba6adf3fc7
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/test.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+# fail out of the script if anything here fails
+set -e
+set -o pipefail
+
+# set the path to the present working directory
+export GOPATH=`pwd`
+
+function git_clone() {
+  path=$1
+  branch=$2
+  version=$3
+  if [ ! -d "src/$path" ]; then
+    mkdir -p src/$path
+    git clone https://$path.git src/$path
+  fi
+  pushd src/$path
+  git checkout "$branch"
+  git reset --hard "$version"
+  popd
+}
+
+# Remove potential previous runs
+rm -rf src test_program_bin toml-test
+
+go get github.com/pelletier/go-buffruneio
+go get github.com/davecgh/go-spew/spew
+go get gopkg.in/yaml.v2
+go get github.com/BurntSushi/toml
+
+# get code for BurntSushi TOML validation
+# pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize)
+git_clone github.com/BurntSushi/toml master HEAD
+git_clone github.com/BurntSushi/toml-test master HEAD #was: 0.2.0 HEAD
+
+# build the BurntSushi test application
+go build -o toml-test github.com/BurntSushi/toml-test
+
+# vendorize the current lib for testing
+# NOTE: this basically mocks an install without having to go back out to github for code
+mkdir -p src/github.com/pelletier/go-toml/cmd
+mkdir -p src/github.com/pelletier/go-toml/query
+cp *.go *.toml src/github.com/pelletier/go-toml
+cp -R cmd/* src/github.com/pelletier/go-toml/cmd
+cp -R query/* src/github.com/pelletier/go-toml/query
+go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go
+
+# Run basic unit tests
+go test github.com/pelletier/go-toml -covermode=count -coverprofile=coverage.out
+go test github.com/pelletier/go-toml/cmd/tomljson
+go test github.com/pelletier/go-toml/query
+
+# run the entire BurntSushi test suite
+if [[ $# -eq 0 ]] ; then
+  echo "Running all BurntSushi tests"
+  ./toml-test ./test_program_bin | tee test_out
+else
+  # run a specific test
+  test=$1
+  test_path='src/github.com/BurntSushi/toml-test/tests'
+  valid_test="$test_path/valid/$test"
+  invalid_test="$test_path/invalid/$test"
+
+  if [ -e "$valid_test.toml" ]; then
+    echo "Valid Test TOML for $test:"
+    echo "===="
+    cat "$valid_test.toml"
+
+    echo "Valid Test JSON for $test:"
+    echo "===="
+    cat "$valid_test.json"
+
+    echo "Go-TOML Output for $test:"
+    echo "===="
+    cat "$valid_test.toml" | ./test_program_bin
+  fi
+
+  if [ -e "$invalid_test.toml" ]; then
+    echo "Invalid Test TOML for $test:"
+    echo "===="
+    cat "$invalid_test.toml"
+
+    echo "Go-TOML Output for $test:"
+    echo "===="
+    echo "go-toml Output:"
+    cat "$invalid_test.toml" | ./test_program_bin
+  fi
+fi
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
index 358a9be5ce..98c185ad0b 100644
--- a/vendor/github.com/pelletier/go-toml/toml.go
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -27,13 +27,9 @@ type Tree struct {
 }
 
 func newTree() *Tree {
-	return newTreeWithPosition(Position{})
-}
-
-func newTreeWithPosition(pos Position) *Tree {
 	return &Tree{
 		values:   make(map[string]interface{}),
-		position: pos,
+		position: Position{},
 	}
 }
 
@@ -198,10 +194,10 @@ func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) {
 // formatting instructions to the key, that will be reused by Marshal().
 func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) {
 	subtree := t
-	for i, intermediateKey := range keys[:len(keys)-1] {
+	for _, intermediateKey := range keys[:len(keys)-1] {
 		nextTree, exists := subtree.values[intermediateKey]
 		if !exists {
-			nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
+			nextTree = newTree()
 			subtree.values[intermediateKey] = nextTree // add new element here
 		}
 		switch node := nextTree.(type) {
@@ -211,7 +207,7 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac
 			// go to most recent element
 			if len(node) == 0 {
 				// create element if it does not exist
-				subtree.values[intermediateKey] = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}))
+				subtree.values[intermediateKey] = append(node, newTree())
 			}
 			subtree = node[len(node)-1]
 		}
@@ -219,21 +215,19 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac
 
 	var toInsert interface{}
 
-	switch v := value.(type) {
+	switch value.(type) {
 	case *Tree:
-		v.comment = opts.Comment
+		tt := value.(*Tree)
+		tt.comment = opts.Comment
 		toInsert = value
 	case []*Tree:
 		toInsert = value
 	case *tomlValue:
-		v.comment = opts.Comment
-		toInsert = v
+		tt := value.(*tomlValue)
+		tt.comment = opts.Comment
+		toInsert = tt
 	default:
-		toInsert = &tomlValue{value: value,
-			comment:   opts.Comment,
-			commented: opts.Commented,
-			multiline: opts.Multiline,
-			position:  Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}}
+		toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline}
 	}
 
 	subtree.values[keys[len(keys)-1]] = toInsert
@@ -262,35 +256,44 @@ func (t *Tree) SetPath(keys []string, value interface{}) {
 // SetPathWithComment is the same as SetPath, but allows you to provide comment
 // information to the key, that will be reused by Marshal().
 func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) {
-	t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value)
-}
-
-// Delete removes a key from the tree.
-// Key is a dot-separated path (e.g. a.b.c).
-func (t *Tree) Delete(key string) error {
-	keys, err := parseKey(key)
-	if err != nil {
-		return err
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		nextTree, exists := subtree.values[intermediateKey]
+		if !exists {
+			nextTree = newTree()
+			subtree.values[intermediateKey] = nextTree // add new element here
+		}
+		switch node := nextTree.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				// create element if it does not exist
+				subtree.values[intermediateKey] = append(node, newTree())
+			}
+			subtree = node[len(node)-1]
+		}
 	}
-	return t.DeletePath(keys)
-}
 
-// DeletePath removes a key from the tree.
-// Keys is an array of path elements (e.g. {"a","b","c"}).
-func (t *Tree) DeletePath(keys []string) error {
-	keyLen := len(keys)
-	if keyLen == 1 {
-		delete(t.values, keys[0])
-		return nil
-	}
-	tree := t.GetPath(keys[:keyLen-1])
-	item := keys[keyLen-1]
-	switch node := tree.(type) {
+	var toInsert interface{}
+
+	switch value.(type) {
 	case *Tree:
-		delete(node.values, item)
-		return nil
+		tt := value.(*Tree)
+		tt.comment = comment
+		toInsert = value
+	case []*Tree:
+		toInsert = value
+	case *tomlValue:
+		tt := value.(*tomlValue)
+		tt.comment = comment
+		toInsert = tt
+	default:
+		toInsert = &tomlValue{value: value, comment: comment, commented: commented}
 	}
-	return errors.New("no such key to delete")
+
+	subtree.values[keys[len(keys)-1]] = toInsert
 }
 
 // createSubTree takes a tree and a key and create the necessary intermediate
@@ -302,10 +305,10 @@ func (t *Tree) DeletePath(keys []string) error {
 // Returns nil on success, error object on failure
 func (t *Tree) createSubTree(keys []string, pos Position) error {
 	subtree := t
-	for i, intermediateKey := range keys {
+	for _, intermediateKey := range keys {
 		nextTree, exists := subtree.values[intermediateKey]
 		if !exists {
-			tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
+			tree := newTree()
 			tree.position = pos
 			subtree.values[intermediateKey] = tree
 			nextTree = tree
@@ -334,39 +337,10 @@ func LoadBytes(b []byte) (tree *Tree, err error) {
 			err = errors.New(r.(string))
 		}
 	}()
-
-	if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) {
-		b = b[4:]
-	} else if len(b) >= 3 && hasUTF8BOM3(b) {
-		b = b[3:]
-	} else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) {
-		b = b[2:]
-	}
-
 	tree = parseToml(lexToml(b))
 	return
 }
 
-func hasUTF16BigEndianBOM2(b []byte) bool {
-	return b[0] == 0xFE && b[1] == 0xFF
-}
-
-func hasUTF16LittleEndianBOM2(b []byte) bool {
-	return b[0] == 0xFF && b[1] == 0xFE
-}
-
-func hasUTF8BOM3(b []byte) bool {
-	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
-}
-
-func hasUTF32BigEndianBOM4(b []byte) bool {
-	return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF
-}
-
-func hasUTF32LittleEndianBOM4(b []byte) bool {
-	return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00
-}
-
 // LoadReader creates a Tree from any io.Reader.
 func LoadReader(reader io.Reader) (tree *Tree, err error) {
 	inputBytes, err := ioutil.ReadAll(reader)
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
index 198d5ac174..e4049e29f2 100644
--- a/vendor/github.com/pelletier/go-toml/tomltree_write.go
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -12,18 +12,6 @@ import (
 	"time"
 )
 
-type valueComplexity int
-
-const (
-	valueSimple valueComplexity = iota + 1
-	valueComplex
-)
-
-type sortNode struct {
-	key        string
-	complexity valueComplexity
-}
-
 // Encodes a string to a TOML-compliant multi-line string value
 // This function is a clone of the existing encodeTomlString function, except that whitespace characters
 // are preserved. Quotation marks and backslashes are also not escaped.
@@ -165,200 +153,111 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
 	return "", fmt.Errorf("unsupported value type %T: %v", v, v)
 }
 
-func getTreeArrayLine(trees []*Tree) (line int) {
-	// get lowest line number that is not 0
-	for _, tv := range trees {
-		if tv.position.Line < line || line == 0 {
-			line = tv.position.Line
-		}
-	}
-	return
-}
-
-func sortByLines(t *Tree) (vals []sortNode) {
-	var (
-		line  int
-		lines []int
-		tv    *Tree
-		tom   *tomlValue
-		node  sortNode
-	)
-	vals = make([]sortNode, 0)
-	m := make(map[int]sortNode)
-
-	for k := range t.values {
-		v := t.values[k]
-		switch v.(type) {
-		case *Tree:
-			tv = v.(*Tree)
-			line = tv.position.Line
-			node = sortNode{key: k, complexity: valueComplex}
-		case []*Tree:
-			line = getTreeArrayLine(v.([]*Tree))
-			node = sortNode{key: k, complexity: valueComplex}
-		default:
-			tom = v.(*tomlValue)
-			line = tom.position.Line
-			node = sortNode{key: k, complexity: valueSimple}
-		}
-		lines = append(lines, line)
-		vals = append(vals, node)
-		m[line] = node
-	}
-	sort.Ints(lines)
-
-	for i, line := range lines {
-		vals[i] = m[line]
-	}
-
-	return vals
-}
-
-func sortAlphabetical(t *Tree) (vals []sortNode) {
-	var (
-		node     sortNode
-		simpVals []string
-		compVals []string
-	)
-	vals = make([]sortNode, 0)
-	m := make(map[string]sortNode)
+func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
+	simpleValuesKeys := make([]string, 0)
+	complexValuesKeys := make([]string, 0)
 
 	for k := range t.values {
 		v := t.values[k]
 		switch v.(type) {
 		case *Tree, []*Tree:
-			node = sortNode{key: k, complexity: valueComplex}
-			compVals = append(compVals, node.key)
+			complexValuesKeys = append(complexValuesKeys, k)
 		default:
-			node = sortNode{key: k, complexity: valueSimple}
-			simpVals = append(simpVals, node.key)
+			simpleValuesKeys = append(simpleValuesKeys, k)
 		}
-		vals = append(vals, node)
-		m[node.key] = node
-	}
-
-	// Simples first to match previous implementation
-	sort.Strings(simpVals)
-	i := 0
-	for _, key := range simpVals {
-		vals[i] = m[key]
-		i++
 	}
 
-	sort.Strings(compVals)
-	for _, key := range compVals {
-		vals[i] = m[key]
-		i++
-	}
+	sort.Strings(simpleValuesKeys)
+	sort.Strings(complexValuesKeys)
 
-	return vals
-}
+	for _, k := range simpleValuesKeys {
+		v, ok := t.values[k].(*tomlValue)
+		if !ok {
+			return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+		}
 
-func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
-	return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical)
-}
+		repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine)
+		if err != nil {
+			return bytesCount, err
+		}
 
-func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder) (int64, error) {
-	var orderedVals []sortNode
+		if v.comment != "" {
+			comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
+			start := "# "
+			if strings.HasPrefix(comment, "#") {
+				start = ""
+			}
+			writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n")
+			bytesCount += int64(writtenBytesCountComment)
+			if errc != nil {
+				return bytesCount, errc
+			}
+		}
 
-	switch ord {
-	case OrderPreserve:
-		orderedVals = sortByLines(t)
-	default:
-		orderedVals = sortAlphabetical(t)
+		var commented string
+		if v.commented {
+			commented = "# "
+		}
+		writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n")
+		bytesCount += int64(writtenBytesCount)
+		if err != nil {
+			return bytesCount, err
+		}
 	}
 
-	for _, node := range orderedVals {
-		switch node.complexity {
-		case valueComplex:
-			k := node.key
-			v := t.values[k]
-
-			combinedKey := k
-			if keyspace != "" {
-				combinedKey = keyspace + "." + combinedKey
-			}
-			var commented string
-			if t.commented {
-				commented = "# "
-			}
+	for _, k := range complexValuesKeys {
+		v := t.values[k]
 
-			switch node := v.(type) {
-			// node has to be of those two types given how keys are sorted above
-			case *Tree:
-				tv, ok := t.values[k].(*Tree)
-				if !ok {
-					return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
-				}
-				if tv.comment != "" {
-					comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
-					start := "# "
-					if strings.HasPrefix(comment, "#") {
-						start = ""
-					}
-					writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
-					bytesCount += int64(writtenBytesCountComment)
-					if errc != nil {
-						return bytesCount, errc
-					}
-				}
-				writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
-				bytesCount += int64(writtenBytesCount)
-				if err != nil {
-					return bytesCount, err
-				}
-				bytesCount, err = node.writeToOrdered(w, indent+"  ", combinedKey, bytesCount, arraysOneElementPerLine, ord)
-				if err != nil {
-					return bytesCount, err
-				}
-			case []*Tree:
-				for _, subTree := range node {
-					writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
-					bytesCount += int64(writtenBytesCount)
-					if err != nil {
-						return bytesCount, err
-					}
+		combinedKey := k
+		if keyspace != "" {
+			combinedKey = keyspace + "." + combinedKey
+		}
+		var commented string
+		if t.commented {
+			commented = "# "
+		}
 
-					bytesCount, err = subTree.writeToOrdered(w, indent+"  ", combinedKey, bytesCount, arraysOneElementPerLine, ord)
-					if err != nil {
-						return bytesCount, err
-					}
-				}
-			}
-		default: // Simple
-			k := node.key
-			v, ok := t.values[k].(*tomlValue)
+		switch node := v.(type) {
+		// node has to be of those two types given how keys are sorted above
+		case *Tree:
+			tv, ok := t.values[k].(*Tree)
 			if !ok {
 				return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
 			}
-
-			repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine)
-			if err != nil {
-				return bytesCount, err
-			}
-
-			if v.comment != "" {
-				comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
+			if tv.comment != "" {
+				comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
 				start := "# "
 				if strings.HasPrefix(comment, "#") {
 					start = ""
 				}
-				writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n")
+				writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
 				bytesCount += int64(writtenBytesCountComment)
 				if errc != nil {
 					return bytesCount, errc
 				}
 			}
-
-			var commented string
-			if v.commented {
-				commented = "# "
-			}
-			writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n")
+			writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
 			bytesCount += int64(writtenBytesCount)
 			if err != nil {
 				return bytesCount, err
 			}
+			bytesCount, err = node.writeTo(w, indent+"  ", combinedKey, bytesCount, arraysOneElementPerLine)
+			if err != nil {
+				return bytesCount, err
+			}
+		case []*Tree:
+			for _, subTree := range node {
+				writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
+				bytesCount += int64(writtenBytesCount)
+				if err != nil {
+					return bytesCount, err
+				}
+
+				bytesCount, err = subTree.writeTo(w, indent+"  ", combinedKey, bytesCount, arraysOneElementPerLine)
+				if err != nil {
+					return bytesCount, err
+				}
+			}
 		}
 	}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go b/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go
index 0f22c469d6..96746b9d49 100644
--- a/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go
@@ -55,7 +55,7 @@ func (r *ffReader) Reset(d []byte) {
 	r.l = len(d)
 }
 
-// Calcuates the Position with line and line offset,
+// Calculates the Position with line and line offset,
 // because this isn't counted for performance reasons,
 // it will iterate the buffer from the beginning, and should
 // only be used in error-paths.
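For reviewers tracking the behavioral side of the go-toml pin above: the rewritten keysparsing.go/parser.go keep double-quoted keys but drop single-quoted keys, dotted keys on the left-hand side of assignments, and the Tree.Delete/DeletePath helpers, and LoadBytes no longer strips BOMs. A small sketch of what still works, assuming only the vendored v1.2.0 API that appears in these hunks (LoadBytes, Tree.Get, Tree.GetPath); the sample document is illustrative:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	doc := []byte(`
[server]
"bind.addr" = "0.0.0.0"  # double-quoted key: the dot is part of the name
port = 8080
`)
	tree, err := toml.LoadBytes(doc)
	if err != nil {
		panic(err)
	}
	// Get re-parses dotted key strings, so a key that itself contains "."
	// must be addressed via GetPath, which takes explicit path elements.
	fmt.Println(tree.GetPath([]string{"server", "bind.addr"})) // 0.0.0.0
	fmt.Println(tree.Get("server.port"))                       // 8080
}

Anything in this repository that was writing `a.b = 1` style assignments or calling Tree.Delete will need to be adjusted before this downgrade merges.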
diff --git a/vendor/github.com/spf13/viper/go.mod b/vendor/github.com/spf13/viper/go.mod index 2794300552..86e801c150 100644 --- a/vendor/github.com/spf13/viper/go.mod +++ b/vendor/github.com/spf13/viper/go.mod @@ -2,42 +2,23 @@ module github.com/spf13/viper require ( github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect - github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.10+incompatible // indirect + github.com/coreos/go-etcd v2.0.0+incompatible // indirect github.com/coreos/go-semver v0.2.0 // indirect - github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e // indirect - github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect - github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/fsnotify/fsnotify v1.4.7 - github.com/gogo/protobuf v1.2.1 // indirect - github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect - github.com/google/btree v1.0.0 // indirect - github.com/gorilla/websocket v1.4.0 // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.9.0 // indirect github.com/hashicorp/hcl v1.0.0 - github.com/jonboulle/clockwork v0.1.0 // indirect github.com/magiconair/properties v1.8.0 github.com/mitchellh/mapstructure v1.1.2 github.com/pelletier/go-toml v1.2.0 - github.com/prometheus/client_golang v0.9.3 // indirect - github.com/soheilhy/cmux v0.1.4 // indirect github.com/spf13/afero v1.1.2 github.com/spf13/cast v1.3.0 github.com/spf13/jwalterweatherman v1.0.0 github.com/spf13/pflag v1.0.3 github.com/stretchr/testify v1.2.2 - github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect - github.com/ugorji/go v1.1.4 // indirect - github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect + github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 // indirect github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 - go.etcd.io/bbolt v1.3.2 // indirect - go.uber.org/atomic v1.4.0 // indirect - go.uber.org/multierr v1.1.0 // indirect - go.uber.org/zap v1.10.0 // indirect - golang.org/x/net v0.0.0-20190522155817-f3200d17e092 // indirect - golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect - google.golang.org/grpc v1.21.0 // indirect + golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 // indirect + golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a // indirect + golang.org/x/text v0.3.0 // indirect gopkg.in/yaml.v2 v2.2.2 ) diff --git a/vendor/github.com/spf13/viper/go.sum b/vendor/github.com/spf13/viper/go.sum index 97afaffe22..5c9fb7d544 100644 --- a/vendor/github.com/spf13/viper/go.sum +++ b/vendor/github.com/spf13/viper/go.sum @@ -1,109 +1,19 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= github.com/armon/consul-api 
v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= 
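A large share of this diff is go.sum churn like the entries above and below. For readers unfamiliar with the format: each dependency gets up to two lines, one hashing the extracted module tree and one (the `/go.mod` suffix) hashing only its go.mod file, and the `h1:` prefix names the hash scheme — a SHA-256 over a sorted manifest of the module's files. A minimal sketch of computing such a hash with golang.org/x/mod/sumdb/dirhash; the directory and module path here are hypothetical, purely for illustration:

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Hash an extracted module the way go.sum does: Hash1 is the
	// function behind the "h1:" scheme seen throughout this diff.
	h, err := dirhash.HashDir("./some-module", "example.com/some/module@v1.0.0", dirhash.Hash1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // prints a string of the form "h1:..."
}

In practice `go mod tidy` and `go mod vendor` maintain these lines; nobody computes them by hand.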
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= @@ -112,67 +22,14 @@ github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= -github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index a3d37f8c2b..7173c6e9a2 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -180,11 +180,10 @@ type Viper struct { remoteProviders []*defaultRemoteProvider // Name of file to look for inside the path - configName string - configFile string - configType string - configPermissions os.FileMode - envPrefix string + configName string + configFile string + configType string + envPrefix string automaticEnvApplied bool envKeyReplacer *strings.Replacer @@ -211,7 +210,6 @@ func New() *Viper { v := new(Viper) v.keyDelim = "." 
v.configName = "config" - v.configPermissions = os.FileMode(0644) v.fs = afero.NewOsFs() v.config = make(map[string]interface{}) v.override = make(map[string]interface{}) @@ -689,12 +687,6 @@ func (v *Viper) Get(key string) interface{} { return cast.ToString(val) case int32, int16, int8, int: return cast.ToInt(val) - case uint: - return cast.ToUint(val) - case uint32: - return cast.ToUint32(val) - case uint64: - return cast.ToUint64(val) case int64: return cast.ToInt64(val) case float64, float32: @@ -758,24 +750,6 @@ func (v *Viper) GetInt64(key string) int64 { return cast.ToInt64(v.Get(key)) } -// GetUint returns the value associated with the key as an unsigned integer. -func GetUint(key string) uint { return v.GetUint(key) } -func (v *Viper) GetUint(key string) uint { - return cast.ToUint(v.Get(key)) -} - -// GetUint32 returns the value associated with the key as an unsigned integer. -func GetUint32(key string) uint32 { return v.GetUint32(key) } -func (v *Viper) GetUint32(key string) uint32 { - return cast.ToUint32(v.Get(key)) -} - -// GetUint64 returns the value associated with the key as an unsigned integer. -func GetUint64(key string) uint64 { return v.GetUint64(key) } -func (v *Viper) GetUint64(key string) uint64 { - return cast.ToUint64(v.Get(key)) -} - // GetFloat64 returns the value associated with the key as a float64. func GetFloat64(key string) float64 { return v.GetFloat64(key) } func (v *Viper) GetFloat64(key string) float64 { @@ -1354,7 +1328,7 @@ func (v *Viper) writeConfig(filename string, force bool) error { return fmt.Errorf("File: %s exists. Use WriteConfig to overwrite.", filename) } } - f, err := v.fs.OpenFile(filename, flags, v.configPermissions) + f, err := v.fs.OpenFile(filename, flags, os.FileMode(0644)) if err != nil { return err } @@ -1791,12 +1765,6 @@ func (v *Viper) SetConfigType(in string) { } } -// SetConfigPermissions sets the permissions for the config file. -func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) } -func (v *Viper) SetConfigPermissions(perm os.FileMode) { - v.configPermissions = perm.Perm() -} - func (v *Viper) getConfigType() string { if v.configType != "" { return v.configType diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000000..9857fe53d3 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "context" + "sync" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. 
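(An editorial aside before the errgroup listing continues.) The viper hunks above are a rollback rather than an upgrade: the configPermissions field, SetConfigPermissions, and the GetUint/GetUint32/GetUint64 getters all disappear, and writeConfig reverts to a hard-coded os.FileMode(0644). A minimal sketch of what downstream callers can still rely on against this older surface — assuming only the API visible in the hunks plus WriteConfigAs, the exported wrapper around the writeConfig shown above:

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	viper.Set("workers", 8)

	// GetUint and friends are gone in this vendored copy, so callers
	// needing unsigned values convert from the signed getters instead.
	workers := uint(viper.GetInt64("workers"))
	fmt.Println("workers:", workers)

	// Config files are now always written with mode 0644; there is no
	// SetConfigPermissions knob left to change that.
	viper.SetConfigType("yaml")
	if err := viper.WriteConfigAs("/tmp/example-config.yaml"); err != nil {
		fmt.Println("write:", err)
	}
}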
+func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go deleted file mode 100644 index c256483434..0000000000 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -// Package registry provides access to the Windows registry. -// -// Here is a simple example, opening a registry key and reading a string value from it. -// -// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) -// if err != nil { -// log.Fatal(err) -// } -// defer k.Close() -// -// s, _, err := k.GetStringValue("SystemRoot") -// if err != nil { -// log.Fatal(err) -// } -// fmt.Printf("Windows system root is %q\n", s) -// -package registry - -import ( - "io" - "syscall" - "time" -) - -const ( - // Registry key security and access rights. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx - // for details. - ALL_ACCESS = 0xf003f - CREATE_LINK = 0x00020 - CREATE_SUB_KEY = 0x00004 - ENUMERATE_SUB_KEYS = 0x00008 - EXECUTE = 0x20019 - NOTIFY = 0x00010 - QUERY_VALUE = 0x00001 - READ = 0x20019 - SET_VALUE = 0x00002 - WOW64_32KEY = 0x00200 - WOW64_64KEY = 0x00100 - WRITE = 0x20006 -) - -// Key is a handle to an open Windows registry key. -// Keys can be obtained by calling OpenKey; there are -// also some predefined root keys such as CURRENT_USER. -// Keys can be used directly in the Windows API. -type Key syscall.Handle - -const ( - // Windows defines some predefined root keys that are always open. - // An application can use these keys as entry points to the registry. - // Normally these keys are used in OpenKey to open new keys, - // but they can also be used anywhere a Key is required. - CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) - CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) - LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) - USERS = Key(syscall.HKEY_USERS) - CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) - PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) -) - -// Close closes open key k. -func (k Key) Close() error { - return syscall.RegCloseKey(syscall.Handle(k)) -} - -// OpenKey opens a new key with path name relative to key k. -// It accepts any open key, including CURRENT_USER and others, -// and returns the new key and an error. -// The access parameter specifies desired access rights to the -// key to be opened. 
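Before the rest of the windows/registry deletion below, a quick note on the errgroup file added above, since it is small enough to read whole: Go launches each task in its own goroutine, the first non-nil error cancels the derived Context, and Wait returns that first error after all tasks finish. A short usage sketch (the task names are illustrative):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())

	for _, name := range []string{"alpha", "beta", "gamma"} {
		name := name // capture the loop variable for the closure
		g.Go(func() error {
			select {
			case <-ctx.Done():
				// Another task already failed; bail out early.
				return ctx.Err()
			default:
			}
			fmt.Println("processing", name)
			return nil
		})
	}

	// Wait blocks for all tasks and reports the first non-nil error.
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}

Note from the doc comment above that a zero Group is also valid; it simply skips the cancellation behavior.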
-func OpenKey(k Key, path string, access uint32) (Key, error) { - p, err := syscall.UTF16PtrFromString(path) - if err != nil { - return 0, err - } - var subkey syscall.Handle - err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) - if err != nil { - return 0, err - } - return Key(subkey), nil -} - -// OpenRemoteKey opens a predefined registry key on another -// computer pcname. The key to be opened is specified by k, but -// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. -// If pcname is "", OpenRemoteKey returns local computer key. -func OpenRemoteKey(pcname string, k Key) (Key, error) { - var err error - var p *uint16 - if pcname != "" { - p, err = syscall.UTF16PtrFromString(`\\` + pcname) - if err != nil { - return 0, err - } - } - var remoteKey syscall.Handle - err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) - if err != nil { - return 0, err - } - return Key(remoteKey), nil -} - -// ReadSubKeyNames returns the names of subkeys of key k. -// The parameter n controls the number of returned names, -// analogous to the way os.File.Readdirnames works. -func (k Key) ReadSubKeyNames(n int) ([]string, error) { - names := make([]string, 0) - // Registry key size limit is 255 bytes and described there: - // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx - buf := make([]uint16, 256) //plus extra room for terminating zero byte -loopItems: - for i := uint32(0); ; i++ { - if n > 0 { - if len(names) == n { - return names, nil - } - } - l := uint32(len(buf)) - for { - err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) - if err == nil { - break - } - if err == syscall.ERROR_MORE_DATA { - // Double buffer size and try again. - l = uint32(2 * len(buf)) - buf = make([]uint16, l) - continue - } - if err == _ERROR_NO_MORE_ITEMS { - break loopItems - } - return names, err - } - names = append(names, syscall.UTF16ToString(buf[:l])) - } - if n > len(names) { - return names, io.EOF - } - return names, nil -} - -// CreateKey creates a key named path under open key k. -// CreateKey returns the new key and a boolean flag that reports -// whether the key already existed. -// The access parameter specifies the access rights for the key -// to be created. -func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { - var h syscall.Handle - var d uint32 - err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), - 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) - if err != nil { - return 0, false, err - } - return Key(h), d == _REG_OPENED_EXISTING_KEY, nil -} - -// DeleteKey deletes the subkey path of key k and its values. -func DeleteKey(k Key, path string) error { - return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) -} - -// A KeyInfo describes the statistics of a key. It is returned by Stat. -type KeyInfo struct { - SubKeyCount uint32 - MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte - ValueCount uint32 - MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte - MaxValueLen uint32 // longest data component among the key's values, in bytes - lastWriteTime syscall.Filetime -} - -// ModTime returns the key's last write time. -func (ki *KeyInfo) ModTime() time.Time { - return time.Unix(0, ki.lastWriteTime.Nanoseconds()) -} - -// Stat retrieves information about the open key k. 
-func (k Key) Stat() (*KeyInfo, error) { - var ki KeyInfo - err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, - &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, - &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) - if err != nil { - return nil, err - } - return &ki, nil -} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go deleted file mode 100644 index cf843ce2b2..0000000000 --- a/vendor/golang.org/x/sys/windows/registry/mksyscall.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build generate - -package registry - -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go deleted file mode 100644 index e66643cbaa..0000000000 --- a/vendor/golang.org/x/sys/windows/registry/syscall.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package registry - -import "syscall" - -const ( - _REG_OPTION_NON_VOLATILE = 0 - - _REG_CREATED_NEW_KEY = 1 - _REG_OPENED_EXISTING_KEY = 2 - - _ERROR_NO_MORE_ITEMS syscall.Errno = 259 -) - -func LoadRegLoadMUIString() error { - return procRegLoadMUIStringW.Find() -} - -//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW -//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW -//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW -//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW -//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW -//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW -//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW - -//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go deleted file mode 100644 index 7487e05f8d..0000000000 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package registry - -import ( - "errors" - "io" - "syscall" - "unicode/utf16" - "unsafe" -) - -const ( - // Registry value types. 
- NONE = 0 - SZ = 1 - EXPAND_SZ = 2 - BINARY = 3 - DWORD = 4 - DWORD_BIG_ENDIAN = 5 - LINK = 6 - MULTI_SZ = 7 - RESOURCE_LIST = 8 - FULL_RESOURCE_DESCRIPTOR = 9 - RESOURCE_REQUIREMENTS_LIST = 10 - QWORD = 11 -) - -var ( - // ErrShortBuffer is returned when the buffer was too short for the operation. - ErrShortBuffer = syscall.ERROR_MORE_DATA - - // ErrNotExist is returned when a registry key or value does not exist. - ErrNotExist = syscall.ERROR_FILE_NOT_FOUND - - // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. - ErrUnexpectedType = errors.New("unexpected key value type") -) - -// GetValue retrieves the type and data for the specified value associated -// with an open key k. It fills up buffer buf and returns the retrieved -// byte count n. If buf is too small to fit the stored value it returns -// ErrShortBuffer error along with the required buffer size n. -// If no buffer is provided, it returns true and actual buffer size n. -// If no buffer is provided, GetValue returns the value's type only. -// If the value does not exist, the error returned is ErrNotExist. -// -// GetValue is a low level function. If value's type is known, use the appropriate -// Get*Value function instead. -func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { - pname, err := syscall.UTF16PtrFromString(name) - if err != nil { - return 0, 0, err - } - var pbuf *byte - if len(buf) > 0 { - pbuf = (*byte)(unsafe.Pointer(&buf[0])) - } - l := uint32(len(buf)) - err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) - if err != nil { - return int(l), valtype, err - } - return int(l), valtype, nil -} - -func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { - p, err := syscall.UTF16PtrFromString(name) - if err != nil { - return nil, 0, err - } - var t uint32 - n := uint32(len(buf)) - for { - err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) - if err == nil { - return buf[:n], t, nil - } - if err != syscall.ERROR_MORE_DATA { - return nil, 0, err - } - if n <= uint32(len(buf)) { - return nil, 0, err - } - buf = make([]byte, n) - } -} - -// GetStringValue retrieves the string value for the specified -// value name associated with an open key k. It also returns the value's type. -// If value does not exist, GetStringValue returns ErrNotExist. -// If value is not SZ or EXPAND_SZ, it will return the correct value -// type and ErrUnexpectedType. -func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { - data, typ, err2 := k.getValue(name, make([]byte, 64)) - if err2 != nil { - return "", typ, err2 - } - switch typ { - case SZ, EXPAND_SZ: - default: - return "", typ, ErrUnexpectedType - } - if len(data) == 0 { - return "", typ, nil - } - u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:] - return syscall.UTF16ToString(u), typ, nil -} - -// GetMUIStringValue retrieves the localized string value for -// the specified value name associated with an open key k. -// If the value name doesn't exist or the localized string value -// can't be resolved, GetMUIStringValue returns ErrNotExist. -// GetMUIStringValue panics if the system doesn't support -// regLoadMUIString; use LoadRegLoadMUIString to check if -// regLoadMUIString is supported before calling this function. 
-func (k Key) GetMUIStringValue(name string) (string, error) { - pname, err := syscall.UTF16PtrFromString(name) - if err != nil { - return "", err - } - - buf := make([]uint16, 1024) - var buflen uint32 - var pdir *uint16 - - err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) - if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path - - // Try to resolve the string value using the system directory as - // a DLL search path; this assumes the string value is of the form - // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. - - // This approach works with tzres.dll but may have to be revised - // in the future to allow callers to provide custom search paths. - - var s string - s, err = ExpandString("%SystemRoot%\\system32\\") - if err != nil { - return "", err - } - pdir, err = syscall.UTF16PtrFromString(s) - if err != nil { - return "", err - } - - err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) - } - - for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed - if buflen <= uint32(len(buf)) { - break // Buffer not growing, assume race; break - } - buf = make([]uint16, buflen) - err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) - } - - if err != nil { - return "", err - } - - return syscall.UTF16ToString(buf), nil -} - -// ExpandString expands environment-variable strings and replaces -// them with the values defined for the current user. -// Use ExpandString to expand EXPAND_SZ strings. -func ExpandString(value string) (string, error) { - if value == "" { - return "", nil - } - p, err := syscall.UTF16PtrFromString(value) - if err != nil { - return "", err - } - r := make([]uint16, 100) - for { - n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) - if err != nil { - return "", err - } - if n <= uint32(len(r)) { - u := (*[1 << 29]uint16)(unsafe.Pointer(&r[0]))[:] - return syscall.UTF16ToString(u), nil - } - r = make([]uint16, n) - } -} - -// GetStringsValue retrieves the []string value for the specified -// value name associated with an open key k. It also returns the value's type. -// If value does not exist, GetStringsValue returns ErrNotExist. -// If value is not MULTI_SZ, it will return the correct value -// type and ErrUnexpectedType. -func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { - data, typ, err2 := k.getValue(name, make([]byte, 64)) - if err2 != nil { - return nil, typ, err2 - } - if typ != MULTI_SZ { - return nil, typ, ErrUnexpectedType - } - if len(data) == 0 { - return nil, typ, nil - } - p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:len(data)/2] - if len(p) == 0 { - return nil, typ, nil - } - if p[len(p)-1] == 0 { - p = p[:len(p)-1] // remove terminating null - } - val = make([]string, 0, 5) - from := 0 - for i, c := range p { - if c == 0 { - val = append(val, string(utf16.Decode(p[from:i]))) - from = i + 1 - } - } - return val, typ, nil -} - -// GetIntegerValue retrieves the integer value for the specified -// value name associated with an open key k. It also returns the value's type. -// If value does not exist, GetIntegerValue returns ErrNotExist. -// If value is not DWORD or QWORD, it will return the correct value -// type and ErrUnexpectedType. 
-func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { - data, typ, err2 := k.getValue(name, make([]byte, 8)) - if err2 != nil { - return 0, typ, err2 - } - switch typ { - case DWORD: - if len(data) != 4 { - return 0, typ, errors.New("DWORD value is not 4 bytes long") - } - var val32 uint32 - copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) - return uint64(val32), DWORD, nil - case QWORD: - if len(data) != 8 { - return 0, typ, errors.New("QWORD value is not 8 bytes long") - } - copy((*[8]byte)(unsafe.Pointer(&val))[:], data) - return val, QWORD, nil - default: - return 0, typ, ErrUnexpectedType - } -} - -// GetBinaryValue retrieves the binary value for the specified -// value name associated with an open key k. It also returns the value's type. -// If value does not exist, GetBinaryValue returns ErrNotExist. -// If value is not BINARY, it will return the correct value -// type and ErrUnexpectedType. -func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { - data, typ, err2 := k.getValue(name, make([]byte, 64)) - if err2 != nil { - return nil, typ, err2 - } - if typ != BINARY { - return nil, typ, ErrUnexpectedType - } - return data, typ, nil -} - -func (k Key) setValue(name string, valtype uint32, data []byte) error { - p, err := syscall.UTF16PtrFromString(name) - if err != nil { - return err - } - if len(data) == 0 { - return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) - } - return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) -} - -// SetDWordValue sets the data and type of a name value -// under key k to value and DWORD. -func (k Key) SetDWordValue(name string, value uint32) error { - return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) -} - -// SetQWordValue sets the data and type of a name value -// under key k to value and QWORD. -func (k Key) SetQWordValue(name string, value uint64) error { - return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) -} - -func (k Key) setStringValue(name string, valtype uint32, value string) error { - v, err := syscall.UTF16FromString(value) - if err != nil { - return err - } - buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] - return k.setValue(name, valtype, buf) -} - -// SetStringValue sets the data and type of a name value -// under key k to value and SZ. The value must not contain a zero byte. -func (k Key) SetStringValue(name, value string) error { - return k.setStringValue(name, SZ, value) -} - -// SetExpandStringValue sets the data and type of a name value -// under key k to value and EXPAND_SZ. The value must not contain a zero byte. -func (k Key) SetExpandStringValue(name, value string) error { - return k.setStringValue(name, EXPAND_SZ, value) -} - -// SetStringsValue sets the data and type of a name value -// under key k to value and MULTI_SZ. The value strings -// must not contain a zero byte. -func (k Key) SetStringsValue(name string, value []string) error { - ss := "" - for _, s := range value { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return errors.New("string cannot have 0 inside") - } - } - ss += s + "\x00" - } - v := utf16.Encode([]rune(ss + "\x00")) - buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] - return k.setValue(name, MULTI_SZ, buf) -} - -// SetBinaryValue sets the data and type of a name value -// under key k to value and BINARY. 
-func (k Key) SetBinaryValue(name string, value []byte) error { - return k.setValue(name, BINARY, value) -} - -// DeleteValue removes a named value from the key k. -func (k Key) DeleteValue(name string) error { - return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) -} - -// ReadValueNames returns the value names of key k. -// The parameter n controls the number of returned names, -// analogous to the way os.File.Readdirnames works. -func (k Key) ReadValueNames(n int) ([]string, error) { - ki, err := k.Stat() - if err != nil { - return nil, err - } - names := make([]string, 0, ki.ValueCount) - buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character -loopItems: - for i := uint32(0); ; i++ { - if n > 0 { - if len(names) == n { - return names, nil - } - } - l := uint32(len(buf)) - for { - err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) - if err == nil { - break - } - if err == syscall.ERROR_MORE_DATA { - // Double buffer size and try again. - l = uint32(2 * len(buf)) - buf = make([]uint16, l) - continue - } - if err == _ERROR_NO_MORE_ITEMS { - break loopItems - } - return names, err - } - names = append(names, syscall.UTF16ToString(buf[:l])) - } - if n > len(names) { - return names, io.EOF - } - return names, nil -} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go deleted file mode 100644 index 3778075da0..0000000000 --- a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go +++ /dev/null @@ -1,120 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package registry - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") - procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") - procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") - procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") - procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") - procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") - procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") - procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") -) - -func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := 
syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index ae93e24719..85c18b5a30 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -281,6 +281,23 @@ func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { lim.limit = newLimit } +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. +func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.burst = newBurst +} + // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index 024408e646..f0f723f356 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -23,7 +23,7 @@ before_install: - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then VET_SKIP_PROTO=1; fi + - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi install: - try3() { eval "$*" || eval "$*" || eval "$*"; } diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 0000000000..9d4213ebca --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 6e69b28c27..4f1567e2f9 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,6 +1,8 @@ # How to contribute -We definitely welcome your patches and contributions to gRPC! +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 0000000000..d6ff267471 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). 
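Buried among the gRPC housekeeping files is the golang.org/x/time/rate hunk above, which adds SetBurst and SetBurstAt as the burst-size counterparts to the existing SetLimit/SetLimitAt: both advance the token bucket to the given time before swapping in the new value, so already-accumulated tokens are preserved. A small usage sketch of the new method:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 10 events per second with an initial burst of 1.
	lim := rate.NewLimiter(rate.Limit(10), 1)

	// Newly added in this hunk: raise the burst size at runtime,
	// without constructing a fresh limiter.
	lim.SetBurst(5)

	start := time.Now()
	for i := 0; i < 5; i++ {
		if err := lim.Wait(context.Background()); err != nil {
			fmt.Println("wait:", err)
			return
		}
	}
	fmt.Printf("5 waits took %v (limit 10/s, burst 5)\n", time.Since(start))
}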
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 0000000000..093c82b3af --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,27 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) +- [canguler](https://github.com/canguler), Google LLC +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index c1a8340c5b..c7f3fa3f36 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -1,12 +1,14 @@ module google.golang.org/grpc +go 1.11 + require ( cloud.google.com/go v0.26.0 // indirect github.com/BurntSushi/toml v0.3.1 // indirect github.com/client9/misspell v0.3.4 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/mock v1.1.1 - github.com/golang/protobuf v1.2.0 + github.com/golang/protobuf v1.3.2 github.com/google/go-cmp v0.2.0 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 golang.org/x/net v0.0.0-20190311183353-d8887717615a diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 741677d2e8..7faff49e7d 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -8,8 +8,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= diff --git 
a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index b8e0aa4db2..ddee20b6be 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -107,8 +107,8 @@ func (*registerStream) isTransportResponseFrame() bool { return false } type headerFrame struct { streamID uint32 hf []hpack.HeaderField - endStream bool // Valid on server side. - initStream func(uint32) (bool, error) // Used only on the client side. + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. onWrite func() wq *writeQuota // write quota for the stream created. cleanup *cleanupStream // Valid on the server side. @@ -637,21 +637,17 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { func (l *loopyWriter) originateStream(str *outStream) error { hdr := str.itl.dequeue().(*headerFrame) - sendPing, err := hdr.initStream(str.id) - if err != nil { + if err := hdr.initStream(str.id); err != nil { if err == ErrConnClosing { return err } // Other errors(errStreamDrain) need not close transport. return nil } - if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } l.estdStreams[str.id] = str - if sendPing { - return l.pingHandler(&ping{data: [8]byte{}}) - } return nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 41a79c5670..9bd8c27b36 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -62,8 +62,6 @@ type http2Client struct { // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. goAway chan struct{} - // awakenKeepalive is used to wake up keepalive when after it has gone dormant. - awakenKeepalive chan struct{} framer *framer // controlBuf delivers all the control related tasks (e.g., window @@ -110,6 +108,16 @@ type http2Client struct { // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason + // A condition variable used to signal when the keepalive goroutine should + // go dormant. The condition for dormancy is based on the number of active + // streams and the `PermitWithoutStream` keepalive client parameter. And + // since the number of active streams is guarded by the above mutex, we use + // the same for this condition variable as well. + kpDormancyCond *sync.Cond + // A boolean to track whether the keepalive goroutine is dormant or not. + // This is checked before attempting to signal the above condition + // variable. + kpDormant bool // Fields below are for channelz metric collection. 
channelzID int64 // channelz unique identification number @@ -232,7 +240,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - awakenKeepalive: make(chan struct{}, 1), framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, @@ -264,9 +271,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne updateFlowControl: t.updateFlowControl, } } - // Make sure awakenKeepalive can't be written upon. - // keepalive routine will make it writable, if need be. - t.awakenKeepalive <- struct{}{} if t.statsHandler != nil { t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, @@ -281,6 +285,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) } if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } // Start the reader goroutine for incoming message. Each transport has @@ -564,7 +569,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) (bool, error) { + initStream: func(id uint32) error { t.mu.Lock() if state := t.state; state != reachable { t.mu.Unlock() @@ -574,29 +579,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea err = ErrConnClosing } cleanup(err) - return false, err + return err } t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) } - var sendPing bool - // If the number of active streams change from 0 to 1, then check if keepalive - // has gone dormant. If so, wake it up. - if len(t.activeStreams) == 1 && t.keepaliveEnabled { - select { - case t.awakenKeepalive <- struct{}{}: - sendPing = true - // Fill the awakenKeepalive channel again as this channel must be - // kept non-writable except at the point that the keepalive() - // goroutine is waiting either to be awaken or shutdown. - t.awakenKeepalive <- struct{}{} - default: - } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() } t.mu.Unlock() - return sendPing, nil + return nil }, onOrphaned: cleanup, wq: s.wq, @@ -778,6 +773,11 @@ func (t *http2Client) Close() error { t.state = closing streams := t.activeStreams t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. 
+ t.kpDormancyCond.Signal() + } t.mu.Unlock() t.controlBuf.finish() t.cancel() @@ -853,11 +853,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e return t.controlBuf.put(df) } -func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Client) getStream(f http2.Frame) *Stream { t.mu.Lock() - defer t.mu.Unlock() - s, ok := t.activeStreams[f.Header().StreamID] - return s, ok + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s } // adjustWindow sends out extra window update over the initial window size @@ -937,8 +937,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.controlBuf.put(bdpPing) } // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { + s := t.getStream(f) + if s == nil { return } if size > 0 { @@ -969,8 +969,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { + s := t.getStream(f) + if s == nil { return } if f.ErrCode == http2.ErrCodeRefusedStream { @@ -1147,8 +1147,8 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { // operateHeaders takes action on the decoded headers. func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { - s, ok := t.getStream(frame) - if !ok { + s := t.getStream(frame) + if s == nil { return } endStream := frame.StreamEnded() @@ -1303,29 +1303,32 @@ func (t *http2Client) keepalive() { timer.Reset(t.kp.Time) continue } - // Check if keepalive should go dormant. t.mu.Lock() - if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { - // Make awakenKeepalive writable. - <-t.awakenKeepalive - t.mu.Unlock() - select { - case <-t.awakenKeepalive: - // If the control gets here a ping has been sent - // need to reset the timer with keepalive.Timeout. - case <-t.ctx.Done(): - return - } - } else { + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. t.mu.Unlock() - if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) - } - // Send ping. - t.controlBuf.put(p) + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) } + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. + t.controlBuf.put(p) - // By the time control gets here a ping has been sent one way or the other. timer.Reset(t.kp.Timeout) select { case <-timer.C: diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 4e26f6a1d6..33686a11ab 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -65,8 +65,7 @@ var ( // http2Server implements the ServerTransport interface with HTTP2. 
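Before the server-side struct below: the client-side hunks above replace the old awakenKeepalive channel dance with a sync.Cond (kpDormancyCond) guarded by the transport mutex plus a kpDormant flag, and they make Close() signal the condition so a dormant keepalive goroutine can never block forever. A standalone sketch of that dormancy pattern, with generic names standing in for the grpc internals (this is not the grpc code itself):

package main

import (
	"fmt"
	"sync"
	"time"
)

// keepaliver mimics the kpDormancyCond pattern from the hunks above:
// the loop sleeps on a condition variable while there are no active
// streams, and wakes on the first new stream or on shutdown.
type keepaliver struct {
	mu      sync.Mutex
	cond    *sync.Cond
	active  int  // stands in for len(t.activeStreams)
	closing bool // stands in for t.state == closing
}

func newKeepaliver() *keepaliver {
	k := &keepaliver{}
	k.cond = sync.NewCond(&k.mu)
	go k.loop()
	return k
}

func (k *keepaliver) loop() {
	for {
		k.mu.Lock()
		// Go dormant while there is nothing to ping for; re-check
		// closing on each wakeup so close() can always unblock us.
		for k.active == 0 && !k.closing {
			k.cond.Wait()
		}
		if k.closing {
			k.mu.Unlock()
			return
		}
		k.mu.Unlock()
		fmt.Println("ping") // stands in for t.controlBuf.put(p)
		time.Sleep(20 * time.Millisecond)
	}
}

func (k *keepaliver) addStream() {
	k.mu.Lock()
	k.active++
	k.cond.Signal() // wake the loop if it was dormant
	k.mu.Unlock()
}

func (k *keepaliver) close() {
	k.mu.Lock()
	k.closing = true
	k.cond.Signal() // without this, a dormant loop would wait forever
	k.mu.Unlock()
}

func main() {
	k := newKeepaliver()
	k.addStream()
	time.Sleep(50 * time.Millisecond)
	k.close()
	time.Sleep(30 * time.Millisecond)
}

The server side makes the mirror-image simplification just below: it drops its cancelable context in favor of a plain done channel that keepalive, Write, outgoingGoAwayHandler, and getOutFlowWindow all select on.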
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 4e26f6a1d6..33686a11ab 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -65,8 +65,7 @@ var (
 // http2Server implements the ServerTransport interface with HTTP2.
 type http2Server struct {
 	ctx     context.Context
-	ctxDone <-chan struct{} // Cache the context.Done() chan
-	cancel  context.CancelFunc
+	done    chan struct{}
 	conn    net.Conn
 	loopy   *loopyWriter
 	readerDone chan struct{} // sync point to enable testing.
@@ -206,11 +205,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 	if kep.MinTime == 0 {
 		kep.MinTime = defaultKeepalivePolicyMinTime
 	}
-	ctx, cancel := context.WithCancel(context.Background())
+	done := make(chan struct{})
 	t := &http2Server{
-		ctx:     ctx,
-		cancel:  cancel,
-		ctxDone: ctx.Done(),
+		ctx:  context.Background(),
+		done: done,
 		conn: conn,
 		remoteAddr: conn.RemoteAddr(),
 		localAddr:  conn.LocalAddr(),
@@ -231,7 +229,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 		czData:     new(channelzData),
 		bufferPool: newBufferPool(),
 	}
-	t.controlBuf = newControlBuffer(t.ctxDone)
+	t.controlBuf = newControlBuffer(t.done)
 	if dynamicWindow {
 		t.bdpEst = &bdpEstimator{
 			bdp: initialWindowSize,
@@ -362,12 +360,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 			rstCode: http2.ErrCodeRefusedStream,
 			onWrite: func() {},
 		})
+		s.cancel()
 		return false
 	}
 	}
 	t.mu.Lock()
 	if t.state != reachable {
 		t.mu.Unlock()
+		s.cancel()
 		return false
 	}
 	if uint32(len(t.activeStreams)) >= t.maxStreams {
@@ -378,12 +378,14 @@
 			rstCode: http2.ErrCodeRefusedStream,
 			onWrite: func() {},
 		})
+		s.cancel()
 		return false
 	}
 	if streamID%2 != 1 || streamID <= t.maxStreamID {
 		t.mu.Unlock()
 		// illegal gRPC stream id.
 		errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+		s.cancel()
 		return true
 	}
 	t.maxStreamID = streamID
@@ -885,7 +887,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 	// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
 	s.cancel()
 	select {
-	case <-t.ctx.Done():
+	case <-t.done:
 		return ErrConnClosing
 	default:
 	}
@@ -907,7 +909,7 @@
 	}
 	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
 		select {
-		case <-t.ctx.Done():
+		case <-t.done:
 			return ErrConnClosing
 		default:
 		}
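
In the server transport above, a context carried purely for lifetime management is replaced by a bare done channel that Close() closes; closing a channel broadcasts to every goroutine selecting on it, exactly as ctx.Done() did, with no context or CancelFunc to keep around. A small sketch of the pattern; server, keepalive, and the sync.Once guard are assumptions for the demo, not the vendored implementation (which relies on its own state checks to close done exactly once):

package main

import (
	"fmt"
	"sync"
	"time"
)

type server struct {
	done      chan struct{} // closed exactly once by Close(); replaces ctx.Done()
	closeOnce sync.Once
}

// Close broadcasts shutdown to every goroutine selecting on s.done.
func (s *server) Close() {
	s.closeOnce.Do(func() { close(s.done) })
}

// keepalive is a stand-in for any per-transport goroutine that must exit
// promptly when the transport shuts down.
func (s *server) keepalive(id int) {
	ticker := time.NewTicker(5 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// the real transport would send a keepalive ping here
		case <-s.done:
			fmt.Printf("worker %d: transport closed\n", id)
			return
		}
	}
}

func main() {
	s := &server{done: make(chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) { defer wg.Done(); s.keepalive(i) }(i)
	}
	time.Sleep(20 * time.Millisecond)
	s.Close()
	wg.Wait()
}
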
@@ -973,7 +975,7 @@ func (t *http2Server) keepalive() {
 			t.Close()
 			// Resetting the timer so that the clean-up doesn't deadlock.
 			maxAge.Reset(infinity)
-		case <-t.ctx.Done():
+		case <-t.done:
 		}
 		return
 	case <-keepalive.C:
@@ -995,7 +997,7 @@
 		}
 		t.controlBuf.put(p)
 		keepalive.Reset(t.kp.Timeout)
-	case <-t.ctx.Done():
+	case <-t.done:
 		return
 	}
 }
@@ -1015,7 +1017,7 @@
 	t.activeStreams = nil
 	t.mu.Unlock()
 	t.controlBuf.finish()
-	t.cancel()
+	close(t.done)
 	err := t.conn.Close()
 	if channelz.IsOn() {
 		channelz.RemoveEntry(t.channelzID)
@@ -1155,7 +1157,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
 	select {
 	case <-t.drainChan:
 	case <-timer.C:
-	case <-t.ctx.Done():
+	case <-t.done:
 		return
 	}
 	t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
@@ -1205,7 +1207,7 @@ func (t *http2Server) getOutFlowWindow() int64 {
 	select {
 	case sz := <-resp:
 		return int64(sz)
-	case <-t.ctxDone:
+	case <-t.done:
 		return -1
 	case <-timer.C:
 		return -2
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index d0787f1e2a..686ad7ba61 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -310,6 +310,14 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
 			}
 			break
 		}
+		if sc.lbConfig == nil {
+			// We had a loadBalancingConfig field but did not encounter a
+			// supported policy. The config is considered invalid in this
+			// case.
+			err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
+			grpclog.Warningf(err.Error())
+			return nil, err
+		}
 	}
 	if rsc.MethodConfig == nil {
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 5888505638..483ef8968a 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@ package grpc

 // Version is the current grpc version.
-const Version = "1.23.1"
+const Version = "1.24.0"
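
The parseServiceConfig hunk above turns a loadBalancingConfig list with no supported entry into a hard error instead of a silent fall-through. A simplified sketch of that selection rule, with a map standing in for gRPC's balancer registry; pickLBPolicy and supported are illustrative names, not the vendored API:

package main

import (
	"encoding/json"
	"fmt"
)

// supported is a stand-in for the balancer registry consulted by gRPC.
var supported = map[string]bool{"round_robin": true, "pick_first": true}

// pickLBPolicy scans the ordered loadBalancingConfig list and returns the
// first supported policy. If the field is present but no entry is supported,
// the whole config is rejected, mirroring the new validation.
func pickLBPolicy(loadBalancingConfig []map[string]json.RawMessage) (string, error) {
	if loadBalancingConfig == nil {
		return "", nil // field absent: nothing to validate
	}
	for _, entry := range loadBalancingConfig {
		for name := range entry {
			if supported[name] {
				return name, nil
			}
		}
	}
	return "", fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
}

func main() {
	js := `[{"grpclb":{}},{"round_robin":{}}]`
	var cfgs []map[string]json.RawMessage
	if err := json.Unmarshal([]byte(js), &cfgs); err != nil {
		panic(err)
	}
	policy, err := pickLBPolicy(cfgs)
	fmt.Println(policy, err) // round_robin <nil>
}
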
grep -vE "(_mock|\.pb)\.go:") | fail_on_output +goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go") | fail_on_output golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") go vet -all . @@ -111,6 +111,7 @@ google.golang.org/grpc/balancer.go:SA1019 google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go:SA1019 google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019 google.golang.org/grpc/xds/internal/balancer/edsbalancer/balancergroup.go:SA1019 +google.golang.org/grpc/xds/internal/resolver/xds_resolver.go:SA1019 google.golang.org/grpc/xds/internal/balancer/xds.go:SA1019 google.golang.org/grpc/xds/internal/balancer/xds_client.go:SA1019 google.golang.org/grpc/balancer_conn_wrappers.go:SA1019 diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh old mode 100755 new mode 100644 diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh old mode 100755 new mode 100644 diff --git a/vendor/modules.txt b/vendor/modules.txt index 7be46b3d9c..4367d70492 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -21,6 +21,7 @@ github.com/Microsoft/go-winio/backuptar github.com/Microsoft/go-winio/pkg/guid # github.com/Microsoft/hcsshim v0.8.6 github.com/Microsoft/hcsshim +github.com/Microsoft/hcsshim/osversion github.com/Microsoft/hcsshim/internal/guid github.com/Microsoft/hcsshim/internal/hcs github.com/Microsoft/hcsshim/internal/hcserror @@ -60,23 +61,27 @@ github.com/blang/semver # github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b github.com/checkpoint-restore/go-criu/rpc github.com/checkpoint-restore/go-criu -# github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50 +# github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa github.com/containerd/console -# github.com/containerd/containerd v1.2.9 +# github.com/containerd/containerd v1.3.0 github.com/containerd/containerd/api/types/task github.com/containerd/containerd/namespaces github.com/containerd/containerd/runtime/v2/shim github.com/containerd/containerd/runtime/v2/task github.com/containerd/containerd/errdefs +github.com/containerd/containerd/api/services/ttrpc/events/v1 github.com/containerd/containerd/events github.com/containerd/containerd/log +github.com/containerd/containerd/pkg/ttrpcutil github.com/containerd/containerd/sys github.com/containerd/containerd/sys/reaper github.com/containerd/containerd/api/types github.com/containerd/containerd/cio github.com/containerd/containerd/defaults # github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 -github.com/containerd/continuity/pathdriver +github.com/containerd/continuity/fs +github.com/containerd/continuity/sysx +github.com/containerd/continuity/syscallx # github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 github.com/containerd/fifo # github.com/containerd/go-runc v0.0.0-20190603165425-9007c2405372 @@ -94,7 +99,7 @@ github.com/containernetworking/cni/pkg/version github.com/containernetworking/cni/pkg/types/020 # github.com/containernetworking/plugins v0.8.2 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.11.2 +# github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 github.com/containers/buildah/pkg/parse github.com/containers/buildah github.com/containers/buildah/pkg/unshare @@ -105,12 +110,12 @@ github.com/containers/buildah/bind github.com/containers/buildah/chroot github.com/containers/buildah/docker 
 github.com/containers/buildah/pkg/blobcache
+github.com/containers/buildah/pkg/cgroups
 github.com/containers/buildah/pkg/chrootuser
 github.com/containers/buildah/pkg/overlay
 github.com/containers/buildah/pkg/umask
-github.com/containers/buildah/pkg/cgroups
 github.com/containers/buildah/pkg/cli
-# github.com/containers/conmon v2.0.0+incompatible
+# github.com/containers/conmon v2.0.5+incompatible
 github.com/containers/conmon/runner/config
 # github.com/containers/image v3.0.2+incompatible
 github.com/containers/image/docker/reference
@@ -121,41 +126,56 @@ github.com/containers/image/manifest
 github.com/containers/image/transports
 github.com/containers/image/version
 github.com/containers/image/pkg/strslice
-github.com/containers/image/directory
-github.com/containers/image/docker/archive
-github.com/containers/image/oci/archive
-github.com/containers/image/storage
-github.com/containers/image/copy
-github.com/containers/image/docker
-github.com/containers/image/docker/tarfile
-github.com/containers/image/image
-github.com/containers/image/oci/layout
-github.com/containers/image/transports/alltransports
-github.com/containers/image/directory/explicitfilepath
-github.com/containers/image/internal/tmpdir
-github.com/containers/image/oci/internal
-github.com/containers/image/pkg/blobinfocache/none
-github.com/containers/image/tarball
-github.com/containers/image/pkg/blobinfocache
-github.com/containers/image/pkg/compression
-github.com/containers/image/docker/policyconfiguration
-github.com/containers/image/pkg/docker/config
-github.com/containers/image/pkg/tlsclientconfig
-github.com/containers/image/docker/daemon
-github.com/containers/image/openshift
-github.com/containers/image/ostree
-github.com/containers/image/pkg/blobinfocache/boltdb
-github.com/containers/image/pkg/blobinfocache/memory
-github.com/containers/image/pkg/keyctl
-github.com/containers/image/pkg/blobinfocache/internal/prioritize
-# github.com/containers/libpod v1.5.1
+# github.com/containers/image/v5 v5.0.0
+github.com/containers/image/v5/pkg/sysregistriesv2
+github.com/containers/image/v5/types
+github.com/containers/image/v5/docker/reference
+github.com/containers/image/v5/pkg/compression/types
+github.com/containers/image/v5/manifest
+github.com/containers/image/v5/pkg/compression/internal
+github.com/containers/image/v5/pkg/compression
+github.com/containers/image/v5/pkg/strslice
+github.com/containers/image/v5/directory
+github.com/containers/image/v5/docker/archive
+github.com/containers/image/v5/oci/archive
+github.com/containers/image/v5/storage
+github.com/containers/image/v5/copy
+github.com/containers/image/v5/docker
+github.com/containers/image/v5/docker/tarfile
+github.com/containers/image/v5/image
+github.com/containers/image/v5/oci/layout
+github.com/containers/image/v5/signature
+github.com/containers/image/v5/transports
+github.com/containers/image/v5/transports/alltransports
+github.com/containers/image/v5/directory/explicitfilepath
+github.com/containers/image/v5/internal/tmpdir
+github.com/containers/image/v5/oci/internal
+github.com/containers/image/v5/pkg/blobinfocache/none
+github.com/containers/image/v5/tarball
+github.com/containers/image/v5/pkg/blobinfocache
+github.com/containers/image/v5/docker/policyconfiguration
+github.com/containers/image/v5/pkg/docker/config
+github.com/containers/image/v5/pkg/tlsclientconfig
+github.com/containers/image/v5/version
+github.com/containers/image/v5/docker/daemon
+github.com/containers/image/v5/openshift
+github.com/containers/image/v5/ostree
+github.com/containers/image/v5/pkg/blobinfocache/boltdb
+github.com/containers/image/v5/pkg/blobinfocache/memory
+github.com/containers/image/v5/internal/pkg/keyctl
+github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize
+# github.com/containers/libpod v1.6.3-0.20191101152258-04e8bf3dba50
+github.com/containers/libpod/pkg/hooks
 github.com/containers/libpod/pkg/rootless
 github.com/containers/libpod/pkg/spec
+github.com/containers/libpod/pkg/hooks/1.0.0
 github.com/containers/libpod/pkg/errorhandling
 github.com/containers/libpod/libpod
+github.com/containers/libpod/libpod/config
 github.com/containers/libpod/libpod/define
 github.com/containers/libpod/pkg/cgroups
 github.com/containers/libpod/pkg/namespaces
+github.com/containers/libpod/pkg/sysinfo
 github.com/containers/libpod/pkg/util
 github.com/containers/libpod/pkg/lookup
 github.com/containers/libpod/libpod/driver
@@ -168,8 +188,6 @@ github.com/containers/libpod/pkg/annotations
 github.com/containers/libpod/pkg/apparmor
 github.com/containers/libpod/pkg/criu
 github.com/containers/libpod/pkg/ctime
-github.com/containers/libpod/pkg/firewall
-github.com/containers/libpod/pkg/hooks
 github.com/containers/libpod/pkg/hooks/exec
 github.com/containers/libpod/pkg/kubeutils
 github.com/containers/libpod/pkg/netns
@@ -182,22 +200,23 @@ github.com/containers/libpod/cmd/podman/cliconfig
 github.com/containers/libpod/pkg/inspect
 github.com/containers/libpod/libpod/lock/file
 github.com/containers/libpod/libpod/lock/shm
-github.com/containers/libpod/pkg/hooks/1.0.0
 github.com/containers/libpod/pkg/resolvconf/dns
-# github.com/containers/psgo v1.3.1
+# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b
+github.com/containers/libtrust
+# github.com/containers/psgo v1.4.0
 github.com/containers/psgo
 github.com/containers/psgo/internal/capabilities
 github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
+github.com/containers/psgo/internal/cgroups
 github.com/containers/psgo/internal/host
-# github.com/containers/storage v1.13.4
+# github.com/containers/storage v1.13.5
 github.com/containers/storage/pkg/config
 github.com/containers/storage/pkg/homedir
 github.com/containers/storage
 github.com/containers/storage/pkg/idtools
 github.com/containers/storage/pkg/mount
-github.com/containers/storage/pkg/stringid
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/register
 github.com/containers/storage/pkg/archive
@@ -205,6 +224,7 @@ github.com/containers/storage/pkg/directory
 github.com/containers/storage/pkg/ioutils
 github.com/containers/storage/pkg/lockfile
 github.com/containers/storage/pkg/parsers
+github.com/containers/storage/pkg/stringid
 github.com/containers/storage/pkg/stringutils
 github.com/containers/storage/pkg/system
 github.com/containers/storage/pkg/tarlog
@@ -230,7 +250,6 @@ github.com/containers/storage/pkg/parsers/kernel
 github.com/containers/storage/drivers/overlayutils
 github.com/containers/storage/drivers/quota
 github.com/containers/storage/pkg/fsutils
-github.com/containers/storage/pkg/ostree
 github.com/containers/storage/drivers/copy
 # github.com/coreos/container-linux-config-transpiler v0.9.0
 github.com/coreos/container-linux-config-transpiler/config
@@ -240,8 +259,6 @@ github.com/coreos/container-linux-config-transpiler/config/platform
 github.com/coreos/container-linux-config-transpiler/config/templating
 github.com/coreos/container-linux-config-transpiler/config/types/util
 github.com/coreos/container-linux-config-transpiler/internal/util
-# github.com/coreos/go-iptables v0.4.2
-github.com/coreos/go-iptables/iptables
 # github.com/coreos/go-semver v0.3.0
 github.com/coreos/go-semver/semver
 # github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
@@ -271,7 +288,7 @@ github.com/coreos/ignition/config/v1/types
 github.com/coreos/pkg/dlopen
 # github.com/creack/pty v1.1.9
 github.com/creack/pty
-# github.com/cri-o/cri-o v1.9.0-beta.2.0.20191003162030-4775e1c05c26
+# github.com/cri-o/cri-o v1.16.2
 github.com/cri-o/cri-o/pkg/config
 github.com/cri-o/cri-o/internal/oci
 github.com/cri-o/cri-o/internal/version
@@ -300,42 +317,43 @@ github.com/docker/distribution/registry/client/transport
 github.com/docker/distribution/registry/storage/cache
 github.com/docker/distribution/registry/storage/cache/memory
 github.com/docker/distribution/metrics
-# github.com/docker/docker v0.7.3-0.20190410184157-6d18c6a06295
+# github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce
 github.com/docker/docker/api/types/versions
-github.com/docker/docker/pkg/signal
 github.com/docker/docker/oci/caps
-github.com/docker/docker/profiles/seccomp
 github.com/docker/docker/pkg/pools
+github.com/docker/docker/pkg/signal
 github.com/docker/docker/pkg/term
 github.com/docker/docker/pkg/symlink
 github.com/docker/docker/pkg/namesgenerator
+github.com/docker/docker/pkg/parsers
 github.com/docker/docker/errdefs
-github.com/docker/docker/api/types
-github.com/docker/docker/pkg/parsers/kernel
 github.com/docker/docker/pkg/ioutils
 github.com/docker/docker/pkg/term/windows
 github.com/docker/docker/pkg/longpath
 github.com/docker/docker/pkg/system
-github.com/docker/docker/api/types/container
-github.com/docker/docker/api/types/filters
-github.com/docker/docker/api/types/mount
-github.com/docker/docker/api/types/network
+github.com/docker/docker/pkg/mount
 github.com/docker/docker/api/types/registry
 github.com/docker/docker/api/types/swarm
-github.com/docker/docker/pkg/mount
+github.com/docker/docker/pkg/archive
 github.com/docker/docker/pkg/fileutils
 github.com/docker/docker/pkg/homedir
+github.com/docker/docker/pkg/jsonmessage
 github.com/docker/docker/pkg/stdcopy
-github.com/docker/docker/api/types/blkiodev
-github.com/docker/docker/api/types/strslice
-github.com/docker/docker/api/types/swarm/runtime
 github.com/docker/docker/client
+github.com/docker/docker/api/types/container
+github.com/docker/docker/api/types/mount
+github.com/docker/docker/api/types/network
+github.com/docker/docker/api/types/swarm/runtime
 github.com/docker/docker/pkg/idtools
 github.com/docker/docker/api
+github.com/docker/docker/api/types
 github.com/docker/docker/api/types/events
+github.com/docker/docker/api/types/filters
 github.com/docker/docker/api/types/image
 github.com/docker/docker/api/types/time
 github.com/docker/docker/api/types/volume
+github.com/docker/docker/api/types/blkiodev
+github.com/docker/docker/api/types/strslice
 # github.com/docker/docker-credential-helpers v0.6.3
 github.com/docker/docker-credential-helpers/client
 github.com/docker/docker-credential-helpers/credentials
@@ -367,11 +385,8 @@ github.com/evanphx/json-patch
 github.com/fatih/color
 # github.com/fsnotify/fsnotify v1.4.7
 github.com/fsnotify/fsnotify
-# github.com/fsouza/go-dockerclient v1.4.1
+# github.com/fsouza/go-dockerclient v1.5.0
 github.com/fsouza/go-dockerclient
-github.com/fsouza/go-dockerclient/internal/archive
-github.com/fsouza/go-dockerclient/internal/jsonmessage
-github.com/fsouza/go-dockerclient/internal/term
 # github.com/ghodss/yaml v1.0.0
 github.com/ghodss/yaml
 # github.com/go-bindata/go-bindata v3.1.1+incompatible
@@ -564,8 +579,6 @@ github.com/hpcloud/tail/watch
 github.com/hpcloud/tail/winfile
 # github.com/huandu/xstrings v1.2.0
 github.com/huandu/xstrings
-# github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd
-github.com/ijc/Gotty
 # github.com/imdario/mergo v0.3.7
 github.com/imdario/mergo
 # github.com/inconshreveable/mousetrap v1.0.0
@@ -579,7 +592,7 @@ github.com/json-iterator/go
 # github.com/kisielk/gotool v1.0.0
 github.com/kisielk/gotool
 github.com/kisielk/gotool/internal/load
-# github.com/klauspost/compress v1.7.2
+# github.com/klauspost/compress v1.8.1
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/huff0
@@ -604,7 +617,7 @@ github.com/mailru/easyjson/buffer
 github.com/mattn/go-colorable
 # github.com/mattn/go-isatty v0.0.9
 github.com/mattn/go-isatty
-# github.com/mattn/go-shellwords v1.0.5
+# github.com/mattn/go-shellwords v1.0.6
 github.com/mattn/go-shellwords
 # github.com/matttproud/golang_protobuf_extensions v1.0.1
 github.com/matttproud/golang_protobuf_extensions/pbutil
@@ -618,6 +631,8 @@ github.com/mitchellh/mapstructure
 github.com/modern-go/concurrent
 # github.com/modern-go/reflect2 v1.0.1
 github.com/modern-go/reflect2
+# github.com/morikuni/aec v1.0.0
+github.com/morikuni/aec
 # github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
 github.com/mrunalp/fileutils
 # github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c
@@ -634,7 +649,7 @@ github.com/nbutton23/zxcvbn-go/frequency
 github.com/nbutton23/zxcvbn-go/data
 # github.com/opencontainers/go-digest v1.0.0-rc1
 github.com/opencontainers/go-digest
-# github.com/opencontainers/image-spec v1.0.1
+# github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
 github.com/opencontainers/image-spec/specs-go/v1
 github.com/opencontainers/image-spec/specs-go
 # github.com/opencontainers/runc v1.0.0-rc9
@@ -642,9 +657,9 @@ github.com/opencontainers/runc/libcontainer/user
 github.com/opencontainers/runc/libcontainer/configs
 github.com/opencontainers/runc/libcontainer/devices
 github.com/opencontainers/runc/libcontainer
+github.com/opencontainers/runc/libcontainer/cgroups
 github.com/opencontainers/runc/libcontainer/system
 github.com/opencontainers/runc/libcontainer/apparmor
-github.com/opencontainers/runc/libcontainer/cgroups
 github.com/opencontainers/runc/libcontainer/cgroups/fs
 github.com/opencontainers/runc/libcontainer/cgroups/systemd
 github.com/opencontainers/runc/libcontainer/configs/validate
@@ -672,35 +687,35 @@ github.com/openshift/api/config/v1
 github.com/openshift/api/operator/v1alpha1
 github.com/openshift/api/operator/v1
 # github.com/openshift/client-go v0.0.0-20191001081553-3b0e988f8cb0
-github.com/openshift/client-go/config/clientset/versioned
 github.com/openshift/client-go/operator/clientset/versioned
-github.com/openshift/client-go/config/informers/externalversions
 github.com/openshift/client-go/operator/informers/externalversions
+github.com/openshift/client-go/operator/informers/externalversions/operator/v1
+github.com/openshift/client-go/config/clientset/versioned
+github.com/openshift/client-go/config/informers/externalversions
 github.com/openshift/client-go/config/informers/externalversions/config/v1
 github.com/openshift/client-go/config/listers/config/v1
 github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1
 github.com/openshift/client-go/operator/listers/operator/v1alpha1
 github.com/openshift/client-go/config/clientset/versioned/scheme
+github.com/openshift/client-go/operator/listers/operator/v1
 github.com/openshift/client-go/config/clientset/versioned/typed/config/v1
 github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1
 github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1
-github.com/openshift/client-go/config/informers/externalversions/config
-github.com/openshift/client-go/config/informers/externalversions/internalinterfaces
 github.com/openshift/client-go/operator/informers/externalversions/internalinterfaces
 github.com/openshift/client-go/operator/informers/externalversions/operator
+github.com/openshift/client-go/config/informers/externalversions/config
+github.com/openshift/client-go/config/informers/externalversions/internalinterfaces
 github.com/openshift/client-go/config/clientset/versioned/fake
 github.com/openshift/client-go/operator/clientset/versioned/fake
 github.com/openshift/client-go/operator/clientset/versioned/scheme
-github.com/openshift/client-go/operator/informers/externalversions/operator/v1
 github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake
 github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake
 github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake
-github.com/openshift/client-go/operator/listers/operator/v1
 # github.com/openshift/cluster-api v0.0.0-20190923092624-4024de4fa64d => github.com/openshift/cluster-api v0.0.0-20191004085540-83f32d3e7070
 github.com/openshift/cluster-api/pkg/drain
 # github.com/openshift/cluster-etcd-operator v0.0.0-alpha.0.0.20191025163650-5854b5c48ce4
 github.com/openshift/cluster-etcd-operator/pkg/operator/api
-# github.com/openshift/imagebuilder v1.1.0
+# github.com/openshift/imagebuilder v1.1.1
 github.com/openshift/imagebuilder
 github.com/openshift/imagebuilder/dockerfile/parser
 github.com/openshift/imagebuilder/dockerfile/command
@@ -717,13 +732,13 @@ github.com/opentracing/opentracing-go/log
 # github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913
 github.com/ostreedev/ostree-go/pkg/glibobject
 github.com/ostreedev/ostree-go/pkg/otbuiltin
-# github.com/pelletier/go-toml v1.4.0
+# github.com/pelletier/go-toml v1.2.0
 github.com/pelletier/go-toml
 # github.com/pkg/errors v0.8.1
 github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 github.com/pmezard/go-difflib/difflib
-# github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
+# github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9
 github.com/pquerna/ffjson/fflib/v1
 github.com/pquerna/ffjson/inception
 github.com/pquerna/ffjson/shared
@@ -765,7 +780,7 @@ github.com/spf13/cobra
 github.com/spf13/jwalterweatherman
 # github.com/spf13/pflag v1.0.5
 github.com/spf13/pflag
-# github.com/spf13/viper v1.4.0
+# github.com/spf13/viper v1.3.2
 github.com/spf13/viper
 # github.com/stretchr/testify v1.4.0
 github.com/stretchr/testify/assert
@@ -834,17 +849,17 @@ golang.org/x/oauth2
 golang.org/x/oauth2/internal
 # golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
 golang.org/x/sync/semaphore
+golang.org/x/sync/errgroup
 # golang.org/x/sys v0.0.0-20191002091554-b397fe3ad8ed
 golang.org/x/sys/windows
 golang.org/x/sys/unix
-golang.org/x/sys/windows/registry
 # golang.org/x/text v0.3.2
 golang.org/x/text/secure/bidirule
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
 golang.org/x/text/transform
 golang.org/x/text/width
-# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
+# golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0
 golang.org/x/time/rate
 # golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868
 golang.org/x/tools/go/packages
@@ -929,7 +944,7 @@ google.golang.org/appengine/internal/log
 google.golang.org/appengine/internal/remote_api
 # google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.23.1
+# google.golang.org/grpc v1.24.0
 google.golang.org/grpc/metadata
 google.golang.org/grpc/codes
 google.golang.org/grpc/status