diff --git a/go.mod b/go.mod
index c641a11a6c8..6b77ac74c30 100644
--- a/go.mod
+++ b/go.mod
@@ -3,14 +3,14 @@ module github.com/containers/buildah
go 1.13
require (
- github.com/containerd/containerd v1.5.5
+ github.com/containerd/containerd v1.5.7
github.com/containernetworking/cni v0.8.1
github.com/containers/common v0.44.0
- github.com/containers/image/v5 v5.16.0
- github.com/containers/ocicrypt v1.1.2
- github.com/containers/storage v1.36.0
- github.com/docker/distribution v2.7.1+incompatible
- github.com/docker/go-units v0.4.0
+ github.com/containers/image/v5 v5.23.1
+ github.com/containers/ocicrypt v1.1.5
+ github.com/containers/storage v1.43.0
+ github.com/docker/distribution v2.8.1+incompatible
+ github.com/docker/go-units v0.5.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
github.com/fsouza/go-dockerclient v1.7.4
github.com/ghodss/yaml v1.0.0
@@ -18,27 +18,27 @@ require (
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
github.com/mattn/go-shellwords v1.0.12
- github.com/onsi/ginkgo v1.16.4
- github.com/onsi/gomega v1.16.0
+ github.com/onsi/ginkgo v1.16.5
+ github.com/onsi/gomega v1.19.0
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
- github.com/opencontainers/runc v1.0.2
+ github.com/opencontainers/image-spec v1.1.0-rc1
+ github.com/opencontainers/runc v1.1.4
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runtime-tools v0.9.0
- github.com/opencontainers/selinux v1.8.5
+ github.com/opencontainers/selinux v1.10.2
github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656
github.com/pkg/errors v0.9.1
- github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
- github.com/sirupsen/logrus v1.8.1
- github.com/spf13/cobra v1.2.1
+ github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646
+ github.com/sirupsen/logrus v1.9.0
+ github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.7.0
+ github.com/stretchr/testify v1.8.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
go.etcd.io/bbolt v1.3.6
- golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
- golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55
- golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
+ golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0
+ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
+ golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8
+ golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
k8s.io/klog v1.0.0 // indirect
)
diff --git a/go.sum b/go.sum
index 1733a00cbf5..eeeb795e2bb 100644
--- a/go.sum
+++ b/go.sum
@@ -1,4 +1,6 @@
+4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -12,6 +14,7 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
@@ -19,45 +22,101 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.103.0/go.mod h1:vwLx1nqLrzLX/fpwSMOXmFIqBOyHsvHbnAdbGSJ+mKk=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w=
+cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
+github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
+github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
+github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
+github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo=
+github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
+github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
@@ -66,8 +125,9 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -77,15 +137,22 @@ github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.8.22 h1:CulZ3GW8sNJExknToo+RWD+U+6ZM5kkNfuxywSDPd08=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
+github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I=
+github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
+github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
@@ -94,14 +161,47 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
+github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/ashanbrown/forbidigo v1.2.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI=
+github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.44.102/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go-v2 v1.16.15/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
+github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
+github.com/aws/aws-sdk-go-v2/config v1.17.7/go.mod h1:dN2gja/QXxFF15hQreyrqYhLBaQo1d9ZKe/v/uplQoI=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.20/go.mod h1:UKY5HyIux08bbNA7Blv4PcXQ8cTkGh7ghHMFklaviR4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.22/go.mod h1:/vNv5Al0bpiF8YdX2Ov6Xy05VTiXsql94yUqJMYaj0w=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.16/go.mod h1:62dsXI0BqTIGomDl8Hpm33dv0OntGaVblri3ZRParVQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI=
+github.com/aws/aws-sdk-go-v2/service/kms v1.18.10/go.mod h1:45pB2oUV71tilooilIi3dC1KVWWJHHhc7JnyqByuheo=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM=
+github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
+github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af/go.mod h1:84CWnaDz4g1tEVnFLnuBigmGK15oPohy0RfvSN8d4eg=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -109,41 +209,68 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blizzy78/varnamelen v0.3.0/go.mod h1:hbwRdBvoBqxk34XyQ6HA0UH3G0/1TKuv5AC4eaBT0Ec=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
+github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
+github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
+github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
+github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
+github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -157,13 +284,15 @@ github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
+github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -178,8 +307,9 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.5.5 h1:q1gxsZsGZ8ddVe98yO6pR21b5xQSMiR61lD0W96pgQo=
github.com/containerd/containerd v1.5.5/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo=
+github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -208,13 +338,16 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/stargz-snapshotter/estargz v0.8.0 h1:oA1wx8kTFfImfsT5bScbrZd8gK+WtQnn15q82Djvm0Y=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
@@ -232,74 +365,101 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containers/common v0.44.0 h1:YpjfOxmWrnVyxugYgiWV1Vo/Xg8JUfe32QZz3SAMfUk=
github.com/containers/common v0.44.0/go.mod h1:7sdP4vmI5Bm6FPFxb3lvAh1Iktb6tiO1MzjUzhxdoGo=
-github.com/containers/image/v5 v5.16.0 h1:WQcNSzb7+ngS2cfynx0vUwhk+scpgiKlldVcsF8GPbI=
github.com/containers/image/v5 v5.16.0/go.mod h1:XgTpfAPLRGOd1XYyCU5cISFr777bLmOerCSpt/v7+Q4=
-github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
+github.com/containers/image/v5 v5.23.1 h1:dUK9p5xfd38iTQXJEgMsUGqIHXbkDugrleZtd3zB+Wg=
+github.com/containers/image/v5 v5.23.1/go.mod h1:EXFFGEsL99S6aqLqK2mQJ3yrNh6Q05UCHt4mhF9JNoM=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
+github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
+github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/ocicrypt v1.1.2 h1:Ez+GAMP/4GLix5Ywo/fL7O0nY771gsBIigiqUm1aXz0=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ=
+github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
github.com/containers/storage v1.35.0/go.mod h1:qzYhasQP2/V9D9XdO+vRwkHBhsBO0oznMLzzRDQ8s20=
-github.com/containers/storage v1.36.0 h1:OelxllCW19tnNngYuZw2ty/zLabVMG5rSs3KSwO1Lzc=
github.com/containers/storage v1.36.0/go.mod h1:vbd3SKVQNHdmU5qQI6hTEcKPxnZkGqydG4f6uwrI5a8=
+github.com/containers/storage v1.43.0 h1:P+zulGXA3mqe2GnYmZU0xu87Wy1M0PVHM2ucrgmvTdU=
+github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-oidc/v3 v3.4.0/go.mod h1:eHUXhZtXPQLgEaDrOVTgwbgmz1xGOkJNye6h3zkD2Pw=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
+github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0=
+github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM=
+github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc=
+github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
+github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehPjPiGUaWdwgOl92xRyFHJyaqXDHcCyW9M6nmCK4=
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
@@ -309,6 +469,7 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eggsampler/acme/v3 v3.3.0/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -318,45 +479,168 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
+github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
+github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01/go.mod h1:ypD5nozFk9vcGw1ATYefw6jHe/jZP++Z15/+VTMcWhc=
+github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
+github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52/go.mod h1:yIquW87NGRw1FU5p5lEkpnt/QxoH5uPAOUlOVkAUuMg=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsouza/go-dockerclient v1.7.4 h1:daYb0km2a91aNt2KTc4AEcTwgExYtQXHhkt5mjdRD1o=
github.com/fsouza/go-dockerclient v1.7.4/go.mod h1:het+LPt7NaTEVGgwXJAKxPn77RZrQKb2EXJb4e+BHv0=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
+github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.7.1/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=
+github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U=
+github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJuiqgom5DsQxM=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
+github.com/go-rod/rod v0.109.3/go.mod h1:GZDtmEs6RpF6kBRYpGCZXxXlKNneKVPiKOjaMbmVVjE=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
+github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
+github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
+github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
+github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw=
+github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
+github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
+github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
+github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
+github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
+github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/gobuffalo/attrs v0.1.0/go.mod h1:fmNpaWyHM0tRm8gCZWKx8yY9fvaNLo2PyzBNSrBZ5Hw=
+github.com/gobuffalo/attrs v1.0.1/go.mod h1:qGdnq2RukKtBl4ASJit0OFckc5XGSyTFk98SvRpMFrQ=
+github.com/gobuffalo/envy v1.8.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
+github.com/gobuffalo/envy v1.9.0/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
+github.com/gobuffalo/envy v1.10.1/go.mod h1:AWx4++KnNOW3JOeEvhSaq+mvgAvnMYOY1XSIin4Mago=
+github.com/gobuffalo/fizz v1.10.0/go.mod h1:J2XGPO0AfJ1zKw7+2BA+6FEGAkyEsdCOLvN93WCT2WI=
+github.com/gobuffalo/fizz v1.14.0/go.mod h1:0aF1kAZYCfKqbLM/lmZ3jXFyqqWE/kY/nIOKnNdAYXQ=
+github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
+github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
+github.com/gobuffalo/flect v0.2.1/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
+github.com/gobuffalo/flect v0.2.4/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8=
+github.com/gobuffalo/flect v0.2.5/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8=
+github.com/gobuffalo/genny/v2 v2.0.5/go.mod h1:kRkJuAw9mdI37AiEYjV4Dl+TgkBDYf8HZVjLkqe5eBg=
+github.com/gobuffalo/genny/v2 v2.0.8/go.mod h1:R45scCyQfff2HysNJHNanjrpvPw4Qu+rM1MOMDBB5oU=
+github.com/gobuffalo/genny/v2 v2.0.9/go.mod h1:R45scCyQfff2HysNJHNanjrpvPw4Qu+rM1MOMDBB5oU=
+github.com/gobuffalo/github_flavored_markdown v1.1.0/go.mod h1:TSpTKWcRTI0+v7W3x8dkSKMLJSUpuVitlptCkpeY8ic=
+github.com/gobuffalo/github_flavored_markdown v1.1.1/go.mod h1:yU32Pen+eorS58oxh/bNZx76zUOCJwmvyV5FBrvzOKQ=
+github.com/gobuffalo/helpers v0.6.0/go.mod h1:pncVrer7x/KRvnL5aJABLAuT/RhKRR9klL6dkUOhyv8=
+github.com/gobuffalo/helpers v0.6.1/go.mod h1:wInbDi0vTJKZBviURTLRMFLE4+nF2uRuuL2fnlYo7w4=
+github.com/gobuffalo/helpers v0.6.4/go.mod h1:m2aOKsTl3KB0RUwwpxf3tykaaitujQ3irivqrlNAcJ0=
+github.com/gobuffalo/logger v1.0.3/go.mod h1:SoeejUwldiS7ZsyCBphOGURmWdwUFXs0J7TCjEhjKxM=
+github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs=
+github.com/gobuffalo/nulls v0.2.0/go.mod h1:w4q8RoSCEt87Q0K0sRIZWYeIxkxog5mh3eN3C/n+dUc=
+github.com/gobuffalo/nulls v0.4.1/go.mod h1:pp8e1hWTRJZFpMl4fj/CVbSMlaxjeGKkFq4RuBZi3w8=
+github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q=
+github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fdec9hrhI=
+github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY=
+github.com/gobuffalo/packr/v2 v2.8.0/go.mod h1:PDk2k3vGevNE3SwVyVRgQCCXETC9SaONCNSXT1Q8M1g=
+github.com/gobuffalo/plush/v4 v4.0.0/go.mod h1:ErFS3UxKqEb8fpFJT7lYErfN/Nw6vHGiDMTjxpk5bQ0=
+github.com/gobuffalo/plush/v4 v4.1.9/go.mod h1:9OOII9uAM5pZnhWu1OkQnboXJjaWMQ7kcTl3zNcxvTM=
+github.com/gobuffalo/plush/v4 v4.1.11/go.mod h1:9OOII9uAM5pZnhWu1OkQnboXJjaWMQ7kcTl3zNcxvTM=
+github.com/gobuffalo/pop/v5 v5.3.1/go.mod h1:vcEDhh6cJ3WVENqJDFt/6z7zNb7lLnlN8vj3n5G9rYA=
+github.com/gobuffalo/pop/v6 v6.0.0/go.mod h1:5rd3OnViLhjteR8+0i/mT9Q4CzkTzCoR7tm/9mmAic4=
+github.com/gobuffalo/pop/v6 v6.0.4/go.mod h1:dFcrMNPOwk+sl1Oa0lOb/jGbmjv+JV+5CZjMWNYR3KI=
+github.com/gobuffalo/tags/v3 v3.0.2/go.mod h1:ZQeN6TCTiwAFnS0dNcbDtSgZDwNKSpqajvVtt6mlYpA=
+github.com/gobuffalo/tags/v3 v3.1.0/go.mod h1:ZQeN6TCTiwAFnS0dNcbDtSgZDwNKSpqajvVtt6mlYpA=
+github.com/gobuffalo/tags/v3 v3.1.2/go.mod h1:o3ldUfKv50jxWAC8eZHXMm8dnKW3YvyZUMr0xqUcZTI=
+github.com/gobuffalo/validate/v3 v3.0.0/go.mod h1:HFpjq+AIiA2RHoQnQVTFKF/ZpUPXwyw82LgyDPxQ9r0=
+github.com/gobuffalo/validate/v3 v3.1.0/go.mod h1:HFpjq+AIiA2RHoQnQVTFKF/ZpUPXwyw82LgyDPxQ9r0=
+github.com/gobuffalo/validate/v3 v3.3.1/go.mod h1:Ehu8ieNJQuUM4peDDr/0VapzdGA7RgTc3wbe51vHfS0=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -366,13 +650,18 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -381,6 +670,8 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -399,9 +690,24 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
+github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
+github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
+github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
+github.com/golangci/golangci-lint v1.43.0/go.mod h1:VIFlUqidx5ggxDfQagdvd9E67UjMXtTHBkBQ7sHoC5Q=
+github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
+github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
+github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
+github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY=
+github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/certificate-transparency-go v1.0.22-0.20181127102053-c25855a82c75/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -413,115 +719,305 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
+github.com/google/go-containerregistry v0.11.0 h1:Xt8x1adcREjFcmDoDK8OdOsjxu90PHkGuwNP8GiHMLM=
+github.com/google/go-containerregistry v0.11.0/go.mod h1:BBaYtsHPHA42uEgAvd/NejvAfPSlz281sJWqupjSxfk=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
+github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
+github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw=
+github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
+github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw=
+github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0=
+github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
+github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI=
+github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
+github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
+github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
+github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
+github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
+github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
+github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E=
+github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.1.0/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/honeycombio/beeline-go v1.1.1/go.mod h1:kN0cfUGBMfA87DyCYbiiLoSzWsnw3bluZvNEWtatHxk=
+github.com/honeycombio/beeline-go v1.9.0 h1:MM/20HBFE2AKno0N9bOgEicSHHiTkTklGdtjdtAEWbk=
+github.com/honeycombio/beeline-go v1.9.0/go.mod h1:/8gmL2gGFbAnIhfldNUj26AFi7+JTdjtTfIujJww6yI=
+github.com/honeycombio/libhoney-go v1.15.2/go.mod h1:JzhRPYgoBCd0rZvudrqmej4Ntx0w7AT3wAJpf5+t1WA=
+github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA=
+github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
+github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44=
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.6.0/go.mod h1:yeseQo4xhQbgyJs2c87RAXOH2i624N0Fh1KSPJya7qo=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
+github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.10.1/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.12.1/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.3.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik=
+github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
+github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgtype v1.11.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg=
+github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
+github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0=
+github.com/jackc/pgx/v4 v4.16.1/go.mod h1:SIhx0D5hoADaiXZVyv+3gSm3LCIIINTVO0PficsvWGQ=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jellydator/ttlcache/v2 v2.11.1/go.mod h1:RtE5Snf0/57e+2cLWFYWCCsLas2Hy3c5Z4n14XmSvTI=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
+github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
+github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
+github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ=
+github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
+github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
+github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E=
+github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI=
+github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jinzhu/copier v0.3.2 h1:QdBOCbaouLDYaIPFfi1bKv5F5tPpeTwXe4sD0jqtz5w=
github.com/jinzhu/copier v0.3.2/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
+github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
+github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU=
github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
+github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
+github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+github.com/karrick/godirwalk v1.15.3/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -536,96 +1032,220 @@ github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U=
+github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
+github.com/labstack/echo/v4 v4.3.0/go.mod h1:PvmtTvhVqKDzDQy4d3bWzPjZLzom4iQbAZy2sgZ/qI8=
+github.com/labstack/echo/v4 v4.7.2/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
+github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
+github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
+github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e h1:2ba+yBBeT8ZFyZjRLPDKvkqVrWX4CCYAuR6nuJGojD0=
+github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e/go.mod h1:54WQpg5QI0mpRhxoj9bxysLqA5WJylVsLtXOrb3zAiU=
+github.com/letsencrypt/challtestsrv v1.2.1/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc=
+github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/luna-duclos/instrumentedsql v1.1.3/go.mod h1:9J1njvFds+zN7y85EDhN9XNQLANWwZt2ULeIC8yMNYs=
github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/manifoldco/promptui v0.8.0 h1:R95mMF+McvXZQ7j1g8ucVZE1gLP3Sv6j9vlF9kyRqQo=
github.com/manifoldco/promptui v0.8.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ=
+github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
+github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
+github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
+github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc=
+github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
+github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
+github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
+github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
+github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy3EhK+0=
+github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
+github.com/microcosm-cc/bluemonday v1.0.16/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14=
+github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8=
+github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s=
+github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s=
github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
+github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
+github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc=
+github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ=
+github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
+github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -634,16 +1254,20 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283 h1:TVzvdjOalkJBNkbpPVMAr4KV9QRf2IjfxdyxwAK78Gs=
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q=
+github.com/opencontainers/image-spec v1.1.0-rc1 h1:lfG+OTa7V8PD3PKvkocSG9KAcA9MANqJn53m31Fvwkc=
+github.com/opencontainers/image-spec v1.1.0-rc1/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg=
+github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -658,34 +1282,62 @@ github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqi
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opencontainers/selinux v1.8.4/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
-github.com/opencontainers/selinux v1.8.5 h1:OkT6bMHOQ1JQQO4ihjQ49sj0+wciDcjziSVTRn8VeTA=
github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4=
+github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ=
github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 h1:WaxyNFpmIDu4i6so9r6LVFIbSaXqsj8oitMitt86ae4=
github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
-github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
+github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
+github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
+github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
+github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
+github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
+github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
+github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
+github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
+github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
+github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -697,8 +1349,12 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -709,40 +1365,98 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
+github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
+github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
+github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30=
+github.com/quasilyte/go-ruleguard v0.3.13/go.mod h1:Ul8wwdqR6kBVOCt2dipDBkE+T6vAV/iixkrKuRTN1oQ=
+github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/quasilyte/go-ruleguard/dsl v0.3.10/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
+github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
+github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg=
+github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
+github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
+github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0=
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
+github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc=
+github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
+github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
+github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sigstore/sigstore v1.4.2 h1:fTppzuZBAmQ/skgl7FWJRLyby70pxCqJGKyWfkSuMR8=
+github.com/sigstore/sigstore v1.4.2/go.mod h1:wCv58Fia7u1snVJyPcxdgIh/3uw1XdOLhxPExTwwyt4=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -751,44 +1465,89 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
+github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/sylabs/sif/v2 v2.8.0 h1:FIfWA1fYSFynKD1LJwGbWJ2ib8ylT8XwZl9naLlciPE=
+github.com/sylabs/sif/v2 v2.8.0/go.mod h1:LQOdYXC9a8i7BleTKRw9lohi0rTbXkJOeS9u0ebvgyM=
+github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
+github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
+github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
+github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
+github.com/theupdateframework/go-tuf v0.5.1-0.20220920170306-f237d7ca5b42/go.mod h1:vAqWV3zEs89byeFsAYoh/Q14vJTgJkHwnnRCWBBBINY=
+github.com/theupdateframework/go-tuf v0.5.1 h1:kd8in4+boNxtYf8AYPbm9//Ps75RG5yURkN9Fng4Qs4=
+github.com/theupdateframework/go-tuf v0.5.1/go.mod h1:vAqWV3zEs89byeFsAYoh/Q14vJTgJkHwnnRCWBBBINY=
+github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
+github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY=
+github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
+github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
+github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
+github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
+github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
+github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
+github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
-github.com/vbauerster/mpb/v7 v7.1.3 h1:VJkiLuuBs/re5SCHLVkYOPYAs+1jagk5QIDHgAXLVVA=
github.com/vbauerster/mpb/v7 v7.1.3/go.mod h1:X5GlohZw2fIpypMXWaKart+HGSAjpz49skxkDk+ZL7c=
+github.com/vbauerster/mpb/v7 v7.5.3 h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w=
+github.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9AhY/PnuOE=
+github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA=
@@ -797,37 +1556,70 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
+github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
+github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
+github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY=
+github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
+github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
+github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
+github.com/weppos/publicsuffix-go v0.15.1-0.20210807195340-dc689ff0bb59/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
+github.com/weppos/publicsuffix-go v0.15.1-0.20220413065649-906f534b73a4/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA=
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc=
+github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
+github.com/ysmood/got v0.31.3/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY=
+github.com/ysmood/gotrace v0.6.0/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM=
+github.com/ysmood/gson v0.7.1/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
+github.com/ysmood/gson v0.7.2/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
+github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
+github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
+github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
+github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
+github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
+github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is=
+github.com/zmap/zcrypto v0.0.0-20210811211718-6f9bc4aff20f/go.mod h1:y/9hjFEub4DtQxTHp/pqticBgdYeCwL97vojV3lsvHY=
+github.com/zmap/zlint/v3 v3.3.1-0.20211019173530-cb17369b4628/go.mod h1:O+4OXRfNLKqOyDl4eKZ1SBlYudKGUBGRFcv+m1KLr28=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
+go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
+go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -836,29 +1628,66 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib/propagators v0.19.0/go.mod h1:4QOdZClXISU5S43xZxk5tYaWcpb+lehqfKtE6PK6msE=
+go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg=
+go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc=
+go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA=
+go.opentelemetry.io/otel/trace v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+goji.io/v3 v3.0.0/go.mod h1:c02FFnNiVNCDo+DpR2IhBQpM9r5G1BG/MkHNTPUJ13U=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
+golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 h1:a5Yg6ylndHHYJqIPrdq0AhvR6KTvDTAvgBtaidhEevY=
+golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -869,6 +1698,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -894,6 +1724,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -917,6 +1750,8 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -925,6 +1760,7 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
@@ -943,8 +1779,28 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -957,18 +1813,33 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -976,8 +1847,11 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -989,15 +1863,21 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1005,6 +1885,7 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1015,12 +1896,14 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1041,6 +1924,8 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1050,14 +1935,58 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55 h1:rw6UNGRMfarCepjI8qOepea/SXwIBVfTKjztZ5gBbq4=
+golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc=
+golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1065,24 +1994,37 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ=
+golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
@@ -1090,18 +2032,30 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1110,35 +2064,79 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
+golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
@@ -1157,15 +2155,39 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
+google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1174,6 +2196,7 @@ google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dT
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
@@ -1188,12 +2211,16 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -1207,10 +2234,52 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220720214146-176da50484ac h1:EOa+Yrhx1C0O+4pHeXeWrCwdI0tWI6IfUU56Vebs9wQ=
+google.golang.org/genproto v0.0.0-20220720214146-176da50484ac/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1223,6 +2292,7 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
@@ -1233,8 +2303,22 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
+google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1246,42 +2330,58 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
+gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
+gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
@@ -1294,6 +2394,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
@@ -1306,6 +2407,7 @@ k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
@@ -1314,18 +2416,27 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48=
+mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
+mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
+mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
index cd11be96530..fe79e3adda2 100644
--- a/vendor/github.com/BurntSushi/toml/.gitignore
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -1,2 +1,2 @@
-toml.test
+/toml.test
/toml-test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
deleted file mode 100644
index f621b01196c..00000000000
--- a/vendor/github.com/BurntSushi/toml/COMPATIBLE
+++ /dev/null
@@ -1 +0,0 @@
-Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 64410cf7556..3651cfa9609 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -1,10 +1,5 @@
-## TOML parser and encoder for Go with reflection
-
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
+reflection interface similar to Go's standard library `json` and `xml` packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
@@ -14,28 +9,18 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).
-This library requires Go 1.13 or newer; install it with:
+This library requires Go 1.13 or newer; add it to your go.mod with:
- $ go get github.com/BurntSushi/toml
+ % go get github.com/BurntSushi/toml@latest
It also comes with a TOML validator CLI tool:
- $ go get github.com/BurntSushi/toml/cmd/tomlv
- $ tomlv some-toml-file.toml
-
-### Testing
-
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
+ % go install github.com/BurntSushi/toml/cmd/tomlv@latest
+ % tomlv some-toml-file.toml
### Examples
-
-This package works similarly to how the Go standard library handles XML and
-JSON. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
+For the simplest example, consider some TOML file as just a list of keys and
+values:
```toml
Age = 25
@@ -45,7 +30,7 @@ Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
-Which could be defined in Go as:
+Which can be decoded with:
```go
type Config struct {
@@ -53,21 +38,15 @@ type Config struct {
Cats []string
Pi float64
Perfection []int
- DOB time.Time // requires `import time`
+ DOB time.Time
}
-```
-And then decoded with:
-
-```go
var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
- // handle error
-}
+_, err := toml.Decode(tomlData, &conf)
```
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
+You can also use struct tags if your struct field name doesn't map to a TOML key
+value directly:
```toml
some_key_NAME = "wat"
@@ -75,146 +54,67 @@ some_key_NAME = "wat"
```go
type TOML struct {
- ObscureKey string `toml:"some_key_NAME"`
+ ObscureKey string `toml:"some_key_NAME"`
}
```
-Beware that like other most other decoders **only exported fields** are
-considered when encoding and decoding; private fields are silently ignored.
-
-### Using the `encoding.TextUnmarshaler` interface
+Beware that like other decoders **only exported fields** are considered when
+encoding and decoding; private fields are silently ignored.
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
+### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
+Here's an example that automatically parses values in a `mail.Address`:
```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
- Name string
- Duration duration
-}
-type songs struct {
- Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
- log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
- fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
+contacts = [
+ "Donald Duck ",
+ "Scrooge McDuck ",
+]
```
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
+Can be decoded with:
```go
-type duration struct {
- time.Duration
+// Create address type which satisfies the encoding.TextUnmarshaler interface.
+type address struct {
+ *mail.Address
}
-func (d *duration) UnmarshalText(text []byte) error {
+func (a *address) UnmarshalText(text []byte) error {
var err error
- d.Duration, err = time.ParseDuration(string(text))
+ a.Address, err = mail.ParseAddress(string(text))
return err
}
+
+// Decode it.
+func decode() {
+ blob := `
+ contacts = [
+ "Donald Duck ",
+ "Scrooge McDuck ",
+ ]
+ `
+
+ var contacts struct {
+ Contacts []address
+ }
+
+ _, err := toml.Decode(blob, &contacts)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ for _, c := range contacts.Contacts {
+ fmt.Printf("%#v\n", c.Address)
+ }
+
+ // Output:
+ // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
+ // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
+}
```
To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
a similar way.
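Reviewer note: the README only mentions `UnmarshalTOML` in passing; a minimal sketch of what that looks like (the `Song` type and input are illustrative, not part of this diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Song fills itself from the already-parsed TOML data, satisfying the
// package's Unmarshaler interface (tables arrive as map[string]interface{}).
type Song struct {
	Name string
}

func (s *Song) UnmarshalTOML(data interface{}) error {
	table, ok := data.(map[string]interface{})
	if !ok {
		return fmt.Errorf("expected a table, got %T", data)
	}
	if name, ok := table["name"].(string); ok {
		s.Name = name
	}
	return nil
}

func main() {
	var v struct{ Song Song }
	if _, err := toml.Decode("[song]\nname = \"Thunder Road\"", &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Song.Name) // Thunder Road
}
```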
### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
- # You can indent as you please. Tabs or spaces. TOML don't care.
- [servers.alpha]
- ip = "10.0.0.1"
- dc = "eqdc10"
-
- [servers.beta]
- ip = "10.0.0.2"
- dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
- "alpha",
- "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
- Title string
- Owner ownerInfo
- DB database `toml:"database"`
- Servers map[string]server
- Clients clients
-}
-
-type ownerInfo struct {
- Name string
- Org string `toml:"organization"`
- Bio string
- DOB time.Time
-}
-
-type database struct {
- Server string
- Ports []int
- ConnMax int `toml:"connection_max"`
- Enabled bool
-}
-
-type server struct {
- IP string
- DC string
-}
-
-type clients struct {
- Data [][]interface{}
- Hosts []string
-}
-```
-
-Note that a case insensitive match will be tried if an exact match can't be
-found.
-
-A working example of the above can be found in `_examples/example.{go,toml}`.
-
+See the [`_example/`](/_example) directory for a more complex example.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index d3d3b8397ca..09523315b83 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -1,13 +1,16 @@
package toml
import (
+ "bytes"
"encoding"
+ "encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"reflect"
+ "strconv"
"strings"
"time"
)
@@ -18,12 +21,30 @@ type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
-// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
-func Unmarshal(p []byte, v interface{}) error {
- _, err := Decode(string(p), v)
+// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`.
+func Unmarshal(data []byte, v interface{}) error {
+ _, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err
}
+// Decode the TOML data in to the pointer v.
+//
+// See the documentation on Decoder for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+ return NewDecoder(strings.NewReader(data)).Decode(v)
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at path and decode it for you.
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+ fp, err := os.Open(path)
+ if err != nil {
+ return MetaData{}, err
+ }
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
+}
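Reviewer note: `Decode` and `DecodeFile` are only moved here from further down the file; for reference, a sketch of the `DecodeFile` helper in use (assumes a local `example.toml` exists):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var conf map[string]interface{}
	// DecodeFile opens the file at the given path and decodes it.
	if _, err := toml.DecodeFile("example.toml", &conf); err != nil {
		log.Fatal(err)
	}
	fmt.Println(conf)
}
```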
+
// Primitive is a TOML value that hasn't been decoded into a Go value.
//
// This type can be used for any value, which will cause decoding to be delayed.
@@ -40,22 +61,12 @@ type Primitive struct {
context Key
}
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
-//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
- md.context = primValue.context
- defer func() { md.context = nil }()
- return md.unify(primValue.undecoded, rvalue(v))
-}
+// The significand precision for float32 and float64 is 24 and 53 bits; this is
+// the range within which an integer can be stored in a float without loss of data.
+const (
+ maxSafeFloat32Int = 16777215 // 2^24-1
+ maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
+)
// Decoder decodes TOML data.
//
@@ -67,6 +78,9 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
// in the local timezone.
//
+// time.Duration types are treated as nanoseconds if the TOML value is an
+// integer, or they're parsed with time.ParseDuration() if they're strings.
+//
// All other TOML types (float, string, int, bool and array) correspond to the
// obvious Go types.
//
@@ -74,7 +88,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
// interface, in which case any primitive TOML value (floats, strings, integers,
// booleans, datetimes) will be converted to a []byte and given to the value's
// UnmarshalText method. See the Unmarshaler example for a demonstration with
-// time duration strings.
+// email addresses.
//
// Key mapping
//
@@ -100,18 +114,39 @@ func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
+var (
+ unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
+)
+
// Decode TOML data in to the pointer `v`.
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
- return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+ s := "%q"
+ if reflect.TypeOf(v) == nil {
+ s = "%v"
+ }
+
+ return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v))
}
if rv.IsNil() {
- return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+ return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
}
- // TODO: have parser should read from io.Reader? Or at the very least, make
- // it read from []byte rather than string
+ // Check if this is a supported type: struct, map, interface{}, or something
+ // that implements UnmarshalTOML or UnmarshalText.
+ rv = indirect(rv)
+ rt := rv.Type()
+ if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
+ !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
+ !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
+ return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt)
+ }
+
+ // TODO: parser should read from io.Reader? Or at the very least, make it
+ // read from []byte rather than string
data, err := ioutil.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
@@ -121,29 +156,33 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
if err != nil {
return MetaData{}, err
}
+
md := MetaData{
- p.mapping, p.types, p.ordered,
- make(map[string]bool, len(p.ordered)), nil,
- }
- return md, md.unify(p.mapping, indirect(rv))
+ mapping: p.mapping,
+ keyInfo: p.keyInfo,
+ keys: p.ordered,
+ decoded: make(map[string]struct{}, len(p.ordered)),
+ context: nil,
+ data: data,
+ }
+ return md, md.unify(p.mapping, rv)
}
-// Decode the TOML data in to the pointer v.
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
//
-// See the documentation on Decoder for a description of the decoding process.
-func Decode(data string, v interface{}) (MetaData, error) {
- return NewDecoder(strings.NewReader(data)).Decode(v)
-}
-
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at path and decode it for you.
-func DecodeFile(path string, v interface{}) (MetaData, error) {
- fp, err := os.Open(path)
- if err != nil {
- return MetaData{}, err
- }
- defer fp.Close()
- return NewDecoder(fp).Decode(v)
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md.context = primValue.context
+ defer func() { md.context = nil }()
+ return md.unify(primValue.undecoded, rvalue(v))
}
// unify performs a sort of type unification based on the structure of `rv`,
@@ -154,7 +193,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
// TODO: #76 would make this superfluous after implemented.
- if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+ if rv.Type() == primitiveType {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
@@ -166,17 +205,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return nil
}
- // Special case. Unmarshaler Interface support.
- if rv.CanAddr() {
- if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
- return v.UnmarshalTOML(data)
- }
+ rvi := rv.Interface()
+ if v, ok := rvi.(Unmarshaler); ok {
+ return v.UnmarshalTOML(data)
}
-
- // Special case. Look for a value satisfying the TextUnmarshaler interface.
- if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+ if v, ok := rvi.(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
}
+
// TODO:
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
@@ -187,7 +223,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
k := rv.Kind()
- // laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
@@ -213,17 +248,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
- // we only support empty interfaces.
- if rv.NumMethod() > 0 {
- return e("unsupported type %s", rv.Type())
+ if rv.NumMethod() > 0 { // Only empty interfaces are supported.
+ return md.e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
+ case reflect.Float32, reflect.Float64:
return md.unifyFloat64(data, rv)
}
- return e("unsupported type %s", rv.Kind())
+ return md.e("unsupported type %s", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
@@ -232,7 +264,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
if mapping == nil {
return nil
}
- return e("type mismatch for %s: expected table but found %T",
+ return md.e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}
@@ -254,17 +286,18 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
+
if isUnifiable(subv) {
- md.decoded[md.context.add(key).String()] = true
+ md.decoded[md.context.add(key).String()] = struct{}{}
md.context = append(md.context, key)
- if err := md.unify(datum, subv); err != nil {
+
+ err := md.unify(datum, subv)
+ if err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
- // Bad user! No soup for you!
- return e("cannot write unexported field %s.%s",
- rv.Type().String(), f.name)
+ return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
}
}
}
@@ -272,10 +305,10 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
- if k := rv.Type().Key().Kind(); k != reflect.String {
- return fmt.Errorf(
- "toml: cannot decode to a map with non-string key type (%s in %q)",
- k, rv.Type())
+ keyType := rv.Type().Key().Kind()
+ if keyType != reflect.String && keyType != reflect.Interface {
+ return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
+ keyType, rv.Type())
}
tmap, ok := mapping.(map[string]interface{})
@@ -283,23 +316,32 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
if tmap == nil {
return nil
}
- return badtype("map", mapping)
+ return md.badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
- md.decoded[md.context.add(k).String()] = true
+ md.decoded[md.context.add(k).String()] = struct{}{}
md.context = append(md.context, k)
- rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
- if err := md.unify(v, rvval); err != nil {
+
+ err := md.unify(v, indirect(rvval))
+ if err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
- rvkey.SetString(k)
+ rvkey := indirect(reflect.New(rv.Type().Key()))
+
+ switch keyType {
+ case reflect.Interface:
+ rvkey.Set(reflect.ValueOf(k))
+ case reflect.String:
+ rvkey.SetString(k)
+ }
+
rv.SetMapIndex(rvkey, rvval)
}
return nil
@@ -311,10 +353,10 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
if !datav.IsValid() {
return nil
}
- return badtype("slice", data)
+ return md.badtype("slice", data)
}
if l := datav.Len(); l != rv.Len() {
- return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
+ return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l)
}
return md.unifySliceArray(datav, rv)
}
@@ -325,7 +367,7 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
if !datav.IsValid() {
return nil
}
- return badtype("slice", data)
+ return md.badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
@@ -346,26 +388,35 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
- if _, ok := data.(time.Time); ok {
- rv.Set(reflect.ValueOf(data))
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+ _, ok := rv.Interface().(json.Number)
+ if ok {
+ if i, ok := data.(int64); ok {
+ rv.SetString(strconv.FormatInt(i, 10))
+ } else if f, ok := data.(float64); ok {
+ rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
+ } else {
+ return md.badtype("string", data)
+ }
return nil
}
- return badtype("time.Time", data)
-}
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
- return badtype("string", data)
+ return md.badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+ rvk := rv.Kind()
+
if num, ok := data.(float64); ok {
- switch rv.Kind() {
+ switch rvk {
case reflect.Float32:
+ if num < -math.MaxFloat32 || num > math.MaxFloat32 {
+ return md.parseErr(errParseRange{i: num, size: rvk.String()})
+ }
fallthrough
case reflect.Float64:
rv.SetFloat(num)
@@ -374,54 +425,60 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
}
return nil
}
- return badtype("float", data)
+
+ if num, ok := data.(int64); ok {
+ if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
+ (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
+ return md.parseErr(errParseRange{i: num, size: rvk.String()})
+ }
+ rv.SetFloat(float64(num))
+ return nil
+ }
+
+ return md.badtype("float", data)
}
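Reviewer note: a small sketch of the behavioral change above; integers that can't be stored losslessly in the target float are now rejected with a range error rather than silently rounded (values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct{ X float64 }

	// 2^53+1 cannot be represented exactly in a float64 (maxSafeFloat64Int
	// is 2^53-1), so this decode now fails instead of rounding.
	_, err := toml.Decode("x = 9007199254740993", &cfg)
	fmt.Println(err != nil) // true
}
```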
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
- if num, ok := data.(int64); ok {
- if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
- switch rv.Kind() {
- case reflect.Int, reflect.Int64:
- // No bounds checking necessary.
- case reflect.Int8:
- if num < math.MinInt8 || num > math.MaxInt8 {
- return e("value %d is out of range for int8", num)
- }
- case reflect.Int16:
- if num < math.MinInt16 || num > math.MaxInt16 {
- return e("value %d is out of range for int16", num)
- }
- case reflect.Int32:
- if num < math.MinInt32 || num > math.MaxInt32 {
- return e("value %d is out of range for int32", num)
- }
- }
- rv.SetInt(num)
- } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
- unum := uint64(num)
- switch rv.Kind() {
- case reflect.Uint, reflect.Uint64:
- // No bounds checking necessary.
- case reflect.Uint8:
- if num < 0 || unum > math.MaxUint8 {
- return e("value %d is out of range for uint8", num)
- }
- case reflect.Uint16:
- if num < 0 || unum > math.MaxUint16 {
- return e("value %d is out of range for uint16", num)
- }
- case reflect.Uint32:
- if num < 0 || unum > math.MaxUint32 {
- return e("value %d is out of range for uint32", num)
- }
+ _, ok := rv.Interface().(time.Duration)
+ if ok {
+ // Parse as string duration, and fall back to regular integer parsing
+ // (as nanosecond) if this is not a string.
+ if s, ok := data.(string); ok {
+ dur, err := time.ParseDuration(s)
+ if err != nil {
+ return md.parseErr(errParseDuration{s})
}
- rv.SetUint(unum)
- } else {
- panic("unreachable")
+ rv.SetInt(int64(dur))
+ return nil
}
- return nil
}
- return badtype("integer", data)
+
+ num, ok := data.(int64)
+ if !ok {
+ return md.badtype("integer", data)
+ }
+
+ rvk := rv.Kind()
+ switch {
+ case rvk >= reflect.Int && rvk <= reflect.Int64:
+ if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) ||
+ (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) ||
+ (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) {
+ return md.parseErr(errParseRange{i: num, size: rvk.String()})
+ }
+ rv.SetInt(num)
+ case rvk >= reflect.Uint && rvk <= reflect.Uint64:
+ unum := uint64(num)
+ if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) ||
+ rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) ||
+ rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) {
+ return md.parseErr(errParseRange{i: num, size: rvk.String()})
+ }
+ rv.SetUint(unum)
+ default:
+ panic("unreachable")
+ }
+ return nil
}
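Reviewer note: a sketch of the new `time.Duration` handling described in the doc comment earlier in this file — strings go through `time.ParseDuration`, integer TOML values are taken as nanoseconds (field names are illustrative):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct {
		Timeout  time.Duration // decoded via time.ParseDuration
		Interval time.Duration // integer values are nanoseconds
	}
	blob := "timeout = \"5m10s\"\ninterval = 1500000000"
	if _, err := toml.Decode(blob, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Timeout, cfg.Interval) // 5m10s 1.5s
}
```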
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
@@ -429,7 +486,7 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
rv.SetBool(b)
return nil
}
- return badtype("boolean", data)
+ return md.badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
@@ -440,7 +497,13 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
- case TextMarshaler:
+ case Marshaler:
+ text, err := sdata.MarshalTOML()
+ if err != nil {
+ return err
+ }
+ s = string(text)
+ case encoding.TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
@@ -457,7 +520,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
case float64:
s = fmt.Sprintf("%f", sdata)
default:
- return badtype("primitive (string-like)", data)
+ return md.badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
@@ -465,22 +528,54 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
return nil
}
+func (md *MetaData) badtype(dst string, data interface{}) error {
+ return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
+}
+
+func (md *MetaData) parseErr(err error) error {
+ k := md.context.String()
+ return ParseError{
+ LastKey: k,
+ Position: md.keyInfo[k].pos,
+ Line: md.keyInfo[k].pos.Line,
+ err: err,
+ input: string(md.data),
+ }
+}
+
+func (md *MetaData) e(format string, args ...interface{}) error {
+ f := "toml: "
+ if len(md.context) > 0 {
+ f = fmt.Sprintf("toml: (last key %q): ", md.context)
+ p := md.keyInfo[md.context.String()].pos
+ if p.Line > 0 {
+ f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context)
+ }
+ }
+ return fmt.Errorf(f+format, args...)
+}
+
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
-// Pointers are followed until the value is not a pointer.
-// New values are allocated for each nil pointer.
//
-// An exception to this rule is if the value satisfies an interface of
-// interest to us (like encoding.TextUnmarshaler).
+// Pointers are followed until the value is not a pointer. New values are
+// allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of interest
+// to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
- if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
+ pvi := pv.Interface()
+ if _, ok := pvi.(encoding.TextUnmarshaler); ok {
+ return pv
+ }
+ if _, ok := pvi.(Unmarshaler); ok {
return pv
}
}
@@ -496,16 +591,12 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
- if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+ rvi := rv.Interface()
+ if _, ok := rvi.(encoding.TextUnmarshaler); ok {
+ return true
+ }
+ if _, ok := rvi.(Unmarshaler); ok {
return true
}
return false
}
-
-func e(format string, args ...interface{}) error {
- return fmt.Errorf("toml: "+format, args...)
-}
-
-func badtype(expected string, data interface{}) error {
- return e("cannot load TOML value of type %T into a Go %s", data, expected)
-}
diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go
index 38aa75fdce9..eddfb641b86 100644
--- a/vendor/github.com/BurntSushi/toml/decode_go116.go
+++ b/vendor/github.com/BurntSushi/toml/decode_go116.go
@@ -1,3 +1,4 @@
+//go:build go1.16
// +build go1.16
package toml
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
index db89eac1d1b..c6af3f239dd 100644
--- a/vendor/github.com/BurntSushi/toml/deprecated.go
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -5,29 +5,17 @@ import (
"io"
)
-// DEPRECATED!
-//
-// Use the identical encoding.TextMarshaler instead. It is defined here to
-// support Go 1.1 and older.
+// Deprecated: use encoding.TextMarshaler
type TextMarshaler encoding.TextMarshaler
-// DEPRECATED!
-//
-// Use the identical encoding.TextUnmarshaler instead. It is defined here to
-// support Go 1.1 and older.
+// Deprecated: use encoding.TextUnmarshaler
type TextUnmarshaler encoding.TextUnmarshaler
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
+// Deprecated: use MetaData.PrimitiveDecode.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
- md := MetaData{decoded: make(map[string]bool)}
+ md := MetaData{decoded: make(map[string]struct{})}
return md.unify(primValue.undecoded, rvalue(v))
}
-// DEPRECATED!
-//
-// Use NewDecoder(reader).Decode(&v) instead.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
- return NewDecoder(r).Decode(v)
-}
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index 10d88ac6327..dc8568d1b9b 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -3,6 +3,7 @@ package toml
import (
"bufio"
"encoding"
+ "encoding/json"
"errors"
"fmt"
"io"
@@ -21,12 +22,11 @@ type tomlEncodeError struct{ error }
var (
errArrayNilElement = errors.New("toml: cannot encode array with nil element")
errNonString = errors.New("toml: cannot encode a map with non-string key type")
- errAnonNonStruct = errors.New("toml: cannot encode an anonymous field that is not a struct")
errNoKey = errors.New("toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
-var quotedReplacer = strings.NewReplacer(
+var dblQuotedReplacer = strings.NewReplacer(
"\"", "\\\"",
"\\", "\\\\",
"\x00", `\u0000`,
@@ -64,35 +64,62 @@ var quotedReplacer = strings.NewReplacer(
"\x7f", `\u007f`,
)
+var (
+ marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid TOML.
+type Marshaler interface {
+ MarshalTOML() ([]byte, error)
+}
+
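Reviewer note: a minimal sketch of implementing the new `Marshaler` interface (the `Port` type is illustrative; `MarshalTOML` output is written verbatim, so it must be valid TOML):

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"strconv"

	"github.com/BurntSushi/toml"
)

// Port renders itself as a bare TOML integer via MarshalTOML.
type Port int

func (p Port) MarshalTOML() ([]byte, error) {
	return []byte(strconv.Itoa(int(p))), nil
}

func main() {
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(struct{ Port Port }{8080}); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String()) // Port = 8080
}
```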
// Encoder encodes a Go to a TOML document.
//
// The mapping between Go values and TOML values should be precisely the same as
-// for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.
+// for the Decode* functions.
+//
+// time.Time is encoded as a RFC 3339 string, and time.Duration as its string
+// representation.
+//
+// The toml.Marshaler and encoding.TextMarshaler interfaces are supported to
+// encode the value as custom TOML.
+//
+// If you want to write arbitrary binary data then you will need to use
+// something like base64 since TOML does not have any binary types.
//
// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
// are encoded first.
//
// Go maps will be sorted alphabetically by key for deterministic output.
//
+// The toml struct tag can be used to provide the key name; if omitted the
+// struct field name will be used. If the "omitempty" option is present the
+// following value will be skipped:
+//
+// - arrays, slices, maps, and strings with len of 0
+// - struct with all zero values
+// - bool false
+//
+// If omitzero is given all int and float types with a value of 0 will be
+// skipped.
+//
// Encoding Go values without a corresponding TOML representation will return an
// error. Examples of this includes maps with non-string keys, slices with nil
// elements, embedded non-struct types, and nested slices containing maps or
// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
// is okay, as is []map[string][]string).
//
-// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
+// NOTE: only exported keys are encoded due to the use of reflection. Unexported
// keys are silently discarded.
type Encoder struct {
- // The string to use for a single indentation level. The default is two
- // spaces.
+ // String to use for a single indentation level; default is two spaces.
Indent string
- // hasWritten is whether we have written any output to w yet.
- hasWritten bool
w *bufio.Writer
+ hasWritten bool // written any output to w yet?
}
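Reviewer note: the tag options documented in the comment above, sketched on an illustrative struct (the `toml:"-"` skip option is assumed from the `opts.skip` check added further down):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type Config struct {
	Name    string `toml:"name,omitempty"` // dropped when ""
	Retries int    `toml:",omitzero"`      // dropped when 0
	Secret  string `toml:"-"`              // never encoded
}

func main() {
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(Config{Name: "db", Secret: "hunter2"}); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String()) // name = "db"
}
```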
// NewEncoder create a new Encoder.
@@ -130,17 +157,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
- // Special case. Time needs to be in ISO8601 format.
- // Special case. If we can marshal the type to text, then we used that.
- // Basically, this prevents the encoder for handling these types as
- // generic structs (or whatever the underlying type of a TextMarshaler is).
- switch t := rv.Interface().(type) {
- case time.Time, encoding.TextMarshaler:
+ // If we can marshal the type to text, then we use that. This prevents the
+ // encoder for handling these types as generic structs (or whatever the
+ // underlying type of a TextMarshaler is).
+ switch {
+ case isMarshaler(rv):
enc.writeKeyValue(key, rv, false)
return
- // TODO: #76 would make this superfluous after implemented.
- case Primitive:
- enc.encode(key, reflect.ValueOf(t.undecoded))
+ case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented.
+ enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
return
}
@@ -200,17 +225,49 @@ func (enc *Encoder) eElement(rv reflect.Value) {
enc.wf(v.In(time.UTC).Format(format))
}
return
+ case Marshaler:
+ s, err := v.MarshalTOML()
+ if err != nil {
+ encPanic(err)
+ }
+ if s == nil {
+ encPanic(errors.New("MarshalTOML returned nil and no error"))
+ }
+ enc.w.Write(s)
+ return
case encoding.TextMarshaler:
- // Use text marshaler if it's available for this value.
- if s, err := v.MarshalText(); err != nil {
+ s, err := v.MarshalText()
+ if err != nil {
encPanic(err)
- } else {
- enc.writeQuoted(string(s))
}
+ if s == nil {
+ encPanic(errors.New("MarshalText returned nil and no error"))
+ }
+ enc.writeQuoted(string(s))
return
+ case time.Duration:
+ enc.writeQuoted(v.String())
+ return
+ case json.Number:
+ n, _ := rv.Interface().(json.Number)
+
+ if n == "" { /// Useful zero value.
+ enc.w.WriteByte('0')
+ return
+ } else if v, err := n.Int64(); err == nil {
+ enc.eElement(reflect.ValueOf(v))
+ return
+ } else if v, err := n.Float64(); err == nil {
+ enc.eElement(reflect.ValueOf(v))
+ return
+ }
+ encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n))
}
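Reviewer note: a sketch of the `json.Number` support this change adds on both sides — `unifyString` in decode.go accepts TOML integers/floats into the string-backed number, and the encoder case above writes it back out as a bare number:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct{ N json.Number }
	if _, err := toml.Decode("n = 42", &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.N) // 42 (kept as a string-backed number)
}
```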
switch rv.Kind() {
+ case reflect.Ptr:
+ enc.eElement(rv.Elem())
+ return
case reflect.String:
enc.writeQuoted(rv.String())
case reflect.Bool:
@@ -246,7 +303,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.Interface:
enc.eElement(rv.Elem())
default:
- encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
+ encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
}
}
@@ -260,14 +317,14 @@ func floatAddDecimal(fstr string) string {
}
func (enc *Encoder) writeQuoted(s string) {
- enc.wf("\"%s\"", quotedReplacer.Replace(s))
+ enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
- elem := rv.Index(i)
+ elem := eindirect(rv.Index(i))
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
@@ -281,12 +338,12 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
- trv := rv.Index(i)
+ trv := eindirect(rv.Index(i))
if isNil(trv) {
continue
}
enc.newline()
- enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.wf("%s[[%s]]", enc.indentStr(key), key)
enc.newline()
enc.eMapOrStruct(key, trv, false)
}
@@ -299,14 +356,14 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
enc.newline()
}
if len(key) > 0 {
- enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.wf("%s[%s]", enc.indentStr(key), key)
enc.newline()
}
enc.eMapOrStruct(key, rv, false)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
- switch rv := eindirect(rv); rv.Kind() {
+ switch rv.Kind() {
case reflect.Map:
enc.eMap(key, rv, inline)
case reflect.Struct:
@@ -328,7 +385,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
- if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+ if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
@@ -338,7 +395,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys)
for i, mapKey := range mapKeys {
- val := rv.MapIndex(reflect.ValueOf(mapKey))
+ val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
if isNil(val) {
continue
}
@@ -364,6 +421,15 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
}
+const is32Bit = (32 << (^uint(0) >> 63)) == 32
+
+func pointerTo(t reflect.Type) reflect.Type {
+ if t.Kind() == reflect.Ptr {
+ return pointerTo(t.Elem())
+ }
+ return t
+}
+
func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table then all keys under it will be in that
@@ -380,38 +446,42 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
- if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
+ isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct
+ if f.PkgPath != "" && !isEmbed { /// Skip unexported fields.
+ continue
+ }
+ opts := getOptions(f.Tag)
+ if opts.skip {
continue
}
- frv := rv.Field(i)
+ frv := eindirect(rv.Field(i))
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
//
// Non-struct anonymous fields use the normal encoding logic.
- if f.Anonymous {
- t := f.Type
- switch t.Kind() {
- case reflect.Struct:
- if getOptions(f.Tag).name == "" {
- addFields(t, frv, append(start, f.Index...))
- continue
- }
- case reflect.Ptr:
- if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
- if !frv.IsNil() {
- addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
- }
- continue
- }
+ if isEmbed {
+ if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct {
+ addFields(frv.Type(), frv, append(start, f.Index...))
+ continue
}
}
- if typeIsHash(tomlTypeOfGo(frv)) {
+ if typeIsTable(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
- fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+ // Copy so it works correctly on 32bit archs; not clear why this
+ // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
+ // This also works fine on 64bit, but 32bit archs are somewhat
+ // rare and this is a wee bit faster.
+ if is32Bit {
+ copyStart := make([]int, len(start))
+ copy(copyStart, start)
+ fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
+ } else {
+ fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+ }
}
}
}
@@ -420,7 +490,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
writeFields := func(fields [][]int) {
for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex)
- fieldVal := rv.FieldByIndex(fieldIndex)
+ fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
if isNil(fieldVal) { /// Don't write anything for nil fields.
continue
@@ -462,17 +532,32 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
}
}
-// tomlTypeName returns the TOML type name of the Go value's type. It is
-// used to determine whether the types of array elements are mixed (which is
-// forbidden). If the Go value is nil, then it is illegal for it to be an array
-// element, and valueIsNil is returned as true.
-
-// Returns the TOML type of a Go value. The type may be `nil`, which means
-// no concrete TOML type could be found.
+// tomlTypeOfGo returns the TOML type name of the Go value's type.
+//
+// It is used to determine whether the types of array elements are mixed (which
+// is forbidden). If the Go value is nil, then it is illegal for it to be an
+// array element, and valueIsNil is returned as true.
+//
+// The type may be `nil`, which means no concrete TOML type could be found.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
+
+ if rv.Kind() == reflect.Struct {
+ if rv.Type() == timeType {
+ return tomlDatetime
+ }
+ if isMarshaler(rv) {
+ return tomlString
+ }
+ return tomlHash
+ }
+
+ if isMarshaler(rv) {
+ return tomlString
+ }
+
switch rv.Kind() {
case reflect.Bool:
return tomlBool
@@ -484,7 +569,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
- if typeEqual(tomlHash, tomlArrayType(rv)) {
+ if isTableArray(rv) {
return tomlArrayHash
}
return tomlArray
@@ -494,56 +579,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
return tomlString
case reflect.Map:
return tomlHash
- case reflect.Struct:
- switch rv.Interface().(type) {
- case time.Time:
- return tomlDatetime
- case encoding.TextMarshaler:
- return tomlString
- default:
- // Someone used a pointer receiver: we can make it work for pointer
- // values.
- if rv.CanAddr() {
- _, ok := rv.Addr().Interface().(encoding.TextMarshaler)
- if ok {
- return tomlString
- }
- }
- return tomlHash
- }
default:
- _, ok := rv.Interface().(encoding.TextMarshaler)
- if ok {
- return tomlString
- }
encPanic(errors.New("unsupported type: " + rv.Kind().String()))
- panic("") // Need *some* return value
+ panic("unreachable")
}
}
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero length
-// slize). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
- return nil
+func isMarshaler(rv reflect.Value) bool {
+ return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
+}
+
+// isTableArray reports if all entries in the array or slice are a table.
+func isTableArray(arr reflect.Value) bool {
+ if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
+ return false
}
- /// Don't allow nil.
- rvlen := rv.Len()
- for i := 1; i < rvlen; i++ {
- if tomlTypeOfGo(rv.Index(i)) == nil {
+ ret := true
+ for i := 0; i < arr.Len(); i++ {
+ tt := tomlTypeOfGo(eindirect(arr.Index(i)))
+ // Don't allow nil.
+ if tt == nil {
encPanic(errArrayNilElement)
}
- }
- firstType := tomlTypeOfGo(rv.Index(0))
- if firstType == nil {
- encPanic(errArrayNilElement)
+ if ret && !typeEqual(tomlHash, tt) {
+ ret = false
+ }
}
- return firstType
+ return ret
}
type tagOptions struct {
@@ -588,6 +652,8 @@ func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
+ case reflect.Struct:
+ return reflect.Zero(rv.Type()).Interface() == rv.Interface()
case reflect.Bool:
return !rv.Bool()
}
@@ -604,7 +670,14 @@ func (enc *Encoder) newline() {
//
// key =
//
-// If inline is true it won't add a newline at the end.
+// This is also used for "k = v" in inline tables; so something like this will
+// be written in three calls:
+//
+// ┌────────────────────┐
+// │ ┌───┐ ┌─────┐│
+// v v v v vv
+// key = {k = v, k2 = v2}
+//
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
if len(key) == 0 {
encPanic(errNoKey)
@@ -617,7 +690,8 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
}
func (enc *Encoder) wf(format string, v ...interface{}) {
- if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+ _, err := fmt.Fprintf(enc.w, format, v...)
+ if err != nil {
encPanic(err)
}
enc.hasWritten = true
@@ -631,13 +705,25 @@ func encPanic(err error) {
panic(tomlEncodeError{err})
}
+// Resolve any level of pointers to the actual value (e.g. **string → string).
func eindirect(v reflect.Value) reflect.Value {
- switch v.Kind() {
- case reflect.Ptr, reflect.Interface:
- return eindirect(v.Elem())
- default:
+ if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
+ if isMarshaler(v) {
+ return v
+ }
+ if v.CanAddr() { /// Special case for marshalers; see #358.
+ if pv := v.Addr(); isMarshaler(pv) {
+ return pv
+ }
+ }
return v
}
+
+ if v.IsNil() {
+ return v
+ }
+
+ return eindirect(v.Elem())
}
func isNil(rv reflect.Value) bool {
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
new file mode 100644
index 00000000000..2ac24e77eb8
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -0,0 +1,276 @@
+package toml
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ParseError is returned when there is an error parsing the TOML syntax.
+//
+// For example invalid syntax, duplicate keys, etc.
+//
+// In addition to the error message itself, you can also print detailed location
+// information with context by using ErrorWithPosition():
+//
+// toml: error: Key 'fruit' was already created and cannot be used as an array.
+//
+// At line 4, column 2-7:
+//
+// 2 | fruit = []
+// 3 |
+// 4 | [[fruit]] # Not allowed
+// ^^^^^
+//
+// Furthermore, the ErrorWithUsage() can be used to print the above with some
+// more detailed usage guidance:
+//
+// toml: error: newlines not allowed within inline tables
+//
+// At line 1, column 18:
+//
+// 1 | x = [{ key = 42 #
+// ^
+//
+// Error help:
+//
+// Inline tables must always be on a single line:
+//
+// table = {key = 42, second = 43}
+//
+// It is invalid to split them over multiple lines like so:
+//
+// # INVALID
+// table = {
+// key = 42,
+// second = 43
+// }
+//
+// Use regular tables for this:
+//
+// [table]
+// key = 42
+// second = 43
+type ParseError struct {
+ Message string // Short technical message.
+ Usage string // Longer message with usage guidance; may be blank.
+ Position Position // Position of the error
+ LastKey string // Last parsed key, may be blank.
+ Line int // Line the error occurred. Deprecated: use Position.
+
+ err error
+ input string
+}
+
+// Position of an error.
+type Position struct {
+ Line int // Line number, starting at 1.
+ Start int // Start of error, as byte offset starting at 0.
+ Len int // Length in bytes.
+}
+
+func (pe ParseError) Error() string {
+ msg := pe.Message
+ if msg == "" { // Error from errorf()
+ msg = pe.err.Error()
+ }
+
+ if pe.LastKey == "" {
+ return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+ }
+ return fmt.Sprintf("toml: line %d (last key %q): %s",
+ pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition() returns the error with detailed location context.
+//
+// See the documentation on ParseError.
+func (pe ParseError) ErrorWithPosition() string {
+ if pe.input == "" { // Should never happen, but just in case.
+ return pe.Error()
+ }
+
+ var (
+ lines = strings.Split(pe.input, "\n")
+ col = pe.column(lines)
+ b = new(strings.Builder)
+ )
+
+ msg := pe.Message
+ if msg == "" {
+ msg = pe.err.Error()
+ }
+
+ // TODO: don't show control characters as literals? This may not show up
+ // well everywhere.
+
+ if pe.Position.Len == 1 {
+ fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
+ msg, pe.Position.Line, col+1)
+ } else {
+ fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
+ msg, pe.Position.Line, col, col+pe.Position.Len)
+ }
+ if pe.Position.Line > 2 {
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
+ }
+ if pe.Position.Line > 1 {
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
+ }
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
+ fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
+ return b.String()
+}
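Reviewer note: a sketch of consuming the new error type; assumes the decode fails with a `ParseError`, which syntax errors in this version return:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	// A newline inside an inline table is a syntax error.
	_, err := toml.Decode("x = [{ key = 42\n", &v)

	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Prints the "At line N, column M" context shown in the docs above.
		fmt.Println(perr.ErrorWithPosition())
	}
}
```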
+
+// ErrorWithUsage() returns the error with detailed location context and usage
+// guidance.
+//
+// See the documentation on ParseError.
+func (pe ParseError) ErrorWithUsage() string {
+ m := pe.ErrorWithPosition()
+ if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
+ lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
+ for i := range lines {
+ if lines[i] != "" {
+ lines[i] = " " + lines[i]
+ }
+ }
+ return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
+ }
+ return m
+}
+
+func (pe ParseError) column(lines []string) int {
+ var pos, col int
+ for i := range lines {
+ ll := len(lines[i]) + 1 // +1 for the removed newline
+ if pos+ll >= pe.Position.Start {
+ col = pe.Position.Start - pos
+ if col < 0 { // Should never happen, but just in case.
+ col = 0
+ }
+ break
+ }
+ pos += ll
+ }
+
+ return col
+}
+
+type (
+ errLexControl struct{ r rune }
+ errLexEscape struct{ r rune }
+ errLexUTF8 struct{ b byte }
+ errLexInvalidNum struct{ v string }
+ errLexInvalidDate struct{ v string }
+ errLexInlineTableNL struct{}
+ errLexStringNL struct{}
+ errParseRange struct {
+ i interface{} // int or float
+ size string // "int64", "uint16", etc.
+ }
+ errParseDuration struct{ d string }
+)
+
+func (e errLexControl) Error() string {
+ return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
+}
+func (e errLexControl) Usage() string { return "" }
+
+func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
+func (e errLexEscape) Usage() string { return usageEscape }
+func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
+func (e errLexUTF8) Usage() string { return "" }
+func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
+func (e errLexInvalidNum) Usage() string { return "" }
+func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
+func (e errLexInvalidDate) Usage() string { return "" }
+func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
+func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
+func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
+func (e errLexStringNL) Usage() string { return usageStringNewline }
+func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
+func (e errParseRange) Usage() string { return usageIntOverflow }
+func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
+func (e errParseDuration) Usage() string { return usageDuration }
+
+const usageEscape = `
+A '\' inside a "-delimited string is interpreted as an escape character.
+
+The following escape sequences are supported:
+\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
+
+To prevent a '\' from being recognized as an escape character, use either:
+
+- a ' or '''-delimited string; escape characters aren't processed in them; or
+- write two backslashes to get a single backslash: '\\'.
+
+If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
+instead of '\' will usually also work: "C:/Users/martin".
+`
+
+const usageInlineNewline = `
+Inline tables must always be on a single line:
+
+ table = {key = 42, second = 43}
+
+It is invalid to split them over multiple lines like so:
+
+ # INVALID
+ table = {
+ key = 42,
+ second = 43
+ }
+
+Use regular tables for this:
+
+ [table]
+ key = 42
+ second = 43
+`
+
+const usageStringNewline = `
+Strings must always be on a single line, and cannot span more than one line:
+
+ # INVALID
+ string = "Hello,
+ world!"
+
+Instead use """ or ''' to split strings over multiple lines:
+
+ string = """Hello,
+ world!"""
+`
+
+const usageIntOverflow = `
+This number is too large; this may be an error in the TOML, but it can also be
+a bug in the program that uses an integer type that is too small.
+
+The maximum and minimum values are:
+
+ size │ lowest │ highest
+ ───────┼────────────────┼──────────
+ int8 │ -128 │ 127
+ int16 │ -32,768 │ 32,767
+ int32 │ -2,147,483,648 │ 2,147,483,647
+ int64 │ -9.2 × 10¹⁸ │ 9.2 × 10¹⁸
+ uint8 │ 0 │ 255
+ uint16 │ 0 │ 65,535
+ uint32 │ 0 │ 4,294,967,295
+ uint64 │ 0 │ 1.8 × 10¹⁹
+
+int refers to int32 on 32-bit systems and int64 on 64-bit systems.
+`
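
A value that overflows the target integer type surfaces errParseRange together with the table above. A small sketch, assuming decoding checks the target type's range (field name and value are illustrative):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct{ Port uint8 }
	// 9000 does not fit in a uint8 (0–255), so this should fail with a
	// range error whose help text is the table of integer bounds.
	_, err := toml.Decode(`Port = 9000`, &cfg)
	fmt.Println(err != nil) // true
}
```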
+
+const usageDuration = `
+A duration must be of the form "number<unit>", without any spaces. Valid units are:
+
+ ns nanoseconds (billionth of a second)
+ us, µs microseconds (millionth of a second)
+ ms milliseconds (thousandth of a second)
+ s seconds
+ m minutes
+ h hours
+
+You can combine multiple units; for example "5m10s" for 5 minutes and 10
+seconds.
+`
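
errParseDuration pairs with this text when a string fails duration parsing. A hedged sketch, assuming the decoder accepts such strings for time.Duration fields as the message implies:

```go
package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct{ Timeout time.Duration }
	// "5m10s" combines two units as described above; "5 m" (with a space)
	// would instead fail and print the duration help text.
	if _, err := toml.Decode(`Timeout = "5m10s"`, &cfg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cfg.Timeout) // 5m10s
}
```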
diff --git a/vendor/github.com/BurntSushi/toml/go.sum b/vendor/github.com/BurntSushi/toml/go.sum
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index adc4eb5d537..28ed4dd353c 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -37,28 +37,14 @@ const (
itemInlineTableEnd
)
-const (
- eof = 0
- comma = ','
- tableStart = '['
- tableEnd = ']'
- arrayTableStart = '['
- arrayTableEnd = ']'
- tableSep = '.'
- keySep = '='
- arrayStart = '['
- arrayEnd = ']'
- commentStart = '#'
- stringStart = '"'
- stringEnd = '"'
- rawStringStart = '\''
- rawStringEnd = '\''
- inlineTableStart = '{'
- inlineTableEnd = '}'
-)
+const eof = 0
type stateFn func(lx *lexer) stateFn
+func (p Position) String() string {
+ return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len)
+}
+
type lexer struct {
input string
start int
@@ -67,26 +53,26 @@ type lexer struct {
state stateFn
items chan item
- // Allow for backing up up to four runes.
- // This is necessary because TOML contains 3-rune tokens (""" and ''').
+ // Allow for backing up up to 4 runes. This is necessary because TOML
+ // contains 3-rune tokens (""" and ''').
prevWidths [4]int
- nprev int // how many of prevWidths are in use
- // If we emit an eof, we can still back up, but it is not OK to call
- // next again.
- atEOF bool
+ nprev int // how many of prevWidths are in use
+ atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again.
// A stack of state functions used to maintain context.
- // The idea is to reuse parts of the state machine in various places.
- // For example, values can appear at the top level or within arbitrarily
- // nested arrays. The last state on the stack is used after a value has
- // been lexed. Similarly for comments.
+ //
+ // The idea is to reuse parts of the state machine in various places. For
+ // example, values can appear at the top level or within arbitrarily nested
+ // arrays. The last state on the stack is used after a value has been lexed.
+ // Similarly for comments.
stack []stateFn
}
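
The stack field is what lets one state function serve several contexts: a caller pushes its continuation, and the shared state finishes by popping it. A standalone sketch of the pattern (the names here are illustrative, not part of the lexer):

```go
package main

import "fmt"

type lexSketch struct {
	stack []stateFn
	out   []string
}

type stateFn func(*lexSketch) stateFn

func (lx *lexSketch) push(s stateFn) { lx.stack = append(lx.stack, s) }
func (lx *lexSketch) pop() stateFn {
	s := lx.stack[len(lx.stack)-1]
	lx.stack = lx.stack[:len(lx.stack)-1]
	return s
}

// comment is a shared state: it records a comment, then resumes whatever
// state pushed it, just like lexCommentStart/lx.pop() above.
func comment(lx *lexSketch) stateFn {
	lx.out = append(lx.out, "comment")
	return lx.pop()
}

func top(lx *lexSketch) stateFn {
	lx.out = append(lx.out, "top")
	lx.push(top) // return to the top level after the comment
	return comment
}

func main() {
	lx := &lexSketch{}
	state := stateFn(top)
	for i := 0; i < 3 && state != nil; i++ {
		state = state(lx)
	}
	fmt.Println(lx.out) // [top comment top]
}
```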
type item struct {
- typ itemType
- val string
- line int
+ typ itemType
+ val string
+ err error
+ pos Position
}
func (lx *lexer) nextItem() item {
@@ -96,7 +82,7 @@ func (lx *lexer) nextItem() item {
return item
default:
lx.state = lx.state(lx)
- //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
+ //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack)
}
}
}
@@ -105,9 +91,9 @@ func lex(input string) *lexer {
lx := &lexer{
input: input,
state: lexTop,
- line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
+ line: 1,
}
return lx
}
@@ -129,13 +115,30 @@ func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
+func (lx lexer) getPos() Position {
+ p := Position{
+ Line: lx.line,
+ Start: lx.start,
+ Len: lx.pos - lx.start,
+ }
+ if p.Len <= 0 {
+ p.Len = 1
+ }
+ return p
+}
+
func (lx *lexer) emit(typ itemType) {
- lx.items <- item{typ, lx.current(), lx.line}
+ // Needed for multiline strings ending with an incomplete UTF-8 sequence.
+ if lx.start > lx.pos {
+ lx.error(errLexUTF8{lx.input[lx.pos]})
+ return
+ }
+ lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
- lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+ lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())}
lx.start = lx.pos
}
@@ -160,7 +163,13 @@ func (lx *lexer) next() (r rune) {
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
if r == utf8.RuneError {
- lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos])
+ lx.error(errLexUTF8{lx.input[lx.pos]})
+ return utf8.RuneError
+ }
+
+ // Note: don't use peek() here, as this calls next().
+ if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) {
+ lx.errorControlChar(r)
return utf8.RuneError
}
@@ -188,6 +197,7 @@ func (lx *lexer) backup() {
lx.prevWidths[1] = lx.prevWidths[2]
lx.prevWidths[2] = lx.prevWidths[3]
lx.nprev--
+
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
@@ -223,18 +233,58 @@ func (lx *lexer) skip(pred func(rune) bool) {
}
}
-// errorf stops all lexing by emitting an error and returning `nil`.
+// error stops all lexing by emitting an error and returning `nil`.
+//
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
+func (lx *lexer) error(err error) stateFn {
+ if lx.atEOF {
+ return lx.errorPrevLine(err)
+ }
+ lx.items <- item{typ: itemError, pos: lx.getPos(), err: err}
+ return nil
+}
+
+// errorPrevLine is like error(), but sets the position to the last column of
+// the previous line.
+//
+// This is so that unexpected EOF or NL errors don't show on a new blank line.
+func (lx *lexer) errorPrevLine(err error) stateFn {
+ pos := lx.getPos()
+ pos.Line--
+ pos.Len = 1
+ pos.Start = lx.pos - 1
+ lx.items <- item{typ: itemError, pos: pos, err: err}
+ return nil
+}
+
+// errorPos is like error(), but allows explicitly setting the position.
+func (lx *lexer) errorPos(start, length int, err error) stateFn {
+ pos := lx.getPos()
+ pos.Start = start
+ pos.Len = length
+ lx.items <- item{typ: itemError, pos: pos, err: err}
+ return nil
+}
+
+// errorf is like error, and creates a new error.
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
- lx.items <- item{
- itemError,
- fmt.Sprintf(format, values...),
- lx.line,
+ if lx.atEOF {
+ pos := lx.getPos()
+ pos.Line--
+ pos.Len = 1
+ pos.Start = lx.pos - 1
+ lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
+ return nil
}
+ lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)}
return nil
}
+func (lx *lexer) errorControlChar(cc rune) stateFn {
+ return lx.errorPos(lx.pos-1, 1, errLexControl{cc})
+}
+
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
@@ -242,10 +292,10 @@ func lexTop(lx *lexer) stateFn {
return lexSkip(lx, lexTop)
}
switch r {
- case commentStart:
+ case '#':
lx.push(lexTop)
return lexCommentStart
- case tableStart:
+ case '[':
return lexTableStart
case eof:
if lx.pos > lx.start {
@@ -268,7 +318,7 @@ func lexTop(lx *lexer) stateFn {
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
- case r == commentStart:
+ case r == '#':
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
@@ -292,7 +342,7 @@ func lexTopEnd(lx *lexer) stateFn {
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
- if lx.peek() == arrayTableStart {
+ if lx.peek() == '[' {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
@@ -309,10 +359,8 @@ func lexTableEnd(lx *lexer) stateFn {
}
func lexArrayTableEnd(lx *lexer) stateFn {
- if r := lx.next(); r != arrayTableEnd {
- return lx.errorf(
- "expected end of table array name delimiter %q, but got %q instead",
- arrayTableEnd, r)
+ if r := lx.next(); r != ']' {
+ return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
@@ -321,11 +369,11 @@ func lexArrayTableEnd(lx *lexer) stateFn {
func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
- case r == tableEnd || r == eof:
+ case r == ']' || r == eof:
return lx.errorf("unexpected end of table name (table names cannot be empty)")
- case r == tableSep:
+ case r == '.':
return lx.errorf("unexpected table separator (table names cannot be empty)")
- case r == stringStart || r == rawStringStart:
+ case r == '"' || r == '\'':
lx.ignore()
lx.push(lexTableNameEnd)
return lexQuotedName
@@ -342,10 +390,10 @@ func lexTableNameEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
- case r == tableSep:
+ case r == '.':
lx.ignore()
return lexTableNameStart
- case r == tableEnd:
+ case r == ']':
return lx.pop()
default:
return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
@@ -379,10 +427,10 @@ func lexQuotedName(lx *lexer) stateFn {
switch {
case isWhitespace(r):
return lexSkip(lx, lexValue)
- case r == stringStart:
+ case r == '"':
lx.ignore() // ignore the '"'
return lexString
- case r == rawStringStart:
+ case r == '\'':
lx.ignore() // ignore the "'"
return lexRawString
case r == eof:
@@ -400,7 +448,7 @@ func lexKeyStart(lx *lexer) stateFn {
return lx.errorf("unexpected '=': key name appears blank")
case r == '.':
return lx.errorf("unexpected '.': keys cannot start with a '.'")
- case r == stringStart || r == rawStringStart:
+ case r == '"' || r == '\'':
lx.ignore()
fallthrough
default: // Bare key
@@ -416,7 +464,7 @@ func lexKeyNameStart(lx *lexer) stateFn {
return lx.errorf("unexpected '='")
case r == '.':
return lx.errorf("unexpected '.'")
- case r == stringStart || r == rawStringStart:
+ case r == '"' || r == '\'':
lx.ignore()
lx.push(lexKeyEnd)
return lexQuotedName
@@ -434,7 +482,7 @@ func lexKeyEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
case r == eof:
- return lx.errorf("unexpected EOF; expected key separator %q", keySep)
+ return lx.errorf("unexpected EOF; expected key separator '='")
case r == '.':
lx.ignore()
return lexKeyNameStart
@@ -461,17 +509,17 @@ func lexValue(lx *lexer) stateFn {
return lexNumberOrDateStart
}
switch r {
- case arrayStart:
+ case '[':
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
- case inlineTableStart:
+ case '{':
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
- case stringStart:
- if lx.accept(stringStart) {
- if lx.accept(stringStart) {
+ case '"':
+ if lx.accept('"') {
+ if lx.accept('"') {
lx.ignore() // Ignore """
return lexMultilineString
}
@@ -479,9 +527,9 @@ func lexValue(lx *lexer) stateFn {
}
lx.ignore() // ignore the '"'
return lexString
- case rawStringStart:
- if lx.accept(rawStringStart) {
- if lx.accept(rawStringStart) {
+ case '\'':
+ if lx.accept('\'') {
+ if lx.accept('\'') {
lx.ignore() // Ignore """
return lexMultilineRawString
}
@@ -520,14 +568,12 @@ func lexArrayValue(lx *lexer) stateFn {
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
- case r == commentStart:
+ case r == '#':
lx.push(lexArrayValue)
return lexCommentStart
- case r == comma:
+ case r == ',':
return lx.errorf("unexpected comma")
- case r == arrayEnd:
- // NOTE(caleb): The spec isn't clear about whether you can have
- // a trailing comma or not, so we'll allow it.
+ case r == ']':
return lexArrayEnd
}
@@ -540,22 +586,20 @@ func lexArrayValue(lx *lexer) stateFn {
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
+ switch r := lx.next(); {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
- case r == commentStart:
+ case r == '#':
lx.push(lexArrayValueEnd)
return lexCommentStart
- case r == comma:
+ case r == ',':
lx.ignore()
return lexArrayValue // move on to the next value
- case r == arrayEnd:
+ case r == ']':
return lexArrayEnd
+ default:
+ return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r))
}
- return lx.errorf(
- "expected a comma or array terminator %q, but got %s instead",
- arrayEnd, runeOrEOF(r))
}
// lexArrayEnd finishes the lexing of an array.
@@ -574,13 +618,13 @@ func lexInlineTableValue(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
+ return lx.errorPrevLine(errLexInlineTableNL{})
+ case r == '#':
lx.push(lexInlineTableValue)
return lexCommentStart
- case r == comma:
+ case r == ',':
return lx.errorf("unexpected comma")
- case r == inlineTableEnd:
+ case r == '}':
return lexInlineTableEnd
}
lx.backup()
@@ -596,23 +640,21 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
+ return lx.errorPrevLine(errLexInlineTableNL{})
+ case r == '#':
lx.push(lexInlineTableValueEnd)
return lexCommentStart
- case r == comma:
+ case r == ',':
lx.ignore()
lx.skip(isWhitespace)
if lx.peek() == '}' {
return lx.errorf("trailing comma not allowed in inline tables")
}
return lexInlineTableValue
- case r == inlineTableEnd:
+ case r == '}':
return lexInlineTableEnd
default:
- return lx.errorf(
- "expected a comma or an inline table terminator %q, but got %s instead",
- inlineTableEnd, runeOrEOF(r))
+ return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r))
}
}
@@ -638,14 +680,12 @@ func lexString(lx *lexer) stateFn {
switch {
case r == eof:
return lx.errorf(`unexpected EOF; expected '"'`)
- case isControl(r) || r == '\r':
- return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r):
- return lx.errorf("strings cannot contain newlines")
+ return lx.errorPrevLine(errLexStringNL{})
case r == '\\':
lx.push(lexString)
return lexStringEscape
- case r == stringEnd:
+ case r == '"':
lx.backup()
lx.emit(itemString)
lx.next()
@@ -660,26 +700,33 @@ func lexString(lx *lexer) stateFn {
func lexMultilineString(lx *lexer) stateFn {
r := lx.next()
switch r {
+ default:
+ return lexMultilineString
case eof:
return lx.errorf(`unexpected EOF; expected '"""'`)
- case '\r':
- if lx.peek() != '\n' {
- return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
- }
- return lexMultilineString
case '\\':
return lexMultilineStringEscape
- case stringEnd:
+ case '"':
/// Found " → try to read two more "".
- if lx.accept(stringEnd) {
- if lx.accept(stringEnd) {
+ if lx.accept('"') {
+ if lx.accept('"') {
/// Peek ahead: the string can contain " and "", including at the
/// end: """str"""""
/// 6 or more at the end, however, is an error.
- if lx.peek() == stringEnd {
+ if lx.peek() == '"' {
/// Check if we already lexed 5 's; if so we have 6 now, and
/// that's just too many man!
- if strings.HasSuffix(lx.current(), `"""""`) {
+ ///
+ /// Second check is for the edge case:
+ ///
+ /// two quotes allowed.
+ /// vv
+ /// """lol \""""""
+ /// ^^ ^^^---- closing three
+ /// escaped
+ ///
+ /// Bit ugly, but it works.
+ if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) {
return lx.errorf(`unexpected '""""""'`)
}
lx.backup()
@@ -699,12 +746,8 @@ func lexMultilineString(lx *lexer) stateFn {
}
lx.backup()
}
+ return lexMultilineString
}
-
- if isControl(r) {
- return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
- }
- return lexMultilineString
}
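
The suffix checks above implement the rule that a multiline string may end with one or two quote characters, but not with three or more before the closing delimiter. A hedged illustration of the accepted and rejected forms:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ S string }

	// Two quotes before the closing """ are part of the value: S is `str""`.
	if _, err := toml.Decode(`S = """str"""""`, &v); err == nil {
		fmt.Printf("%q\n", v.S) // "str\"\""
	}

	// Six quotes in a row at the end is ambiguous and rejected.
	_, err := toml.Decode(`S = """str""""""`, &v)
	fmt.Println(err != nil) // true
}
```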
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
@@ -712,20 +755,19 @@ func lexMultilineString(lx *lexer) stateFn {
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
+ default:
+ return lexRawString
case r == eof:
return lx.errorf(`unexpected EOF; expected "'"`)
- case isControl(r) || r == '\r':
- return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r):
- return lx.errorf("strings cannot contain newlines")
- case r == rawStringEnd:
+ return lx.errorPrevLine(errLexStringNL{})
+ case r == '\'':
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
- return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
@@ -734,21 +776,18 @@ func lexRawString(lx *lexer) stateFn {
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()
switch r {
+ default:
+ return lexMultilineRawString
case eof:
return lx.errorf(`unexpected EOF; expected "'''"`)
- case '\r':
- if lx.peek() != '\n' {
- return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
- }
- return lexMultilineRawString
- case rawStringEnd:
+ case '\'':
/// Found ' → try to read two more ''.
- if lx.accept(rawStringEnd) {
- if lx.accept(rawStringEnd) {
+ if lx.accept('\'') {
+ if lx.accept('\'') {
/// Peek ahead: the string can contain ' and '', including at the
/// end: '''str'''''
/// 6 or more at the end, however, is an error.
- if lx.peek() == rawStringEnd {
+ if lx.peek() == '\'' {
/// Check if we already lexed 5 's; if so we have 6 now, and
/// that's just too many man!
if strings.HasSuffix(lx.current(), "'''''") {
@@ -771,19 +810,14 @@ func lexMultilineRawString(lx *lexer) stateFn {
}
lx.backup()
}
+ return lexMultilineRawString
}
-
- if isControl(r) {
- return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
- }
- return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
- // Handle the special case first:
- if isNL(lx.next()) {
+ if isNL(lx.next()) { /// \ escaping newline.
return lexMultilineString
}
lx.backup()
@@ -817,8 +851,7 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U':
return lexLongUnicodeEscape
}
- return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+
- `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
+ return lx.error(errLexEscape{r})
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
@@ -1108,8 +1141,6 @@ func lexComment(lx *lexer) stateFn {
lx.backup()
lx.emit(itemText)
return lx.pop()
- case isControl(r):
- return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
default:
return lexComment
}
@@ -1121,52 +1152,6 @@ func lexSkip(lx *lexer, nextState stateFn) stateFn {
return nextState
}
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
- return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
- return r == '\n' || r == '\r'
-}
-
-// Control characters except \n, \t
-func isControl(r rune) bool {
- switch r {
- case '\t', '\r', '\n':
- return false
- default:
- return (r >= 0x00 && r <= 0x1f) || r == 0x7f
- }
-}
-
-func isDigit(r rune) bool {
- return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
- return (r >= '0' && r <= '9') ||
- (r >= 'a' && r <= 'f') ||
- (r >= 'A' && r <= 'F')
-}
-
-func isOctal(r rune) bool {
- return r >= '0' && r <= '7'
-}
-
-func isBinary(r rune) bool {
- return r == '0' || r == '1'
-}
-
-func isBareKeyChar(r rune) bool {
- return (r >= 'A' && r <= 'Z') ||
- (r >= 'a' && r <= 'z') ||
- (r >= '0' && r <= '9') ||
- r == '_' ||
- r == '-'
-}
-
func (s stateFn) String() string {
name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
if i := strings.LastIndexByte(name, '.'); i > -1 {
@@ -1223,3 +1208,26 @@ func (itype itemType) String() string {
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
+
+func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
+func isNL(r rune) bool { return r == '\n' || r == '\r' }
+func isControl(r rune) bool { // Control characters except \t, \r, \n
+ switch r {
+ case '\t', '\r', '\n':
+ return false
+ default:
+ return (r >= 0x00 && r <= 0x1f) || r == 0x7f
+ }
+}
+func isDigit(r rune) bool { return r >= '0' && r <= '9' }
+func isBinary(r rune) bool { return r == '0' || r == '1' }
+func isOctal(r rune) bool { return r >= '0' && r <= '7' }
+func isHexadecimal(r rune) bool {
+ return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
+}
+func isBareKeyChar(r rune) bool {
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' || r == '-'
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/meta.go
similarity index 75%
rename from vendor/github.com/BurntSushi/toml/decode_meta.go
rename to vendor/github.com/BurntSushi/toml/meta.go
index ad8899c6cf8..d284f2a0c8a 100644
--- a/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ b/vendor/github.com/BurntSushi/toml/meta.go
@@ -1,34 +1,40 @@
package toml
-import "strings"
+import (
+ "strings"
+)
-// MetaData allows access to meta information about TOML data that may not be
-// inferable via reflection. In particular, whether a key has been defined and
-// the TOML type of a key.
+// MetaData allows access to meta information about TOML data that's not
+// accessible otherwise.
+//
+// It allows checking if a key is defined in the TOML data, whether any keys
+// were undecoded, and the TOML type of a key.
type MetaData struct {
+ context Key // Used only during decoding.
+
+ keyInfo map[string]keyInfo
mapping map[string]interface{}
- types map[string]tomlType
keys []Key
- decoded map[string]bool
- context Key // Used only during decoding.
+ decoded map[string]struct{}
+ data []byte // Input file; for errors.
}
// IsDefined reports if the key exists in the TOML data.
//
// The key should be specified hierarchically, for example to access the TOML
-// key "a.b.c" you would use:
+// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive.
//
-// IsDefined("a", "b", "c")
-//
-// IsDefined will return false if an empty key given. Keys are case sensitive.
+// Returns false for an empty key.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
- var hash map[string]interface{}
- var ok bool
- var hashOrVal interface{} = md.mapping
+ var (
+ hash map[string]interface{}
+ ok bool
+ hashOrVal interface{} = md.mapping
+ )
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
@@ -45,51 +51,12 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type will return the empty string if given an empty key or a key that does
// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
- fullkey := strings.Join(key, ".")
- if typ, ok := md.types[fullkey]; ok {
- return typ.typeString()
+ if ki, ok := md.keyInfo[Key(key).String()]; ok {
+ return ki.tomlType.typeString()
}
return ""
}
-// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
-// values of this type.
-type Key []string
-
-func (k Key) String() string { return strings.Join(k, ".") }
-
-func (k Key) maybeQuotedAll() string {
- var ss []string
- for i := range k {
- ss = append(ss, k.maybeQuoted(i))
- }
- return strings.Join(ss, ".")
-}
-
-func (k Key) maybeQuoted(i int) string {
- if k[i] == "" {
- return `""`
- }
- quote := false
- for _, c := range k[i] {
- if !isBareKeyChar(c) {
- quote = true
- break
- }
- }
- if quote {
- return `"` + quotedReplacer.Replace(k[i]) + `"`
- }
- return k[i]
-}
-
-func (k Key) add(piece string) Key {
- newKey := make(Key, len(k)+1)
- copy(newKey, k)
- newKey[len(k)] = piece
- return newKey
-}
-
// Keys returns a slice of every key in the TOML data, including key groups.
//
// Each key is itself a slice, where the first element is the top of the
@@ -115,9 +82,40 @@ func (md *MetaData) Keys() []Key {
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
- if !md.decoded[key.String()] {
+ if _, ok := md.decoded[key.String()]; !ok {
undecoded = append(undecoded, key)
}
}
return undecoded
}
+
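A short sketch of the accessors documented above; Undecoded reports keys the target struct ignored, using the decoded set maintained during Decode (the TOML input is illustrative):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct{ Name string }
	md, err := toml.Decode("name = \"x\"\n[extra]\nkey = 1\n", &cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(md.IsDefined("extra", "key")) // true
	fmt.Println(md.Type("extra", "key"))      // Integer
	fmt.Println(md.Undecoded())               // [extra extra.key]
}
```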
+// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
+// values of this type.
+type Key []string
+
+func (k Key) String() string {
+ ss := make([]string, len(k))
+ for i := range k {
+ ss[i] = k.maybeQuoted(i)
+ }
+ return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+ if k[i] == "" {
+ return `""`
+ }
+ for _, c := range k[i] {
+ if !isBareKeyChar(c) {
+ return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
+ }
+ }
+ return k[i]
+}
+
+func (k Key) add(piece string) Key {
+ newKey := make(Key, len(k)+1)
+ copy(newKey, k)
+ newKey[len(k)] = piece
+ return newKey
+}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index d9ae5db9464..d2542d6f926 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -1,7 +1,6 @@
package toml
import (
- "errors"
"fmt"
"strconv"
"strings"
@@ -12,35 +11,29 @@ import (
)
type parser struct {
- mapping map[string]interface{}
- types map[string]tomlType
- lx *lexer
+ lx *lexer
+ context Key // Full key for the current hash in scope.
+ currentKey string // Base key name for everything except hashes.
+ pos Position // Current position in the TOML file.
- ordered []Key // List of keys in the order that they appear in the TOML data.
- context Key // Full key for the current hash in scope.
- currentKey string // Base key name for everything except hashes.
- approxLine int // Rough approximation of line number
- implicits map[string]bool // Record implied keys (e.g. 'key.group.names').
-}
+ ordered []Key // List of keys in the order that they appear in the TOML data.
-// ParseError is used when a file can't be parsed: for example invalid integer
-// literals, duplicate keys, etc.
-type ParseError struct {
- Message string
- Line int
- LastKey string
+ keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
+ mapping map[string]interface{} // Map keyname → key value.
+ implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
}
-func (pe ParseError) Error() string {
- return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
- pe.Line, pe.LastKey, pe.Message)
+type keyInfo struct {
+ pos Position
+ tomlType tomlType
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
- var ok bool
- if err, ok = r.(ParseError); ok {
+ if pErr, ok := r.(ParseError); ok {
+ pErr.input = data
+ err = pErr
return
}
panic(r)
@@ -60,16 +53,21 @@ func parse(data string) (p *parser, err error) {
if len(data) < 6 {
ex = len(data)
}
- if strings.ContainsRune(data[:ex], 0) {
- return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8")
+ if i := strings.IndexRune(data[:ex], 0); i > -1 {
+ return nil, ParseError{
+ Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
+ Position: Position{Line: 1, Start: i, Len: 1},
+ Line: 1,
+ input: data,
+ }
}
p = &parser{
+ keyInfo: make(map[string]keyInfo),
mapping: make(map[string]interface{}),
- types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
- implicits: make(map[string]bool),
+ implicits: make(map[string]struct{}),
}
for {
item := p.next()
@@ -82,24 +80,57 @@ func parse(data string) (p *parser, err error) {
return p, nil
}
+func (p *parser) panicErr(it item, err error) {
+ panic(ParseError{
+ err: err,
+ Position: it.pos,
+ Line: it.pos.Line,
+ LastKey: p.current(),
+ })
+}
+
+func (p *parser) panicItemf(it item, format string, v ...interface{}) {
+ panic(ParseError{
+ Message: fmt.Sprintf(format, v...),
+ Position: it.pos,
+ Line: it.pos.Line,
+ LastKey: p.current(),
+ })
+}
+
func (p *parser) panicf(format string, v ...interface{}) {
- msg := fmt.Sprintf(format, v...)
panic(ParseError{
- Message: msg,
- Line: p.approxLine,
- LastKey: p.current(),
+ Message: fmt.Sprintf(format, v...),
+ Position: p.pos,
+ Line: p.pos.Line,
+ LastKey: p.current(),
})
}
func (p *parser) next() item {
it := p.lx.nextItem()
- //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
+ //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val)
if it.typ == itemError {
- p.panicf("%s", it.val)
+ if it.err != nil {
+ panic(ParseError{
+ Position: it.pos,
+ Line: it.pos.Line,
+ LastKey: p.current(),
+ err: it.err,
+ })
+ }
+
+ p.panicItemf(it, "%s", it.val)
}
return it
}
+func (p *parser) nextPos() item {
+ it := p.next()
+ p.pos = it.pos
+ return it
+}
+
func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
@@ -119,11 +150,9 @@ func (p *parser) assertEqual(expected, got itemType) {
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart: // # ..
- p.approxLine = item.line
p.expect(itemText)
case itemTableStart: // [ .. ]
- name := p.next()
- p.approxLine = name.line
+ name := p.nextPos()
var key Key
for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
@@ -132,11 +161,10 @@ func (p *parser) topLevel(item item) {
p.assertEqual(itemTableEnd, name.typ)
p.addContext(key, false)
- p.setType("", tomlHash)
+ p.setType("", tomlHash, item.pos)
p.ordered = append(p.ordered, key)
case itemArrayTableStart: // [[ .. ]]
- name := p.next()
- p.approxLine = name.line
+ name := p.nextPos()
var key Key
for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
@@ -145,13 +173,12 @@ func (p *parser) topLevel(item item) {
p.assertEqual(itemArrayTableEnd, name.typ)
p.addContext(key, true)
- p.setType("", tomlArrayHash)
+ p.setType("", tomlArrayHash, item.pos)
p.ordered = append(p.ordered, key)
case itemKeyStart: // key = ..
outerContext := p.context
/// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
- k := p.next()
- p.approxLine = k.line
+ k := p.nextPos()
var key Key
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
@@ -169,8 +196,9 @@ func (p *parser) topLevel(item item) {
}
/// Set value.
- val, typ := p.value(p.next(), false)
- p.set(p.currentKey, val, typ)
+ vItem := p.next()
+ val, typ := p.value(vItem, false)
+ p.set(p.currentKey, val, typ, vItem.pos)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Remove the context we added (preserving any context from [tbl] lines).
@@ -206,9 +234,9 @@ var datetimeRepl = strings.NewReplacer(
func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
switch it.typ {
case itemString:
- return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+ return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
case itemMultilineString:
- return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
+ return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
@@ -240,10 +268,10 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
if !numUnderscoresOK(it.val) {
- p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val)
+ p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
}
if numHasLeadingZero(it.val) {
- p.panicf("Invalid integer %q: cannot have leading zeroes", it.val)
+ p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val)
}
num, err := strconv.ParseInt(it.val, 0, 64)
@@ -254,7 +282,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
- p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val)
+ p.panicErr(it, errParseRange{i: it.val, size: "int64"})
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
@@ -272,18 +300,18 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
})
for _, part := range parts {
if !numUnderscoresOK(part) {
- p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val)
+ p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val)
}
}
if len(parts) > 0 && numHasLeadingZero(parts[0]) {
- p.panicf("Invalid float %q: cannot have leading zeroes", it.val)
+ p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val)
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
- p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val)
+ p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
@@ -292,9 +320,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
- p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
+ p.panicErr(it, errParseRange{i: it.val, size: "float64"})
} else {
- p.panicf("Invalid float value: %q", it.val)
+ p.panicItemf(it, "Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
@@ -325,18 +353,21 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
}
}
if !ok {
- p.panicf("Invalid TOML Datetime: %q.", it.val)
+ p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
}
func (p *parser) valueArray(it item) (interface{}, tomlType) {
- p.setType(p.currentKey, tomlArray)
+ p.setType(p.currentKey, tomlArray, it.pos)
- // p.setType(p.currentKey, typ)
var (
- array []interface{}
types []tomlType
+
+ // Initialize to a non-nil empty slice. This makes it consistent with
+ // how S = [] decodes into a non-nil slice inside something like struct
+ // { S []string }. See #338
+ array = []interface{}{}
)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
@@ -347,6 +378,12 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
val, typ := p.value(it, true)
array = append(array, val)
types = append(types, typ)
+
+ // XXX: types isn't used here, we need it to record the accurate type
+ // information.
+ //
+ // Not entirely sure how to best store this; could use "key[0]",
+ // "key[1]" notation, or maybe store it on the Array type?
}
return array, tomlArray
}
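
The non-nil initialization is observable from the decoder: an empty TOML array should come out as an empty, non-nil slice. A small sketch of the behavior the comment references:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ S []string }
	if _, err := toml.Decode(`S = []`, &v); err != nil {
		panic(err)
	}
	// With the initialization above, an empty TOML array decodes to an
	// empty, non-nil slice rather than a nil one (see #338).
	fmt.Println(v.S != nil, len(v.S)) // true 0
}
```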
@@ -373,8 +410,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
}
/// Read all key parts.
- k := p.next()
- p.approxLine = k.line
+ k := p.nextPos()
var key Key
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
@@ -393,7 +429,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
/// Set the value.
val, typ := p.value(p.next(), false)
- p.set(p.currentKey, val, typ)
+ p.set(p.currentKey, val, typ, it.pos)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[p.currentKey] = val
@@ -408,7 +444,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
// +/- signs, and base prefixes.
func numHasLeadingZero(s string) bool {
- if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x
+ if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
return true
}
if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
@@ -503,7 +539,7 @@ func (p *parser) addContext(key Key, array bool) {
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
- p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext)
+ p.panicf("Key '%s' was already created and cannot be used as an array.", key)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
@@ -512,9 +548,10 @@ func (p *parser) addContext(key Key, array bool) {
}
// set calls setValue and setType.
-func (p *parser) set(key string, val interface{}, typ tomlType) {
- p.setValue(p.currentKey, val)
- p.setType(p.currentKey, typ)
+func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
+ p.setValue(key, val)
+ p.setType(key, typ, pos)
+}
// setValue sets the given key to the given value in the current context.
@@ -573,28 +610,32 @@ func (p *parser) setValue(key string, value interface{}) {
hash[key] = value
}
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
+// setType sets the type of a particular value at a given key. It should be
+// called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
+func (p *parser) setType(key string, typ tomlType, pos Position) {
keyContext := make(Key, 0, len(p.context)+1)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- }
+ keyContext = append(keyContext, p.context...)
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
- p.types[keyContext.String()] = typ
+ // Special case to make empty keys ("" = 1) work.
+ // Without it, it would set "" rather than `""`.
+ // TODO: why is this needed? And why is this only needed here?
+ if len(keyContext) == 0 {
+ keyContext = Key{""}
+ }
+ p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
}
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
-func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true }
-func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false }
-func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] }
-func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
+func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
+func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
+func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
func (p *parser) addImplicitContext(key Key) {
p.addImplicit(key)
p.addContext(key, false)
@@ -622,7 +663,7 @@ func stripFirstNewline(s string) string {
}
// Remove newlines inside triple-quoted strings if a line ends with "\".
-func stripEscapedNewlines(s string) string {
+func (p *parser) stripEscapedNewlines(s string) string {
split := strings.Split(s, "\n")
if len(split) < 1 {
return s
@@ -654,6 +695,10 @@ func stripEscapedNewlines(s string) string {
continue
}
+ if i == len(split)-1 {
+ p.panicf("invalid escape: '\\ '")
+ }
+
split[i] = line[:len(line)-1] // Remove \
if len(split)-1 > i {
split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
@@ -662,8 +707,8 @@ func stripEscapedNewlines(s string) string {
return strings.Join(split, "")
}
-func (p *parser) replaceEscapes(str string) string {
- var replaced []rune
+func (p *parser) replaceEscapes(it item, str string) string {
+ replaced := make([]rune, 0, len(str))
s := []byte(str)
r := 0
for r < len(s) {
@@ -681,10 +726,8 @@ func (p *parser) replaceEscapes(str string) string {
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
- return ""
case ' ', '\t':
- p.panicf("invalid escape: '\\%c'", s[r])
- return ""
+ p.panicItemf(it, "invalid escape: '\\%c'", s[r])
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
@@ -710,14 +753,14 @@ func (p *parser) replaceEscapes(str string) string {
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+ escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+ escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
replaced = append(replaced, escaped)
r += 9
}
@@ -725,15 +768,14 @@ func (p *parser) replaceEscapes(str string) string {
return string(replaced)
}
-func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
- p.bug("Could not parse '%s' as a hexadecimal number, but the "+
- "lexer claims it's OK: %s", s, err)
+ p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
- p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+ p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
index 608997c22f6..254ca82e549 100644
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -70,8 +70,8 @@ func typeFields(t reflect.Type) []field {
next := []field{{typ: t}}
// Count of queued names for current level and the next.
- count := map[reflect.Type]int{}
- nextCount := map[reflect.Type]int{}
+ var count map[reflect.Type]int
+ var nextCount map[reflect.Type]int
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_toml.go
similarity index 98%
rename from vendor/github.com/BurntSushi/toml/type_check.go
rename to vendor/github.com/BurntSushi/toml/type_toml.go
index d56aa80fafd..4e90d77373b 100644
--- a/vendor/github.com/BurntSushi/toml/type_check.go
+++ b/vendor/github.com/BurntSushi/toml/type_toml.go
@@ -16,7 +16,7 @@ func typeEqual(t1, t2 tomlType) bool {
return t1.typeString() == t2.typeString()
}
-func typeIsHash(t tomlType) bool {
+func typeIsTable(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md
index 60c93fe5068..683be1dcf9c 100644
--- a/vendor/github.com/Microsoft/go-winio/README.md
+++ b/vendor/github.com/Microsoft/go-winio/README.md
@@ -11,12 +11,27 @@ package.
Please see the LICENSE file for licensing information.
-This project has adopted the [Microsoft Open Source Code of
-Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
-see the [Code of Conduct
-FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
-[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
-questions or comments.
+## Contributing
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA)
+declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR
+appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
+
+We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves
+or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can
+attest to the rules listed. Our CI uses the DCO GitHub app to ensure that all commits in a given PR are signed-off.
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Special Thanks
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
index cb461ca3153..2342a7fcd6f 100644
--- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
@@ -5,7 +5,6 @@ package backuptar
import (
"archive/tar"
"encoding/base64"
- "errors"
"fmt"
"io"
"io/ioutil"
@@ -42,19 +41,14 @@ const (
hdrCreationTime = "LIBARCHIVE.creationtime"
)
-func writeZeroes(w io.Writer, count int64) error {
- buf := make([]byte, 8192)
- c := len(buf)
- for i := int64(0); i < count; i += int64(c) {
- if int64(c) > count-i {
- c = int(count - i)
- }
- _, err := w.Write(buf[:c])
- if err != nil {
- return err
- }
+// zeroReader is an io.Reader that always returns 0s.
+type zeroReader struct{}
+
+func (zr zeroReader) Read(b []byte) (int, error) {
+ for i := range b {
+ b[i] = 0
}
- return nil
+ return len(b), nil
}
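
io.CopyN with a constant-zero reader replaces the old hand-rolled writeZeroes loop, and the same trick works anywhere a fixed number of zero bytes must be streamed. A standalone sketch (the byte count is illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// zeroReader fills any buffer it is given with zero bytes.
type zeroReader struct{}

func (zeroReader) Read(b []byte) (int, error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}

func main() {
	var buf bytes.Buffer
	// Pad the output with 16 zero bytes, as copySparse does to bridge the
	// gap between the current offset and the next sparse block.
	if _, err := io.CopyN(&buf, zeroReader{}, 16); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), bytes.Count(buf.Bytes(), []byte{0})) // 16 16
}
```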
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
@@ -71,16 +65,26 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
return fmt.Errorf("unexpected stream %d", bhdr.Id)
}
+ // We can't seek backwards, since we have already written that data to the tar.Writer.
+ if bhdr.Offset < curOffset {
+ return fmt.Errorf("cannot seek back from %d to %d", curOffset, bhdr.Offset)
+ }
// archive/tar does not support writing sparse files
// so just write zeroes to catch up to the current offset.
- err = writeZeroes(t, bhdr.Offset-curOffset)
+ if _, err := io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil {
+ return fmt.Errorf("seek to offset %d: %s", bhdr.Offset, err)
+ }
if bhdr.Size == 0 {
+ // A sparse block with size = 0 is used to mark the end of the sparse blocks.
break
}
n, err := io.Copy(t, br)
if err != nil {
return err
}
+ if n != bhdr.Size {
+ return fmt.Errorf("copied %d bytes instead of %d at offset %d", n, bhdr.Size, bhdr.Offset)
+ }
curOffset = bhdr.Offset + n
}
return nil
@@ -109,6 +113,69 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
return hdr
}
+// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file
+// from the tar header and returns the security descriptor into a byte slice.
+func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) {
+ // Maintaining old SDDL-based behavior for backward
+ // compatibility. All new tar headers written by this library
+ // will have raw binary for the security descriptor.
+ var sd []byte
+ var err error
+ if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
+ sd, err = winio.SddlToSecurityDescriptor(sddl)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
+ sd, err = base64.StdEncoding.DecodeString(sdraw)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return sd, nil
+}
+
+// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the
+// current file from the tar header and returns it as a byte slice.
+func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
+ var eas []winio.ExtendedAttribute
+ var eadata []byte
+ var err error
+ for k, v := range hdr.PAXRecords {
+ if !strings.HasPrefix(k, hdrEaPrefix) {
+ continue
+ }
+ data, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return nil, err
+ }
+ eas = append(eas, winio.ExtendedAttribute{
+ Name: k[len(hdrEaPrefix):],
+ Value: data,
+ })
+ }
+ if len(eas) != 0 {
+ eadata, err = winio.EncodeExtendedAttributes(eas)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return eadata, nil
+}
+
+// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header
+// and encodes it into a byte slice. The file for which this function is called must be a
+// symlink.
+func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte {
+ _, isMountPoint := hdr.PAXRecords[hdrMountPoint]
+ rp := winio.ReparsePoint{
+ Target: filepath.FromSlash(hdr.Linkname),
+ IsMountPoint: isMountPoint,
+ }
+ return winio.EncodeReparsePoint(&rp)
+}
+
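A hedged sketch of using the newly exported helpers on a synthetic tar header; the PAX record value is a placeholder, not a real security descriptor, and the code is Windows-only like the rest of the package:

```go
//go:build windows

package main

import (
	"archive/tar"
	"fmt"

	"github.com/Microsoft/go-winio/backuptar"
)

func main() {
	hdr := &tar.Header{
		Name: "file.txt",
		// MSWINDOWS.rawsd carries the base64-encoded binary security
		// descriptor; the value below is a placeholder, not a real SD.
		PAXRecords: map[string]string{"MSWINDOWS.rawsd": "AQAAgA=="},
	}
	sd, err := backuptar.SecurityDescriptorFromTarHeader(hdr)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(sd))
}
```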
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
@@ -221,20 +288,44 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
}
+ // The logic for copying file contents is fairly complicated due to the need for handling sparse files,
+ // and the weird ways they are represented by BackupRead. A normal file will always either have a data stream
+ // with size and content, or no data stream at all (if empty). However, for a sparse file, the content can also
+ // be represented using a series of sparse block streams following the data stream. Additionally, the way sparse
+ // files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described
+ // in the list at the bottom of this block comment.
+ //
+ // Sparse files can be represented in four different ways, based on the specifics of the file.
+ // - Size = 0:
+ // Previously: BackupRead yields no data stream and no sparse block streams.
+ // Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams.
+ // - Size > 0, no allocated ranges:
+ // BackupRead yields a data stream with size = 0. Following is a single sparse block stream with
+ // size = 0 and offset = .
+ // size = 0 and offset = <file size>.
+ // BackupRead yields a data stream with size = containing the file contents. There are no
+ // BackupRead yields a data stream with size = <file size> containing the file contents. There are no
+ // sparse flag on it.
+ // - Size > 0, multiple allocated ranges:
+ // BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated
+ // range of the file containing the range contents. Finally there is a sparse block stream with
+ // size = 0 and offset = <file size>.
+
if dataHdr != nil {
// A data stream was found. Copy the data.
- if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
+ // We assume that we will either have a data stream with size > 0, or sparse block streams, but not both.
+ if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
if size != dataHdr.Size {
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
}
- _, err = io.Copy(t, br)
- if err != nil {
- return err
+ if _, err = io.Copy(t, br); err != nil {
+ return fmt.Errorf("%s: copying contents from data stream: %s", name, err)
}
- } else {
- err = copySparse(t, br)
- if err != nil {
- return err
+ } else if size > 0 {
+ // As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
+ // These files have no sparse block streams, so skip the copySparse call if file size = 0.
+ if err = copySparse(t, br); err != nil {
+ return fmt.Errorf("%s: copying contents from sparse block stream: %s", name, err)
}
}
}
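
Condensing the block comment above, the stream sequence for a sparse file with several allocated ranges looks like the following sketch (offsets and sizes are illustrative; the BackupHeader fields match the winio API):

```go
//go:build windows

package main

import (
	"fmt"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Size > 0 with multiple allocated ranges: an empty data stream, one
	// sparse block per range, then a zero-size terminator at EOF.
	fileSize := int64(1 << 20)
	streams := []winio.BackupHeader{
		{Id: winio.BackupData, Size: 0},
		{Id: winio.BackupSparseBlock, Offset: 0, Size: 4096},
		{Id: winio.BackupSparseBlock, Offset: 512 * 1024, Size: 4096},
		{Id: winio.BackupSparseBlock, Offset: fileSize, Size: 0}, // terminator
	}
	for _, s := range streams {
		fmt.Printf("id=%d offset=%d size=%d\n", s.Id, s.Offset, s.Size)
	}
}
```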
@@ -279,7 +370,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
} else {
// Unsupported for now, since the size of the alternate stream is not present
// in the backup stream until after the data has been read.
- return errors.New("tar of sparse alternate data streams is unsupported")
+ return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
}
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
@@ -330,21 +421,10 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
// tar file that was not processed, or io.EOF is there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
bw := winio.NewBackupStreamWriter(w)
- var sd []byte
- var err error
- // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
- // by this library will have raw binary for the security descriptor.
- if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
- sd, err = winio.SddlToSecurityDescriptor(sddl)
- if err != nil {
- return nil, err
- }
- }
- if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
- sd, err = base64.StdEncoding.DecodeString(sdraw)
- if err != nil {
- return nil, err
- }
+
+ sd, err := SecurityDescriptorFromTarHeader(hdr)
+ if err != nil {
+ return nil, err
}
if len(sd) != 0 {
bhdr := winio.BackupHeader{
@@ -360,25 +440,12 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
return nil, err
}
}
- var eas []winio.ExtendedAttribute
- for k, v := range hdr.PAXRecords {
- if !strings.HasPrefix(k, hdrEaPrefix) {
- continue
- }
- data, err := base64.StdEncoding.DecodeString(v)
- if err != nil {
- return nil, err
- }
- eas = append(eas, winio.ExtendedAttribute{
- Name: k[len(hdrEaPrefix):],
- Value: data,
- })
+
+ eadata, err := ExtendedAttributesFromTarHeader(hdr)
+ if err != nil {
+ return nil, err
}
- if len(eas) != 0 {
- eadata, err := winio.EncodeExtendedAttributes(eas)
- if err != nil {
- return nil, err
- }
+ if len(eadata) != 0 {
bhdr := winio.BackupHeader{
Id: winio.BackupEaData,
Size: int64(len(eadata)),
@@ -392,13 +459,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
return nil, err
}
}
+
if hdr.Typeflag == tar.TypeSymlink {
- _, isMountPoint := hdr.PAXRecords[hdrMountPoint]
- rp := winio.ReparsePoint{
- Target: filepath.FromSlash(hdr.Linkname),
- IsMountPoint: isMountPoint,
- }
- reparse := winio.EncodeReparsePoint(&rp)
+ reparse := EncodeReparsePointFromTarHeader(hdr)
bhdr := winio.BackupHeader{
Id: winio.BackupReparseData,
Size: int64(len(reparse)),
@@ -411,7 +474,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
if err != nil {
return nil, err
}
+
}
+
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
bhdr := winio.BackupHeader{
Id: winio.BackupData,
diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go
index 0385e410812..293ab54c80c 100644
--- a/vendor/github.com/Microsoft/go-winio/file.go
+++ b/vendor/github.com/Microsoft/go-winio/file.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -143,6 +144,11 @@ func (f *win32File) Close() error {
return nil
}
+// IsClosed checks if the file has been closed
+func (f *win32File) IsClosed() bool {
+ return f.closing.isSet()
+}
+
// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
diff --git a/vendor/github.com/Microsoft/go-winio/go.mod b/vendor/github.com/Microsoft/go-winio/go.mod
index 98a8dea0e7e..f39a608da3a 100644
--- a/vendor/github.com/Microsoft/go-winio/go.mod
+++ b/vendor/github.com/Microsoft/go-winio/go.mod
@@ -1,9 +1,8 @@
module github.com/Microsoft/go-winio
-go 1.12
+go 1.13
require (
- github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.7.0
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)
diff --git a/vendor/github.com/Microsoft/go-winio/go.sum b/vendor/github.com/Microsoft/go-winio/go.sum
index aa6ad3b571a..9bdcd9cfd9a 100644
--- a/vendor/github.com/Microsoft/go-winio/go.sum
+++ b/vendor/github.com/Microsoft/go-winio/go.sum
@@ -1,14 +1,11 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go
index b632f8f8bb9..b2b644d002a 100644
--- a/vendor/github.com/Microsoft/go-winio/hvsock.go
+++ b/vendor/github.com/Microsoft/go-winio/hvsock.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error {
return conn.sock.Close()
}
+// IsClosed checks whether the socket has been closed.
+func (conn *HvsockConn) IsClosed() bool {
+ return conn.sock.IsClosed()
+}
+
func (conn *HvsockConn) shutdown(how int) error {
- err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
+ if conn.IsClosed() {
+ return ErrFileClosed
+ }
+
+ err := syscall.Shutdown(conn.sock.handle, how)
if err != nil {
return os.NewSyscallError("shutdown", err)
}
return nil
}
-// CloseRead shuts down the read end of the socket.
+// CloseRead shuts down the read end of the socket, preventing future read operations.
func (conn *HvsockConn) CloseRead() error {
err := conn.shutdown(syscall.SHUT_RD)
if err != nil {
@@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error {
return nil
}
-// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
-// no more data will be written.
+// CloseWrite shuts down the write end of the socket, preventing future write operations and
+// notifying the other endpoint that no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
err := conn.shutdown(syscall.SHUT_WR)
if err != nil {
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
index f497c0e3917..2d9161e2dee 100644
--- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
@@ -14,8 +14,6 @@ import (
"encoding/binary"
"fmt"
"strconv"
-
- "golang.org/x/sys/windows"
)
// Variant specifies which GUID variant (or "type") of the GUID. It determines
@@ -41,13 +39,6 @@ type Version uint8
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})
-// GUID represents a GUID/UUID. It has the same structure as
-// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
-// that type. It is defined as its own type so that stringification and
-// marshaling can be supported. The representation matches that used by native
-// Windows code.
-type GUID windows.GUID
-
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
var b [16]byte
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
new file mode 100644
index 00000000000..f64d828c0ba
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package guid
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type as that is only available to builds
+// targeted at `windows`. The representation matches that used by native Windows
+// code.
+type GUID struct {
+ Data1 uint32
+ Data2 uint16
+ Data3 uint16
+ Data4 [8]byte
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
new file mode 100644
index 00000000000..83617f4eee9
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
@@ -0,0 +1,10 @@
+package guid
+
+import "golang.org/x/sys/windows"
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type so that stringification and
+// marshaling can be supported. The representation matches that used by native
+// Windows code.
+type GUID windows.GUID
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
index fca241590cc..602920786c9 100644
--- a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
+++ b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
@@ -3,11 +3,10 @@
package security
import (
+ "fmt"
"os"
"syscall"
"unsafe"
-
- "github.com/pkg/errors"
)
type (
@@ -72,7 +71,7 @@ func GrantVmGroupAccess(name string) error {
// Stat (to determine if `name` is a directory).
s, err := os.Stat(name)
if err != nil {
- return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
+ return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err)
}
// Get a handle to the file/directory. Must defer Close on success.
@@ -88,7 +87,7 @@ func GrantVmGroupAccess(name string) error {
sd := uintptr(0)
origDACL := uintptr(0)
if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
- return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
+ return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
}
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
@@ -102,7 +101,7 @@ func GrantVmGroupAccess(name string) error {
// And finally use SetSecurityInfo to apply the updated DACL.
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
- return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
+ return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err)
}
return nil
@@ -120,7 +119,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
}
fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
if err != nil {
- return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
+ return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
}
return fd, nil
}
@@ -131,7 +130,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
// Generate pointers to the SIDs based on the string SIDs
sid, err := syscall.StringToSid(sidVmGroup)
if err != nil {
- return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
+ return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
}
inheritance := inheritModeNoInheritance
@@ -154,7 +153,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
modifiedDACL := uintptr(0)
if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
- return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
+ return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err)
}
return modifiedDACL, nil
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go
index c40c2739b7c..d7096716ce2 100644
--- a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go
+++ b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go
@@ -2,6 +2,6 @@ package security
//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go
-//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo
-//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo
-//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW
+//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo
+//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo
+//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) = advapi32.SetEntriesInAclW
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go
index 4a90cb3cc81..4084680e0f0 100644
--- a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go
@@ -45,26 +45,26 @@ var (
procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo")
)
-func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
- if r1 != 0 {
- err = errnoErr(e1)
+func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) {
+ r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
-func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
- if r1 != 0 {
- err = errnoErr(e1)
+func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) {
+ r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
-func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
- if r1 != 0 {
- err = errnoErr(e1)
+func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) {
+ r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
index b03b789e657..f7f78fc2304 100644
--- a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
+++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package vhd
@@ -7,17 +8,16 @@ import (
"syscall"
"github.com/Microsoft/go-winio/pkg/guid"
- "github.com/pkg/errors"
"golang.org/x/sys/windows"
)
//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
-//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk
-//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk
-//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk
-//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = virtdisk.DetachVirtualDisk
-//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) [failretval != 0] = virtdisk.GetVirtualDiskPhysicalPath
+//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
+//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
+//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk
+//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk
+//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath
type (
CreateVirtualDiskFlag uint32
@@ -62,13 +62,27 @@ type OpenVirtualDiskParameters struct {
Version2 OpenVersion2
}
+// The higher level `OpenVersion2` struct uses bools for `GetInfoOnly` and `ReadOnly` for ease of use. However,
+// the internal Windows structure uses `BOOL`s, aka int32s, for these fields. `openVersion2` is used to translate
+// `OpenVersion2` fields into the correct Windows internal field types on the `Open____` methods.
+type openVersion2 struct {
+ getInfoOnly int32
+ readOnly int32
+ resiliencyGUID guid.GUID
+}
+
+type openVirtualDiskParameters struct {
+ version uint32
+ version2 openVersion2
+}
+
type AttachVersion2 struct {
RestrictedOffset uint64
RestrictedLength uint64
}
type AttachVirtualDiskParameters struct {
- Version uint32 // Must always be set to 2
+ Version uint32
Version2 AttachVersion2
}
@@ -146,16 +160,13 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
return err
}
- if err := syscall.CloseHandle(handle); err != nil {
- return err
- }
- return nil
+ return syscall.CloseHandle(handle)
}
// DetachVirtualDisk detaches a virtual hard disk by handle.
func DetachVirtualDisk(handle syscall.Handle) (err error) {
if err := detachVirtualDisk(handle, 0, 0); err != nil {
- return errors.Wrap(err, "failed to detach virtual disk")
+ return fmt.Errorf("failed to detach virtual disk: %w", err)
}
return nil
}
@@ -185,7 +196,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
parameters,
nil,
); err != nil {
- return errors.Wrap(err, "failed to attach virtual disk")
+ return fmt.Errorf("failed to attach virtual disk: %w", err)
}
return nil
}
@@ -209,7 +220,7 @@ func AttachVhd(path string) (err error) {
AttachVirtualDiskFlagNone,
¶ms,
); err != nil {
- return errors.Wrap(err, "failed to attach virtual disk")
+ return fmt.Errorf("failed to attach virtual disk: %w", err)
}
return nil
}
@@ -234,19 +245,35 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual
var (
handle syscall.Handle
defaultType VirtualStorageType
+ getInfoOnly int32
+ readOnly int32
)
if parameters.Version != 2 {
return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
}
+ if parameters.Version2.GetInfoOnly {
+ getInfoOnly = 1
+ }
+ if parameters.Version2.ReadOnly {
+ readOnly = 1
+ }
+ params := &openVirtualDiskParameters{
+ version: parameters.Version,
+ version2: openVersion2{
+ getInfoOnly,
+ readOnly,
+ parameters.Version2.ResiliencyGUID,
+ },
+ }
if err := openVirtualDisk(
&defaultType,
vhdPath,
uint32(virtualDiskAccessMask),
uint32(openVirtualDiskFlags),
- parameters,
+ params,
&handle,
); err != nil {
- return 0, errors.Wrap(err, "failed to open virtual disk")
+ return 0, fmt.Errorf("failed to open virtual disk: %w", err)
}
return handle, nil
}
@@ -272,7 +299,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask,
nil,
&handle,
); err != nil {
- return handle, errors.Wrap(err, "failed to create virtual disk")
+ return handle, fmt.Errorf("failed to create virtual disk: %w", err)
}
return handle, nil
}
@@ -290,7 +317,7 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
&diskPathSizeInBytes,
&diskPhysicalPathBuf[0],
); err != nil {
- return "", errors.Wrap(err, "failed to get disk physical path")
+ return "", fmt.Errorf("failed to get disk physical path: %w", err)
}
return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil
}
@@ -314,10 +341,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error
createParams,
)
if err != nil {
- return fmt.Errorf("failed to create differencing vhd: %s", err)
+ return fmt.Errorf("failed to create differencing vhd: %w", err)
}
if err := syscall.CloseHandle(vhdHandle); err != nil {
- return fmt.Errorf("failed to close differencing vhd handle: %s", err)
+ return fmt.Errorf("failed to close differencing vhd handle: %w", err)
}
return nil
}
diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
index 572f7b42f10..1d7498db3be 100644
--- a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
+++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
@@ -47,60 +47,60 @@ var (
procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk")
)
-func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
- if r1 != 0 {
- err = errnoErr(e1)
+func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) {
+ r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
-func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
+func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) {
var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(path)
- if err != nil {
+ _p0, win32err = syscall.UTF16PtrFromString(path)
+ if win32err != nil {
return
}
return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle)
}
-func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
- r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
- if r1 != 0 {
- err = errnoErr(e1)
+func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) {
+ r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
-func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
- if r1 != 0 {
- err = errnoErr(e1)
+func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) {
+ r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
-func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
- if r1 != 0 {
- err = errnoErr(e1)
+func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) {
+ r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
-func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
+func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
var _p0 *uint16
- _p0, err = syscall.UTF16PtrFromString(path)
- if err != nil {
+ _p0, win32err = syscall.UTF16PtrFromString(path)
+ if win32err != nil {
return
}
return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle)
}
-func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
- r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
- if r1 != 0 {
- err = errnoErr(e1)
+func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
+ r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
+ if r0 != 0 {
+ win32err = syscall.Errno(r0)
}
return
}
diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore
index aec9bd4bb05..54ed6f06c9d 100644
--- a/vendor/github.com/Microsoft/hcsshim/.gitignore
+++ b/vendor/github.com/Microsoft/hcsshim/.gitignore
@@ -1,3 +1,38 @@
+# Binaries for programs and plugins
*.exe
-.idea
-.vscode
+*.dll
+*.so
+*.dylib
+
+# Ignore vscode setting files
+.vscode/
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# Ignore gcs bin directory
+service/bin/
+service/pkg/
+
+*.img
+*.vhd
+*.tar.gz
+
+# Make stuff
+.rootfs-done
+bin/*
+rootfs/*
+*.o
+/build/
+
+deps/*
+out/*
+
+.idea/
+.vscode/
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml
new file mode 100644
index 00000000000..2400e7f1e02
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml
@@ -0,0 +1,99 @@
+run:
+ timeout: 8m
+
+linters:
+ enable:
+ - stylecheck
+
+linters-settings:
+ stylecheck:
+ # https://staticcheck.io/docs/checks
+ checks: ["all"]
+
+
+issues:
+  # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like
+  # (SCREAMING_CASE Windows API constants, for example). There are also some structs whose initialisms we *could* make Go
+  # friendly (Id -> ID), but they're exported, so that would be a breaking change. These exclusions mean that most new code, and any
+  # code that isn't generated or isn't meant to be a faithful mapping of an OS call or its constants, is still checked for idioms,
+  # while the things that are just noise, or would be more hassle than they're worth to change, are ignored.
+ exclude-rules:
+ - path: layer.go
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: hcsshim.go
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\hcs\\schema2\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\wclayer\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: hcn\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\hcs\\schema1\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\hns\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: ext4\\internal\\compactext4\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: ext4\\internal\\format\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\guestrequest\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\guest\\prot\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\windevice\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\winapi\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\vmcompute\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\regstate\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
+
+ - path: internal\\hcserror\\
+ linters:
+ - stylecheck
+ Text: "ST1003:"
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile
new file mode 100644
index 00000000000..a8f5516cd0b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/Makefile
@@ -0,0 +1,87 @@
+BASE:=base.tar.gz
+
+GO:=go
+GO_FLAGS:=-ldflags "-s -w" # strip Go binaries
+CGO_ENABLED:=0
+GOMODVENDOR:=
+
+CFLAGS:=-O2 -Wall
+LDFLAGS:=-static -s # strip C binaries
+
+GO_FLAGS_EXTRA:=
+ifeq "$(GOMODVENDOR)" "1"
+GO_FLAGS_EXTRA += -mod=vendor
+endif
+GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA)
+
+SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
+
+# The link aliases for gcstools
+GCS_TOOLS=\
+ generichook
+
+.PHONY: all always rootfs test
+
+all: out/initrd.img out/rootfs.tar.gz
+
+clean:
+ find -name '*.o' -print0 | xargs -0 -r rm
+ rm -rf bin deps rootfs out
+
+test:
+ cd $(SRCROOT) && go test -v ./internal/guest/...
+
+out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile
+ @mkdir -p out
+ rm -rf rootfs
+ mkdir -p rootfs/bin/
+ cp bin/init rootfs/
+ cp bin/vsockexec rootfs/bin/
+ cp bin/cmd/gcs rootfs/bin/
+ cp bin/cmd/gcstools rootfs/bin/
+ for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done
+ git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \
+ git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch
+ tar -zcf $@ -C rootfs .
+ rm -rf rootfs
+
+out/rootfs.tar.gz: out/initrd.img
+ rm -rf rootfs-conv
+ mkdir rootfs-conv
+ gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd)
+ tar -zcf $@ -C rootfs-conv .
+ rm -rf rootfs-conv
+
+out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh
+ $(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed
+ gzip -c out/initrd.img.uncompressed > $@
+ rm out/initrd.img.uncompressed
+
+-include deps/cmd/gcs.gomake
+-include deps/cmd/gcstools.gomake
+
+# Implicit rule for includes that define Go targets.
+%.gomake: $(SRCROOT)/Makefile
+ @mkdir -p $(dir $@)
+ @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new
+ @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new
+ @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new
+ @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new
+ @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new
+ @/bin/echo -e '\tmv $$@.new $$@' >> $@.new
+ @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new
+ mv $@.new $@
+
+VPATH=$(SRCROOT)
+
+bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
+ @mkdir -p bin
+ $(CC) $(LDFLAGS) -o $@ $^
+
+bin/init: init/init.o vsockexec/vsock.o
+ @mkdir -p bin
+ $(CC) $(LDFLAGS) -o $@ $^
+
+%.o: %.c
+ @mkdir -p $(dir $@)
+ $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md
index 95c30036562..b8ca926a9da 100644
--- a/vendor/github.com/Microsoft/hcsshim/README.md
+++ b/vendor/github.com/Microsoft/hcsshim/README.md
@@ -2,13 +2,67 @@
[](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster)
-This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).
+This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers.
-It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well.
+It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well.
+
+## Building
+
+While this repository can be used as a library of sorts to call the HCS APIs, there are a couple of binaries built out of it as well. The main ones are the Linux guest agent and an implementation of the [runtime v2 containerd shim API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
+
+### Linux Hyper-V Container Guest Agent
+
+To build the Linux guest agent itself, all that's needed is to set `GOOS` to "linux" and build out of ./cmd/gcs.
+```powershell
+C:\> $env:GOOS="linux"
+C:\> go build .\cmd\gcs\
+```
+
+or on a Linux machine
+```sh
+> go build ./cmd/gcs
+```
+
+If you want the agent packaged inside of a bootable rootfs alongside all of the other tools, you'll need to provide a base rootfs for it to be packaged into. An easy way to get one is to export the rootfs of a container.
+
+```sh
+docker pull busybox
+docker run --name base_image_container busybox
+docker export base_image_container | gzip > base.tar.gz
+BASE=./base.tar.gz
+make all
+```
+
+If the build is successful, in the `./out` folder you should see:
+```sh
+> ls ./out/
+delta.tar.gz initrd.img rootfs.tar.gz
+```
+
+### Containerd Shim
+For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md.
+
+Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.
+
+```powershell
+C:\> $env:GOOS="windows"
+C:\> go build .\cmd\containerd-shim-runhcs-v1
+```
+
+Then place the binary in the same directory as the containerd binary in your environment. A default containerd configuration file can be generated by running:
+```powershell
+.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii
+```
+
+This config file will already have the shim set as the default runtime for CRI interactions.
+
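+As a rough sketch, the relevant part of that generated config looks something like the following (the section names follow containerd's CRI plugin config layout and are illustrative here, not taken from this repository):
+
+```toml
+[plugins."io.containerd.grpc.v1.cri".containerd]
+  default_runtime_name = "runhcs-wcow-process"
+  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runhcs-wcow-process]
+    runtime_type = "io.containerd.runhcs.v1"
+```
+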
+To try the shim out with ctr.exe:
+```powershell
+C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!"
+```
## Contributing
-This project welcomes contributions and suggestions. Most contributions require you to agree to a
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com.
@@ -16,7 +70,27 @@ When you submit a pull request, a CLA-bot will automatically determine whether y
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
-We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project.
+We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to
+certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for
+more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure
+that all commits in a given PR are signed-off.
+
+### Test Directory (Important to note)
+
+This project has tried to trim from the root Go modules file some dependencies that would be cumbersome to pull in transitively when this
+project is vendored or used as a library. Some of these dependencies were only used for tests, so the /test directory in this project has
+its own go.mod file where they are now included instead. Our tests rely on the code in this project to run, so the test Go modules file
+has a relative-path replace directive to pull in the hcsshim code that the tests actually touch from this project
+(that is, the repo itself on your disk).
+
+```
+replace (
+ github.com/Microsoft/hcsshim => ../
+)
+```
+
+Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the
+CI in this project checks whether those files are out of date and fails if they are.
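+
+For example (a minimal sketch, run from the repository root):
+
+```sh
+cd test
+go mod tidy
+go mod vendor
+```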
## Code of Conduct
diff --git a/vendor/github.com/Microsoft/hcsshim/go.mod b/vendor/github.com/Microsoft/hcsshim/go.mod
index 8360b0a50d3..9c60dd30251 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.mod
+++ b/vendor/github.com/Microsoft/hcsshim/go.mod
@@ -3,25 +3,34 @@ module github.com/Microsoft/hcsshim
go 1.13
require (
+ github.com/BurntSushi/toml v0.3.1
github.com/Microsoft/go-winio v0.4.17
+ github.com/cenkalti/backoff/v4 v4.1.1
github.com/containerd/cgroups v1.0.1
github.com/containerd/console v1.0.2
- github.com/containerd/containerd v1.4.9
- github.com/containerd/continuity v0.1.0 // indirect
- github.com/containerd/fifo v1.0.0 // indirect
+ github.com/containerd/containerd v1.5.7
github.com/containerd/go-runc v1.0.0
- github.com/containerd/ttrpc v1.0.2
+ github.com/containerd/ttrpc v1.1.0
github.com/containerd/typeurl v1.0.2
github.com/gogo/protobuf v1.3.2
- github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d
+ github.com/golang/mock v1.6.0
+ github.com/google/go-cmp v0.5.6
+ github.com/google/go-containerregistry v0.5.1
+ github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3
+ github.com/mattn/go-shellwords v1.0.6
+ github.com/opencontainers/runc v1.0.2
+ github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/pkg/errors v0.9.1
- github.com/sirupsen/logrus v1.7.0
+ github.com/sirupsen/logrus v1.8.1
github.com/urfave/cli v1.22.2
+ github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
+ github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae
+ go.etcd.io/bbolt v1.3.6
go.opencensus.io v0.22.3
- golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
- golang.org/x/sys v0.0.0-20210324051608-47abb6519492
- google.golang.org/grpc v1.33.2
- gotest.tools/v3 v3.0.3 // indirect
+ golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
+ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
+ google.golang.org/grpc v1.40.0
)
replace (
diff --git a/vendor/github.com/Microsoft/hcsshim/go.sum b/vendor/github.com/Microsoft/hcsshim/go.sum
index 8ad3a8b8f1a..93c37657f3c 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.sum
+++ b/vendor/github.com/Microsoft/hcsshim/go.sum
@@ -1,241 +1,993 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.4.9 h1:JIw9mjVw4LsGmnA/Bqg9j9e+XB7soOJufrKUpA6n2Ns=
-github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1 h1:5e7heayhB7CcgdTkqfZqrNaNv15gABwr3Q2jBTbLlt4=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7 h1:Cvj7S8I4Xpx78KAl6TwTmMHuHlZ/0SM60NUneGJQ7IE=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.5.1 h1:/+mFTs4AlwsJ/mJe8NDtKb7BxLtbZFpcn8vDsneEkwQ=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d h1:pNa8metDkwZjb9g4T8s+krQ+HRgZAkqnXml+wNir/+s=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1 h1:Lo6mRUjdS99f3zxYOUalftWHUoOGaDRqFk1+j0Q57/I=
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
+k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
+k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
+k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
+k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
+k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
+k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
+k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
+k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
+k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
+k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
index 644f0ab711f..e21354ffd66 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
@@ -78,6 +78,13 @@ var (
// ErrNotSupported is an error encountered when hcs doesn't support the request
ErrPlatformNotSupported = errors.New("unsupported platform request")
+
+ // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped.
+ ErrProcessAlreadyStopped = syscall.Errno(0x8037011f)
+
+	// ErrInvalidHandle is an error that can be encountered when querying the properties of a compute
+	// system whose handle has already been closed.
+ ErrInvalidHandle = syscall.Errno(0x6)
)
type ErrorEvent struct {
@@ -249,6 +256,14 @@ func IsNotExist(err error) bool {
err == ErrElementNotFound
}
+// IsErrorInvalidHandle checks whether the error is the result of an operation carried
+// out on a handle that is invalid/closed. This error was observed while trying to query
+// stats on a container in the process of being stopped.
+func IsErrorInvalidHandle(err error) bool {
+ err = getInnerError(err)
+ return err == ErrInvalidHandle
+}
+
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
// already closed by a call to the Close() method.
func IsAlreadyClosed(err error) bool {
@@ -281,6 +296,7 @@ func IsTimeout(err error) bool {
func IsAlreadyStopped(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeAlreadyStopped ||
+ err == ErrProcessAlreadyStopped ||
err == ErrElementNotFound
}
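
Note: a minimal sketch of how the new predicates are meant to be consumed, written as a hypothetical helper colocated with the internal hcs package (hcs is not importable from outside the module, and statsOrNil is an invented name):

    package hcs // sketch only

    import (
        "context"

        hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
    )

    // statsOrNil is a hypothetical helper: a stats query that races container
    // shutdown is treated as "no data" instead of a hard failure, using the
    // predicates added above.
    func statsOrNil(ctx context.Context, sys *System) (*hcsschema.Statistics, error) {
        props, err := sys.PropertiesV2(ctx, hcsschema.PTStatistics)
        if err != nil {
            if IsErrorInvalidHandle(err) || IsAlreadyStopped(err) {
                return nil, nil // handle already closed or process already gone
            }
            return nil, err
        }
        return props.Statistics, nil
    }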
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
index 8f203466887..f4605922ab4 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
@@ -3,7 +3,9 @@ package hcs
import (
"context"
"encoding/json"
+ "errors"
"io"
+ "os"
"sync"
"syscall"
"time"
@@ -16,16 +18,17 @@ import (
// ContainerError is an error encountered in HCS
type Process struct {
- handleLock sync.RWMutex
- handle vmcompute.HcsProcess
- processID int
- system *System
- hasCachedStdio bool
- stdioLock sync.Mutex
- stdin io.WriteCloser
- stdout io.ReadCloser
- stderr io.ReadCloser
- callbackNumber uintptr
+ handleLock sync.RWMutex
+ handle vmcompute.HcsProcess
+ processID int
+ system *System
+ hasCachedStdio bool
+ stdioLock sync.Mutex
+ stdin io.WriteCloser
+ stdout io.ReadCloser
+ stderr io.ReadCloser
+ callbackNumber uintptr
+ killSignalDelivered bool
closedWaitOnce sync.Once
waitBlock chan struct{}
@@ -149,12 +152,45 @@ func (process *Process) Kill(ctx context.Context) (bool, error) {
return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
+ if process.killSignalDelivered {
+	// A kill signal has already been sent to this process. Sending a second
+	// one offers no real benefit, as processes cannot stop themselves from
+	// being terminated once a TerminateProcess has been issued. Sending a
+	// second kill may result in a number of errors (two of which are detailed
+	// below) that we can avoid handling.
+ return true, nil
+ }
+
resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
+ if err != nil {
+		// We still need to check these two cases, as processes may still be killed by an
+		// external actor (human operator, OOM, random script, etc.).
+		if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) {
+			// There are two cases where it should be safe to ignore an error returned
+			// by HcsTerminateProcess. The first one is caused by the fact that
+			// HcsTerminateProcess ends up calling TerminateProcess in the context
+			// of a container. According to the TerminateProcess documentation:
+			// https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks
+			// after a process has terminated, calls to TerminateProcess with open
+			// handles to the process fail with the ERROR_ACCESS_DENIED (5) error code.
+			// It's safe to ignore this error here. HCS should always have permissions
+			// to kill processes inside any container, so an ERROR_ACCESS_DENIED
+			// is unlikely to mean anything other than what the closing remarks in the
+			// documentation state.
+ //
+ // The second case is generated by hcs itself, if for any reason HcsTerminateProcess
+ // is called twice in a very short amount of time. In such cases, hcs may return
+ // HCS_E_PROCESS_ALREADY_STOPPED.
+ return true, nil
+ }
+ }
events := processHcsResult(ctx, resultJSON)
delivered, err := process.processSignalResult(ctx, err)
if err != nil {
err = makeProcessError(process, operation, err, events)
}
+
+ process.killSignalDelivered = delivered
return delivered, err
}
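
With the killSignalDelivered guard, Kill becomes effectively idempotent once a signal has been delivered. A hedged sketch of the retry pattern this enables (hypothetical helper in the same internal package; assumes Process.Wait has its usual func (*Process) Wait() error shape):

    package hcs // sketch only

    import "context"

    // killAndWait is a hypothetical helper: a second Kill after a delivered
    // signal returns (true, nil) without another HcsTerminateProcess call, so
    // callers can retry until delivery without special-casing
    // ERROR_ACCESS_DENIED or HCS_E_PROCESS_ALREADY_STOPPED themselves.
    func killAndWait(ctx context.Context, p *Process) error {
        for {
            delivered, err := p.Kill(ctx)
            if err != nil {
                return err
            }
            if delivered {
                break
            }
        }
        return p.Wait() // block until the process actually exits
    }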
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go
index bcfeb34d549..70884aad75f 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go
@@ -27,4 +27,10 @@ type Attachment struct {
CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"`
ReadOnly bool `json:"ReadOnly,omitempty"`
+
+ SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"`
+
+ AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"`
+
+ ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"`
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go
index 4fb23107683..39a54432c02 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go
@@ -31,4 +31,6 @@ type Container struct {
RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"`
AssignedDevices []Device `json:"AssignedDevices,omitempty"`
+
+ AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"`
}
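
The new field composes with the generated ContainerDefinitionDevice/DeviceExtension models added later in this patch. A sketch of how a V2 document might populate it; every literal below is a made-up placeholder:

    package main // sketch; hcsschema is internal to hcsshim, illustrative only

    import hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"

    func exampleContainerDoc() *hcsschema.Container {
        return &hcsschema.Container{
            AdditionalDeviceNamespace: &hcsschema.ContainerDefinitionDevice{
                DeviceExtension: []hcsschema.DeviceExtension{{
                    DeviceCategory: &hcsschema.DeviceCategory{
                        Name: "example-category", // placeholder
                        InterfaceClass: []hcsschema.InterfaceClass{{
                            Type_:      "example-type",                         // placeholder
                            Identifier: "00000000-0000-0000-0000-000000000000", // placeholder GUID
                        }},
                    },
                }},
            },
        }
    }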
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go
index f1a28cd3898..0be0475d41a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go
@@ -14,5 +14,5 @@ type CpuGroupConfig struct {
Affinity *CpuGroupAffinity `json:"Affinity,omitempty"`
GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"`
// Hypervisor CPU group IDs exposed to clients
- HypervisorGroupId int32 `json:"HypervisorGroupId,omitempty"`
+ HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"`
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go
index 107caddadae..31c4538affc 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go
@@ -12,9 +12,9 @@ package hcsschema
type DeviceType string
const (
- ClassGUID DeviceType = "ClassGuid"
- DeviceInstance DeviceType = "DeviceInstance"
- GPUMirror DeviceType = "GpuMirror"
+ ClassGUID DeviceType = "ClassGuid"
+ DeviceInstanceID DeviceType = "DeviceInstance"
+ GPUMirror DeviceType = "GpuMirror"
)
type Device struct {
@@ -22,6 +22,6 @@ type Device struct {
Type DeviceType `json:"Type,omitempty"`
// The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid.
InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"`
- // The location path of the device to assign to the container. Only used when Type is DeviceInstance.
+ // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID.
LocationPath string `json:"LocationPath,omitempty"`
}
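
Consumer-facing note: this renames the exported constant while leaving the serialized value "DeviceInstance" unchanged, so code written against the old name needs a one-line update; a sketch with a hypothetical location path:

    package main // sketch only

    import hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"

    func exampleDevice() hcsschema.Device {
        // Was: Type: hcsschema.DeviceInstance. The wire value is unchanged.
        return hcsschema.Device{
            Type:         hcsschema.DeviceInstanceID,
            LocationPath: `PCIROOT(0)#PCI(0200)#PCI(0000)`, // hypothetical path
        }
    }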
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go
new file mode 100644
index 00000000000..8dbe40b3be2
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go
@@ -0,0 +1,14 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type ContainerDefinitionDevice struct {
+ DeviceExtension []DeviceExtension `json:"device_extension,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go
new file mode 100644
index 00000000000..8fe89f92747
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go
@@ -0,0 +1,15 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DeviceCategory struct {
+ Name string `json:"name,omitempty"`
+ InterfaceClass []InterfaceClass `json:"interface_class,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go
new file mode 100644
index 00000000000..a62568d892e
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go
@@ -0,0 +1,15 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DeviceExtension struct {
+ DeviceCategory *DeviceCategory `json:"device_category,omitempty"`
+ Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go
new file mode 100644
index 00000000000..a7410febd6d
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go
@@ -0,0 +1,17 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DeviceInstance struct {
+ Id string `json:"id,omitempty"`
+ LocationPath string `json:"location_path,omitempty"`
+ PortName string `json:"port_name,omitempty"`
+ InterfaceClass []InterfaceClass `json:"interface_class,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go
new file mode 100644
index 00000000000..3553640647e
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go
@@ -0,0 +1,16 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DeviceNamespace struct {
+ RequiresDriverstore bool `json:"requires_driverstore,omitempty"`
+ DeviceCategory []DeviceCategory `json:"device_category,omitempty"`
+ DeviceInstance []DeviceInstance `json:"device_instance,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go
new file mode 100644
index 00000000000..7be98b54107
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go
@@ -0,0 +1,16 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type InterfaceClass struct {
+ Type_ string `json:"type,omitempty"`
+ Identifier string `json:"identifier,omitempty"`
+ Recurse bool `json:"recurse,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go
new file mode 100644
index 00000000000..3ab9cf1ecf0
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go
@@ -0,0 +1,15 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type DeviceExtensionNamespace struct {
+ Ob *ObjectNamespace `json:"ob,omitempty"`
+ Device *DeviceNamespace `json:"device,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go
new file mode 100644
index 00000000000..d2f51b3b53c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go
@@ -0,0 +1,18 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type ObjectDirectory struct {
+ Name string `json:"name,omitempty"`
+ Clonesd string `json:"clonesd,omitempty"`
+ Shadow string `json:"shadow,omitempty"`
+ Symlink []ObjectSymlink `json:"symlink,omitempty"`
+ Objdir []ObjectDirectory `json:"objdir,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go
new file mode 100644
index 00000000000..47dfb55bfa8
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go
@@ -0,0 +1,16 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type ObjectNamespace struct {
+ Shadow string `json:"shadow,omitempty"`
+ Symlink []ObjectSymlink `json:"symlink,omitempty"`
+ Objdir []ObjectDirectory `json:"objdir,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go
new file mode 100644
index 00000000000..8867ebe5f02
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go
@@ -0,0 +1,18 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type ObjectSymlink struct {
+ Name string `json:"name,omitempty"`
+ Path string `json:"path,omitempty"`
+ Scope string `json:"scope,omitempty"`
+ Pathtoclone string `json:"pathtoclone,omitempty"`
+ AccessMask int32 `json:"access_mask,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go
new file mode 100644
index 00000000000..9ef322f615b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go
@@ -0,0 +1,15 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type VirtualPMemMapping struct {
+ HostPath string `json:"HostPath,omitempty"`
+ ImageFormat string `json:"ImageFormat,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
index 75499c967f0..1d45a703b2e 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
@@ -4,17 +4,22 @@ import (
"context"
"encoding/json"
"errors"
+ "fmt"
"strings"
"sync"
"syscall"
+ "time"
"github.com/Microsoft/hcsshim/internal/cow"
"github.com/Microsoft/hcsshim/internal/hcs/schema1"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+ "github.com/Microsoft/hcsshim/internal/jobobject"
"github.com/Microsoft/hcsshim/internal/log"
+ "github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/Microsoft/hcsshim/internal/vmcompute"
+ "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -28,7 +33,8 @@ type System struct {
waitBlock chan struct{}
waitError error
exitError error
- os, typ string
+ os, typ, owner string
+ startTime time.Time
}
func newSystem(id string) *System {
@@ -38,6 +44,11 @@ func newSystem(id string) *System {
}
}
+// Implementation detail for silo naming; this should NOT be relied upon very heavily.
+func siloNameFmt(containerID string) string {
+ return fmt.Sprintf(`\Container_%s`, containerID)
+}
+
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
operation := "hcs::CreateComputeSystem"
@@ -127,6 +138,7 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error {
}
computeSystem.typ = strings.ToLower(props.SystemType)
computeSystem.os = strings.ToLower(props.RuntimeOSType)
+ computeSystem.owner = strings.ToLower(props.Owner)
if computeSystem.os == "" && computeSystem.typ == "container" {
// Pre-RS5 HCS did not return the OS, but it only supported containers
// that ran Windows.
@@ -195,7 +207,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
if err != nil {
return makeSystemError(computeSystem, operation, err, events)
}
-
+ computeSystem.startTime = time.Now()
return nil
}
@@ -324,11 +336,115 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr
return properties, nil
}
-// PropertiesV2 returns the requested container properties targeting a V2 schema container.
-func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
- computeSystem.handleLock.RLock()
- defer computeSystem.handleLock.RUnlock()
+// queryInProc handles querying for container properties without reaching out to HCS. `props`
+// will be updated to contain any data returned from the queries present in `types`. If any properties
+// failed to be queried, they will be tallied up and returned as the first return value. Failures on
+// query are NOT considered errors; the only failure case for this method is if the container's job object
+// cannot be opened.
+func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) {
+ // In the future we can make use of some new functionality in the HCS that allows you
+ // to pass a job object for HCS to use for the container. Currently, the only way we'll
+ // be able to open the job/silo is if we're running as SYSTEM.
+ jobOptions := &jobobject.Options{
+ UseNTVariant: true,
+ Name: siloNameFmt(computeSystem.id),
+ }
+ job, err := jobobject.Open(ctx, jobOptions)
+ if err != nil {
+ return nil, err
+ }
+ defer job.Close()
+
+ var fallbackQueryTypes []hcsschema.PropertyType
+ for _, propType := range types {
+ switch propType {
+ case hcsschema.PTStatistics:
+ // Handle a bad caller asking for the same type twice. No use in re-querying if this is
+ // filled in already.
+ if props.Statistics == nil {
+ props.Statistics, err = computeSystem.statisticsInProc(job)
+ if err != nil {
+ log.G(ctx).WithError(err).Warn("failed to get statistics in-proc")
+
+ fallbackQueryTypes = append(fallbackQueryTypes, propType)
+ }
+ }
+ default:
+ fallbackQueryTypes = append(fallbackQueryTypes, propType)
+ }
+ }
+
+ return fallbackQueryTypes, nil
+}
+
+// statisticsInProc emulates what HCS does to grab statistics for a given container with a small
+// change to make grabbing the private working set total much more efficient.
+func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) {
+ // Start timestamp for these stats before we grab them to match HCS
+ timestamp := time.Now()
+
+ memInfo, err := job.QueryMemoryStats()
+ if err != nil {
+ return nil, err
+ }
+
+ processorInfo, err := job.QueryProcessorStats()
+ if err != nil {
+ return nil, err
+ }
+
+ storageInfo, err := job.QueryStorageStats()
+ if err != nil {
+ return nil, err
+ }
+
+ // This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation
+	// with the class SystemProcessInformation, which returns an array containing system information for *every*
+	// process running on the machine. They then grab the pids that are running in the container and filter down
+	// the entries in the array to only what's running in that silo and start tallying up the total. This doesn't
+	// scale well, as performance degrades the more processes are running on the machine overall, not
+	// just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored
+	// as well, which isn't great and is wasted work to fetch.
+	//
+	// HCS only lets you grab statistics in an all-or-nothing fashion, so we can't just grab the private
+	// working set ourselves and ask for everything else separately. The optimization we can make here is
+ // to open the silo ourselves and do the same queries for the rest of the info, as well as calculating
+ // the private working set in a more efficient manner by:
+ //
+ // 1. Find the pids running in the silo
+ // 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access)
+ // 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters
+ // 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2.
+ privateWorkingSet, err := job.QueryPrivateWorkingSet()
+ if err != nil {
+ return nil, err
+ }
+ return &hcsschema.Statistics{
+ Timestamp: timestamp,
+ ContainerStartTime: computeSystem.startTime,
+ Uptime100ns: uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100,
+ Memory: &hcsschema.MemoryStats{
+ MemoryUsageCommitBytes: memInfo.JobMemory,
+ MemoryUsageCommitPeakBytes: memInfo.PeakJobMemoryUsed,
+ MemoryUsagePrivateWorkingSetBytes: privateWorkingSet,
+ },
+ Processor: &hcsschema.ProcessorStats{
+ RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime),
+ RuntimeUser100ns: uint64(processorInfo.TotalUserTime),
+ TotalRuntime100ns: uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime),
+ },
+ Storage: &hcsschema.StorageStats{
+ ReadCountNormalized: uint64(storageInfo.ReadStats.IoCount),
+ ReadSizeBytes: storageInfo.ReadStats.TotalSize,
+ WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount),
+ WriteSizeBytes: storageInfo.WriteStats.TotalSize,
+ },
+ }, nil
+}
+
+// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types.
+func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) {
operation := "hcs::System::PropertiesV2"
queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})
@@ -345,12 +461,66 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem
if propertiesJSON == "" {
return nil, ErrUnexpectedValue
}
- properties := &hcsschema.Properties{}
- if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
+ props := &hcsschema.Properties{}
+ if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil {
return nil, makeSystemError(computeSystem, operation, err, nil)
}
- return properties, nil
+ return props, nil
+}
+
+// PropertiesV2 returns the requested compute system's properties targeting a V2 schema compute system.
+func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) {
+ computeSystem.handleLock.RLock()
+ defer computeSystem.handleLock.RUnlock()
+
+	// Let HCS tally up the total for VM-based queries instead of querying ourselves.
+ if computeSystem.typ != "container" {
+ return computeSystem.hcsPropertiesV2Query(ctx, types)
+ }
+
+ // Define a starter Properties struct with the default fields returned from every
+ // query. Owner is only returned from Statistics but it's harmless to include.
+ properties := &hcsschema.Properties{
+ Id: computeSystem.id,
+ SystemType: computeSystem.typ,
+ RuntimeOsType: computeSystem.os,
+ Owner: computeSystem.owner,
+ }
+
+ logEntry := log.G(ctx)
+	// First, let's try to query ourselves without reaching out to HCS. If any of the queries fail,
+	// we'll take note and fall back to querying HCS for any of the failed types.
+ fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types)
+ if err == nil && len(fallbackTypes) == 0 {
+ return properties, nil
+ } else if err != nil {
+		logEntry = logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
+ fallbackTypes = types
+ }
+
+ logEntry.WithFields(logrus.Fields{
+ logfields.ContainerID: computeSystem.id,
+ "propertyTypes": fallbackTypes,
+ }).Info("falling back to HCS for property type queries")
+
+ hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Now add in anything that we might have successfully queried in process.
+ if properties.Statistics != nil {
+ hcsProperties.Statistics = properties.Statistics
+ hcsProperties.Owner = properties.Owner
+ }
+
+	// For future support of querying the process list in-proc as well.
+ if properties.ProcessList != nil {
+ hcsProperties.ProcessList = properties.ProcessList
+ }
+
+ return hcsProperties, nil
}
// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
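
The in-proc fast path reduces to "open the silo's job object by name and query it directly". A minimal sketch of that step in isolation, under the stated assumptions (the caller runs as SYSTEM, and QueryPrivateWorkingSet returns a byte count as uint64):

    package hcs // sketch only: mirrors the fast path used by statisticsInProc

    import (
        "context"

        "github.com/Microsoft/hcsshim/internal/jobobject"
    )

    // privateWorkingSet is a hypothetical helper isolating the silo-open step.
    // The job object is addressed via the \Container_<id> naming convention,
    // an implementation detail exposed by siloNameFmt above.
    func privateWorkingSet(ctx context.Context, containerID string) (uint64, error) {
        job, err := jobobject.Open(ctx, &jobobject.Options{
            UseNTVariant: true, // silo names live in the NT object namespace
            Name:         siloNameFmt(containerID),
        })
        if err != nil {
            return 0, err // real code falls back to querying HCS
        }
        defer job.Close()
        return job.QueryPrivateWorkingSet()
    }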
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
index 262714b4d0f..7cf954c7b26 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go
@@ -20,6 +20,7 @@ type HNSEndpoint struct {
IPv6Address net.IP `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
+ DNSDomain string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
GatewayAddressV6 string `json:",omitempty"`
EnableInternalDNS bool `json:",omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
index 6765aaead5e..84b36821847 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
@@ -21,10 +21,11 @@ const (
)
type NatPolicy struct {
- Type PolicyType `json:"Type"`
- Protocol string
- InternalPort uint16
- ExternalPort uint16
+ Type PolicyType `json:"Type"`
+ Protocol string `json:",omitempty"`
+ InternalPort uint16 `json:",omitempty"`
+ ExternalPort uint16 `json:",omitempty"`
+ ExternalPortReserved bool `json:",omitempty"`
}
type QosPolicy struct {
@@ -88,20 +89,20 @@ const (
type ACLPolicy struct {
Type PolicyType `json:"Type"`
Id string `json:"Id,omitempty"`
- Protocol uint16
- Protocols string `json:"Protocols,omitempty"`
- InternalPort uint16
+ Protocol uint16 `json:",omitempty"`
+ Protocols string `json:"Protocols,omitempty"`
+ InternalPort uint16 `json:",omitempty"`
Action ActionType
Direction DirectionType
- LocalAddresses string
- RemoteAddresses string
- LocalPorts string `json:"LocalPorts,omitempty"`
- LocalPort uint16
- RemotePorts string `json:"RemotePorts,omitempty"`
- RemotePort uint16
- RuleType RuleType `json:"RuleType,omitempty"`
- Priority uint16
- ServiceName string
+ LocalAddresses string `json:",omitempty"`
+ RemoteAddresses string `json:",omitempty"`
+ LocalPorts string `json:"LocalPorts,omitempty"`
+ LocalPort uint16 `json:",omitempty"`
+ RemotePorts string `json:"RemotePorts,omitempty"`
+ RemotePort uint16 `json:",omitempty"`
+ RuleType RuleType `json:"RuleType,omitempty"`
+ Priority uint16 `json:",omitempty"`
+ ServiceName string `json:",omitempty"`
}
type Policy struct {
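The `omitempty` additions above change the wire format: zero-valued fields are now dropped from the JSON that HNS receives instead of being serialized as `0` or `""`. A self-contained sketch of the effect, using a trimmed stand-in for the vendored `NatPolicy` type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down stand-in for the vendored NatPolicy type.
type NatPolicy struct {
	Type         string `json:"Type"`
	Protocol     string `json:",omitempty"`
	InternalPort uint16 `json:",omitempty"`
	ExternalPort uint16 `json:",omitempty"`
}

func main() {
	b, _ := json.Marshal(NatPolicy{Type: "NAT", Protocol: "TCP"})
	// Zero-valued ports are omitted: {"Type":"NAT","Protocol":"TCP"}
	fmt.Println(string(b))
}
```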
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
new file mode 100644
index 00000000000..5d6acd69e61
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
@@ -0,0 +1,111 @@
+package jobobject
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "unsafe"
+
+ "github.com/Microsoft/hcsshim/internal/log"
+ "github.com/Microsoft/hcsshim/internal/queue"
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/windows"
+)
+
+var (
+ ioInitOnce sync.Once
+ initIOErr error
+ // Global IOCP handle that will be re-used for every job object.
+ ioCompletionPort windows.Handle
+ // Mapping of job handle to queue to place notifications in.
+ jobMap sync.Map
+)
+
+// MsgAllProcessesExited is a type representing a message that every process in a job has exited.
+type MsgAllProcessesExited struct{}
+
+// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently.
+// This should not be treated as an error.
+type MsgUnimplemented struct{}
+
+// pollIOCP polls the io completion port forever.
+func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
+ var (
+ overlapped uintptr
+ code uint32
+ key uintptr
+ )
+
+ for {
+ err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("failed to poll for job object message")
+ continue
+ }
+ if val, ok := jobMap.Load(key); ok {
+ msq, ok := val.(*queue.MessageQueue)
+ if !ok {
+ log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map")
+ continue
+ }
+ notification, err := parseMessage(code, overlapped)
+ if err != nil {
+ log.G(ctx).WithFields(logrus.Fields{
+ "code": code,
+ "overlapped": overlapped,
+ }).Warn("failed to parse job object message")
+ continue
+ }
+ if err := msq.Enqueue(notification); err == queue.ErrQueueClosed {
+ // Enqueue will only return an error when the queue is closed.
+ // The only time a queue would ever be closed is when we call `Close` on
+ // the job it belongs to, which also removes it from the jobMap, so something
+ // went wrong here. We can't return as this loop reads messages for all jobs,
+ // so just log it and move on.
+ log.G(ctx).WithFields(logrus.Fields{
+ "code": code,
+ "overlapped": overlapped,
+ }).Warn("tried to write to a closed queue")
+ continue
+ }
+ } else {
+ log.G(ctx).Warn("received a message for a job not present in the mapping")
+ }
+ }
+}
+
+func parseMessage(code uint32, overlapped uintptr) (interface{}, error) {
+ // Check code and parse out relevant information related to that notification
+ // that we care about. For now all we handle is the message that all processes
+ // in the job have exited.
+ switch code {
+ case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
+ return MsgAllProcessesExited{}, nil
+ // The remaining messages are listed for completeness; reaching the default
+ // case means the code is one we don't know how to handle.
+ case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME:
+ case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME:
+ case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT:
+ case winapi.JOB_OBJECT_MSG_NEW_PROCESS:
+ case winapi.JOB_OBJECT_MSG_EXIT_PROCESS:
+ case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS:
+ case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT:
+ case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT:
+ case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT:
+ default:
+ return nil, fmt.Errorf("unknown job notification type: %d", code)
+ }
+ return MsgUnimplemented{}, nil
+}
+
+// attachIOCP associates the job object with the given IO completion port so we get
+// notified of lifecycle events for the registered job object.
+func attachIOCP(job windows.Handle, iocp windows.Handle) error {
+ info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{
+ CompletionKey: job,
+ CompletionPort: iocp,
+ }
+ _, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
+ return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
new file mode 100644
index 00000000000..c9fdd921a7f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
@@ -0,0 +1,538 @@
+package jobobject
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "unsafe"
+
+ "github.com/Microsoft/hcsshim/internal/queue"
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "golang.org/x/sys/windows"
+)
+
+// This file provides higher level constructs for the win32 job object API.
+// Most of the core creation and management functions are already present in "golang.org/x/sys/windows"
+// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information
+// structs and associated limit flags. Whatever is not present from the job object API
+// in golang.org/x/sys/windows is located in /internal/winapi.
+//
+// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects
+
+// JobObject is a high level wrapper around a Windows job object. Holds a handle to
+// the job, a queue to receive iocp notifications about the lifecycle
+// of the job and a mutex for synchronized handle access.
+type JobObject struct {
+ handle windows.Handle
+ mq *queue.MessageQueue
+ handleLock sync.RWMutex
+}
+
+// JobLimits represents the resource constraints that can be applied to a job object.
+type JobLimits struct {
+ CPULimit uint32
+ CPUWeight uint32
+ MemoryLimitInBytes uint64
+ MaxIOPS int64
+ MaxBandwidth int64
+}
+
+type CPURateControlType uint32
+
+const (
+ WeightBased CPURateControlType = iota
+ RateBased
+)
+
+// Processor resource controls
+const (
+ cpuLimitMin = 1
+ cpuLimitMax = 10000
+ cpuWeightMin = 1
+ cpuWeightMax = 9
+)
+
+var (
+ ErrAlreadyClosed = errors.New("the handle has already been closed")
+ ErrNotRegistered = errors.New("job is not registered to receive notifications")
+)
+
+// Options represents the set of configurable options when making or opening a job object.
+type Options struct {
+ // `Name` specifies the name of the job object if a named job object is desired.
+ Name string
+ // `Notifications` specifies if the job will be registered to receive notifications.
+ // Defaults to false.
+ Notifications bool
+ // `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
+ // Defaults to false.
+ UseNTVariant bool
+ // `EnableIOTracking` enables tracking I/O statistics on the job object. More specifically this
+ // calls SetInformationJobObject with the JobObjectIoAttribution class.
+ EnableIOTracking bool
+}
+
+// Create creates a job object.
+//
+// If options.Name is an empty string, the job will not be assigned a name.
+//
+// If options.Notifications is not enabled, `PollNotification` will return immediately with error `ErrNotRegistered`.
+//
+// If `options` is nil, use default option values.
+//
+// Returns a JobObject structure and an error if there is one.
+func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
+ if options == nil {
+ options = &Options{}
+ }
+
+ var jobName *winapi.UnicodeString
+ if options.Name != "" {
+ jobName, err = winapi.NewUnicodeString(options.Name)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var jobHandle windows.Handle
+ if options.UseNTVariant {
+ oa := winapi.ObjectAttributes{
+ Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
+ ObjectName: jobName,
+ Attributes: 0,
+ }
+ status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
+ if status != 0 {
+ return nil, winapi.RtlNtStatusToDosError(status)
+ }
+ } else {
+ var jobNameBuf *uint16
+ if jobName != nil && jobName.Buffer != nil {
+ jobNameBuf = jobName.Buffer
+ }
+ jobHandle, err = windows.CreateJobObject(nil, jobNameBuf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ windows.Close(jobHandle)
+ }
+ }()
+
+ job := &JobObject{
+ handle: jobHandle,
+ }
+
+ // If the IOCP we'll be using to receive messages for all jobs hasn't been
+ // created, create it and start polling.
+ if options.Notifications {
+ mq, err := setupNotifications(ctx, job)
+ if err != nil {
+ return nil, err
+ }
+ job.mq = mq
+ }
+
+ if options.EnableIOTracking {
+ if err := enableIOTracking(jobHandle); err != nil {
+ return nil, err
+ }
+ }
+
+ return job, nil
+}
+
+// Open opens an existing job object with name provided in `options`. If no name is provided
+// return an error since we need to know what job object to open.
+//
+// If options.Notifications is false, `PollNotification` will return immediately with error `ErrNotRegistered`.
+//
+// Returns a JobObject structure and an error if there is one.
+func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
+ if options == nil || options.Name == "" {
+ return nil, errors.New("no job object name specified to open")
+ }
+
+ unicodeJobName, err := winapi.NewUnicodeString(options.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ var jobHandle windows.Handle
+ if options.UseNTVariant {
+ oa := winapi.ObjectAttributes{
+ Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
+ ObjectName: unicodeJobName,
+ Attributes: 0,
+ }
+ status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
+ if status != 0 {
+ return nil, winapi.RtlNtStatusToDosError(status)
+ }
+ } else {
+ jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ windows.Close(jobHandle)
+ }
+ }()
+
+ job := &JobObject{
+ handle: jobHandle,
+ }
+
+ // If the IOCP we'll be using to receive messages for all jobs hasn't been
+ // created, create it and start polling.
+ if options.Notifications {
+ mq, err := setupNotifications(ctx, job)
+ if err != nil {
+ return nil, err
+ }
+ job.mq = mq
+ }
+
+ return job, nil
+}
+
+// setupNotifications is a helper to set up notifications when creating or opening a job object.
+func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ ioInitOnce.Do(func() {
+ h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
+ if err != nil {
+ initIOErr = err
+ return
+ }
+ ioCompletionPort = h
+ go pollIOCP(ctx, h)
+ })
+
+ if initIOErr != nil {
+ return nil, initIOErr
+ }
+
+ mq := queue.NewMessageQueue()
+ jobMap.Store(uintptr(job.handle), mq)
+ if err := attachIOCP(job.handle, ioCompletionPort); err != nil {
+ jobMap.Delete(uintptr(job.handle))
+ return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err)
+ }
+ return mq, nil
+}
+
+// PollNotification will poll for a job object notification. Only a single caller should
+// poll per job (ideally one goroutine in a loop), and the call will block if there is no
+// notification ready. It returns immediately with error `ErrNotRegistered` if the job
+// was not registered to receive notifications during `Create`. Internally, messages are
+// queued as they arrive, so none are dropped between polls.
+func (job *JobObject) PollNotification() (interface{}, error) {
+ if job.mq == nil {
+ return nil, ErrNotRegistered
+ }
+ return job.mq.Dequeue()
+}
+
+// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to
+// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process
+// has already started running.
+func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if err := attrList.Update(
+ winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST,
+ unsafe.Pointer(&job.handle),
+ unsafe.Sizeof(job.handle),
+ ); err != nil {
+ return fmt.Errorf("failed to update proc thread attributes for job object: %w", err)
+ }
+
+ return nil
+}
+
+// Close closes the job object handle.
+func (job *JobObject) Close() error {
+ job.handleLock.Lock()
+ defer job.handleLock.Unlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if err := windows.Close(job.handle); err != nil {
+ return err
+ }
+
+ if job.mq != nil {
+ job.mq.Close()
+ }
+ // The handle is now invalid, so if the map entry used to receive notifications
+ // for this job still exists, remove it so we stop receiving notifications.
+ if _, ok := jobMap.Load(uintptr(job.handle)); ok {
+ jobMap.Delete(uintptr(job.handle))
+ }
+
+ job.handle = 0
+ return nil
+}
+
+// Assign assigns a process to the job object.
+func (job *JobObject) Assign(pid uint32) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if pid == 0 {
+ return errors.New("invalid pid: 0")
+ }
+ hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid)
+ if err != nil {
+ return err
+ }
+ defer windows.Close(hProc)
+ return windows.AssignProcessToJobObject(job.handle, hProc)
+}
+
+// Terminate terminates the job, essentially calling TerminateProcess on every process in
+// the job.
+func (job *JobObject) Terminate(exitCode uint32) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+ return windows.TerminateJobObject(job.handle, exitCode)
+}
+
+// Pids returns all of the process IDs in the job object.
+func (job *JobObject) Pids() ([]uint32, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{}
+ err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectBasicProcessIdList,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ )
+
+ // Success here means the job holds either one process or none at all; any other
+ // count makes this fixed-size query fail with ERROR_MORE_DATA. If
+ // info.NumberOfProcessIdsInList is 1 return that pid, otherwise return an empty slice.
+ if err == nil {
+ if info.NumberOfProcessIdsInList == 1 {
+ return []uint32{uint32(info.ProcessIdList[0])}, nil
+ }
+ // Return an empty slice instead of nil to play well with callers, and do not
+ // treat a job with no running processes as an error.
+ return []uint32{}, nil
+ }
+
+ if err != winapi.ERROR_MORE_DATA {
+ return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err)
+ }
+
+ jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1))
+ buf := make([]byte, jobBasicProcessIDListSize)
+ if err = winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectBasicProcessIdList,
+ unsafe.Pointer(&buf[0]),
+ uint32(len(buf)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err)
+ }
+
+ bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0]))
+ pids := make([]uint32, bufInfo.NumberOfProcessIdsInList)
+ for i, bufPid := range bufInfo.AllPids() {
+ pids[i] = uint32(bufPid)
+ }
+ return pids, nil
+}
+
+// QueryMemoryStats gets the memory stats for the job object.
+func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectMemoryUsageInformation,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for job object memory stats: %w", err)
+ }
+ return &info, nil
+}
+
+// QueryProcessorStats gets the processor stats for the job object.
+func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectBasicAccountingInformation,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for job object process stats: %w", err)
+ }
+ return &info, nil
+}
+
+// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error
+// unless `EnableIOTracking` was set to true on creation of the job or SetIOTracking()
+// has been called on it since.
+func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
+ ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
+ }
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ winapi.JobObjectIoAttribution,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("failed to query for job object storage stats: %w", err)
+ }
+ return &info, nil
+}
+
+// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
+// private working set for every process running in the job.
+func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
+ pids, err := job.Pids()
+ if err != nil {
+ return 0, err
+ }
+
+ openAndQueryWorkingSet := func(pid uint32) (uint64, error) {
+ h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
+ if err != nil {
+ // Skip to the next pid if OpenProcess fails. This handles the case where one of
+ // the pids in the job exited before we could open a handle to it.
+ return 0, nil
+ }
+ defer func() {
+ _ = windows.Close(h)
+ }()
+ // Check if the process is actually running in the job still. There's a small chance
+ // that the process could have exited and had its pid re-used between grabbing the pids
+ // in the job and opening the handle to it above.
+ var inJob int32
+ if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil {
+ // This shouldn't fail unless we have incorrect access rights, which we control
+ // here, so it's best to error out if it does fail.
+ return 0, err
+ }
+ // Don't report stats for this process as it's not running in the job. This shouldn't be
+ // an error condition though.
+ if inJob == 0 {
+ return 0, nil
+ }
+
+ var vmCounters winapi.VM_COUNTERS_EX2
+ status := winapi.NtQueryInformationProcess(
+ h,
+ winapi.ProcessVmCounters,
+ unsafe.Pointer(&vmCounters),
+ uint32(unsafe.Sizeof(vmCounters)),
+ nil,
+ )
+ if !winapi.NTSuccess(status) {
+ return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status))
+ }
+ return uint64(vmCounters.PrivateWorkingSetSize), nil
+ }
+
+ var jobWorkingSetSize uint64
+ for _, pid := range pids {
+ workingSet, err := openAndQueryWorkingSet(pid)
+ if err != nil {
+ return 0, err
+ }
+ jobWorkingSetSize += workingSet
+ }
+
+ return jobWorkingSetSize, nil
+}
+
+// SetIOTracking enables IO tracking for processes in the job object.
+// This enables use of the QueryStorageStats method.
+func (job *JobObject) SetIOTracking() error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ return enableIOTracking(job.handle)
+}
+
+func enableIOTracking(job windows.Handle) error {
+ info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
+ ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
+ }
+ if _, err := windows.SetInformationJobObject(
+ job,
+ winapi.JobObjectIoAttribution,
+ uintptr(unsafe.Pointer(&info)),
+ uint32(unsafe.Sizeof(info)),
+ ); err != nil {
+ return fmt.Errorf("failed to enable IO tracking on job object: %w", err)
+ }
+ return nil
+}
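Putting the new package together, a consumer would typically drive a job object like the following sketch. Hedged assumptions: `ctx` and a running process `pid` exist, and the package is internal to hcsshim, so only hcsshim code can actually import it.

```go
// Sketch: create a job that reports lifecycle notifications, assign a
// process to it, then wait until every process in the job has exited.
job, err := jobobject.Create(ctx, &jobobject.Options{
	Name:          "demo-job",
	Notifications: true,
})
if err != nil {
	return err
}
defer job.Close()

if err := job.Assign(pid); err != nil {
	return err
}

for {
	msg, err := job.PollNotification()
	if err != nil {
		return err
	}
	// MsgAllProcessesExited is delivered when the active process count hits zero.
	if _, ok := msg.(jobobject.MsgAllProcessesExited); ok {
		break
	}
}
```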
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
new file mode 100644
index 00000000000..4efde292c49
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
@@ -0,0 +1,315 @@
+package jobobject
+
+import (
+ "errors"
+ "fmt"
+ "unsafe"
+
+ "github.com/Microsoft/hcsshim/internal/winapi"
+ "golang.org/x/sys/windows"
+)
+
+const (
+ memoryLimitMax uint64 = 0xffffffffffffffff
+)
+
+func isFlagSet(flag, controlFlags uint32) bool {
+ return (flag & controlFlags) == flag
+}
+
+// SetResourceLimits sets resource limits on the job object (cpu, memory, storage).
+func (job *JobObject) SetResourceLimits(limits *JobLimits) error {
+ // Go through and check what limits were specified and apply them to the job.
+ if limits.MemoryLimitInBytes != 0 {
+ if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil {
+ return fmt.Errorf("failed to set job object memory limit: %w", err)
+ }
+ }
+
+ if limits.CPULimit != 0 {
+ if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil {
+ return fmt.Errorf("failed to set job object cpu limit: %w", err)
+ }
+ } else if limits.CPUWeight != 0 {
+ if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil {
+ return fmt.Errorf("failed to set job object cpu limit: %w", err)
+ }
+ }
+
+ if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 {
+ if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil {
+ return fmt.Errorf("failed to set io limit on job object: %w", err)
+ }
+ }
+ return nil
+}
+
+// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job
+// should terminate all of its processes when the last open handle to the job is closed.
+func (job *JobObject) SetTerminateOnLastHandleClose() error {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return err
+ }
+ info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ return job.setExtendedInformation(info)
+}
+
+// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`.
+func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error {
+ if memoryLimitInBytes >= memoryLimitMax {
+ return errors.New("memory limit specified exceeds the max size")
+ }
+
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return err
+ }
+
+ info.JobMemoryLimit = uintptr(memoryLimitInBytes)
+ info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY
+ return job.setExtendedInformation(info)
+}
+
+// GetMemoryLimit gets the memory limit in bytes of the job object.
+func (job *JobObject) GetMemoryLimit() (uint64, error) {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return 0, err
+ }
+ return uint64(info.JobMemoryLimit), nil
+}
+
+// SetCPULimit sets the CPU limit of the job object to `rateControlValue`, interpreted
+// according to the specified `CPURateControlType` (weight-based or rate-based).
+func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error {
+ cpuInfo, err := job.getCPURateControlInformation()
+ if err != nil {
+ return err
+ }
+ switch rateControlType {
+ case WeightBased:
+ if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax {
+ return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue)
+ }
+ cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED
+ cpuInfo.Value = rateControlValue
+ case RateBased:
+ if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax {
+ return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue)
+ }
+ cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP
+ cpuInfo.Value = rateControlValue
+ default:
+ return errors.New("invalid job object cpu rate control type")
+ }
+ return job.setCPURateControlInfo(cpuInfo)
+}
+
+// GetCPULimit gets the cpu limits for the job object.
+// `rateControlType` is used to indicate what type of cpu limit to query for.
+func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) {
+ info, err := job.getCPURateControlInformation()
+ if err != nil {
+ return 0, err
+ }
+
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) {
+ return 0, errors.New("the job does not have cpu rate control enabled")
+ }
+
+ switch rateControlType {
+ case WeightBased:
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) {
+ return 0, errors.New("cannot get cpu weight for job object without cpu weight option set")
+ }
+ case RateBased:
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) {
+ return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set")
+ }
+ default:
+ return 0, errors.New("invalid job object cpu rate control type")
+ }
+ return info.Value, nil
+}
+
+// SetCPUAffinity sets the processor affinity for the job object.
+// The affinity is passed in as a bitmask.
+func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return err
+ }
+ info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY)
+ info.BasicLimitInformation.Affinity = uintptr(affinityBitMask)
+ return job.setExtendedInformation(info)
+}
+
+// GetCPUAffinity gets the processor affinity for the job object.
+// The returned affinity is a bitmask.
+func (job *JobObject) GetCPUAffinity() (uint64, error) {
+ info, err := job.getExtendedInformation()
+ if err != nil {
+ return 0, err
+ }
+ return uint64(info.BasicLimitInformation.Affinity), nil
+}
+
+// SetIOLimit sets the IO limits specified on the job object.
+func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error {
+ ioInfo, err := job.getIOLimit()
+ if err != nil {
+ return err
+ }
+ ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE
+ if maxBandwidth != 0 {
+ ioInfo.MaxBandwidth = maxBandwidth
+ }
+ if maxIOPS != 0 {
+ ioInfo.MaxIops = maxIOPS
+ }
+ return job.setIORateControlInfo(ioInfo)
+}
+
+// GetIOMaxBandwidthLimit gets the max bandwidth for the job object.
+func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) {
+ info, err := job.getIOLimit()
+ if err != nil {
+ return 0, err
+ }
+ return info.MaxBandwidth, nil
+}
+
+// GetIOMaxIopsLimit gets the max iops for the job object.
+func (job *JobObject) GetIOMaxIopsLimit() (int64, error) {
+ info, err := job.getIOLimit()
+ if err != nil {
+ return 0, err
+ }
+ return info.MaxIops, nil
+}
+
+// Helper function for getting a job object's extended information.
+func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ windows.JobObjectExtendedLimitInformation,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("query %v returned error: %w", info, err)
+ }
+ return &info, nil
+}
+
+// Helper function for getting a job object's CPU rate control information.
+func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{}
+ if err := winapi.QueryInformationJobObject(
+ job.handle,
+ windows.JobObjectCpuRateControlInformation,
+ unsafe.Pointer(&info),
+ uint32(unsafe.Sizeof(info)),
+ nil,
+ ); err != nil {
+ return nil, fmt.Errorf("query %v returned error: %w", info, err)
+ }
+ return &info, nil
+}
+
+// Helper function for setting a job object's extended information.
+func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if _, err := windows.SetInformationJobObject(
+ job.handle,
+ windows.JobObjectExtendedLimitInformation,
+ uintptr(unsafe.Pointer(info)),
+ uint32(unsafe.Sizeof(*info)),
+ ); err != nil {
+ return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err)
+ }
+ return nil
+}
+
+// Helper function for querying job handle for IO limit information.
+func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return nil, ErrAlreadyClosed
+ }
+
+ ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{}
+ var blockCount uint32 = 1
+
+ if _, err := winapi.QueryIoRateControlInformationJobObject(
+ job.handle,
+ nil,
+ &ioInfo,
+ &blockCount,
+ ); err != nil {
+ return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err)
+ }
+
+ if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) {
+ return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo)
+ }
+ return ioInfo, nil
+}
+
+// Helper function for setting a job object's IO rate control information.
+func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+
+ if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil {
+ return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err)
+ }
+ return nil
+}
+
+// Helper function for setting a job object's CPU rate control information.
+func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+ if _, err := windows.SetInformationJobObject(
+ job.handle,
+ windows.JobObjectCpuRateControlInformation,
+ uintptr(unsafe.Pointer(cpuInfo)),
+ uint32(unsafe.Sizeof(*cpuInfo)),
+ ); err != nil {
+ return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err)
+ }
+ return nil
+}
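As a usage sketch for the limit helpers above (the values are illustrative, and `job` is assumed to come from `jobobject.Create`): a rate-based CPU cap uses the 1-10000 cycle scale, while the memory limit is a job-wide commit cap in bytes.

```go
// Sketch: constrain an existing job. CPULimit and CPUWeight are mutually
// exclusive here, since SetResourceLimits prefers CPULimit when both are set.
limits := &jobobject.JobLimits{
	CPULimit:           5000,              // rate-based: 5000/10000 = 50% of the CPU
	MemoryLimitInBytes: 256 * 1024 * 1024, // 256 MiB job-wide commit limit
}
if err := job.SetResourceLimits(limits); err != nil {
	return err
}

// Getters mirror the setters.
memLimit, err := job.GetMemoryLimit()
if err != nil {
	return err
}
fmt.Printf("job memory limit: %d bytes\n", memLimit)
```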
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
new file mode 100644
index 00000000000..4eb9bb9f1f3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
@@ -0,0 +1,92 @@
+package queue
+
+import (
+ "errors"
+ "sync"
+)
+
+var ErrQueueClosed = errors.New("the queue is closed for reading and writing")
+
+// MessageQueue represents a thread-safe message queue used to write messages to and
+// retrieve messages from.
+type MessageQueue struct {
+ m *sync.RWMutex
+ c *sync.Cond
+ messages []interface{}
+ closed bool
+}
+
+// NewMessageQueue returns a new MessageQueue.
+func NewMessageQueue() *MessageQueue {
+ m := &sync.RWMutex{}
+ return &MessageQueue{
+ m: m,
+ c: sync.NewCond(m),
+ messages: []interface{}{},
+ }
+}
+
+// Enqueue writes `msg` to the queue.
+func (mq *MessageQueue) Enqueue(msg interface{}) error {
+ mq.m.Lock()
+ defer mq.m.Unlock()
+
+ if mq.closed {
+ return ErrQueueClosed
+ }
+ mq.messages = append(mq.messages, msg)
+ // Signal a waiter that there is now a value available in the queue.
+ mq.c.Signal()
+ return nil
+}
+
+// Dequeue will read a value from the queue and remove it. If the queue
+// is empty, this will block until the queue is closed or a value gets enqueued.
+func (mq *MessageQueue) Dequeue() (interface{}, error) {
+ mq.m.Lock()
+ defer mq.m.Unlock()
+
+ for !mq.closed && mq.size() == 0 {
+ mq.c.Wait()
+ }
+
+ // We got woken up, check if it's because the queue got closed.
+ if mq.closed {
+ return nil, ErrQueueClosed
+ }
+
+ val := mq.messages[0]
+ mq.messages[0] = nil
+ mq.messages = mq.messages[1:]
+ return val, nil
+}
+
+// Size returns the size of the queue.
+func (mq *MessageQueue) Size() int {
+ mq.m.RLock()
+ defer mq.m.RUnlock()
+ return mq.size()
+}
+
+// size is the unexported length check for use inside methods that already hold the lock.
+func (mq *MessageQueue) size() int {
+ return len(mq.messages)
+}
+
+// Close closes the queue for future writes or reads. Any attempts to read or write from the
+// queue after close will return ErrQueueClosed. This is safe to call multiple times.
+func (mq *MessageQueue) Close() {
+ mq.m.Lock()
+ defer mq.m.Unlock()
+
+ // Already closed, noop
+ if mq.closed {
+ return
+ }
+
+ mq.messages = nil
+ mq.closed = true
+ // If there's anybody currently waiting on a value from Dequeue, we need to
+ // broadcast so the read(s) can return ErrQueueClosed.
+ mq.c.Broadcast()
+}
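A minimal producer/consumer sketch of this queue (hedged: the package is internal to hcsshim, so the import only resolves inside that module):

```go
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/queue"
)

func main() {
	mq := queue.NewMessageQueue()

	// Producer: enqueue two messages.
	go func() {
		_ = mq.Enqueue("first")
		_ = mq.Enqueue("second")
	}()

	// Consumer: Dequeue blocks until a value is available.
	for i := 0; i < 2; i++ {
		msg, err := mq.Dequeue()
		if err != nil {
			fmt.Println("queue closed:", err)
			return
		}
		fmt.Println(msg)
	}

	// Close wakes any blocked Dequeue; afterwards both Enqueue and
	// Dequeue return ErrQueueClosed. Note that Close drops any
	// messages still pending in the queue.
	mq.Close()
	if _, err := mq.Dequeue(); err == queue.ErrQueueClosed {
		fmt.Println("done")
	}
}
```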
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
index ff81ac2c156..5debe974d41 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go
@@ -21,7 +21,7 @@ func ActivateLayer(ctx context.Context, path string) (err error) {
err = activateLayer(&stdDriverInfo, path)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
index ffee31ab126..480aee8725c 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go
@@ -21,7 +21,7 @@ func CreateLayer(ctx context.Context, path, parent string) (err error) {
err = createLayer(&stdDriverInfo, path, parent)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
index 5a3809ae229..131aa94f14b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go
@@ -28,7 +28,7 @@ func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []str
err = createSandboxLayer(&stdDriverInfo, path, 0, layers)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
index 787054e7941..424467ac331 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go
@@ -19,7 +19,7 @@ func DestroyLayer(ctx context.Context, path string) (err error) {
err = destroyLayer(&stdDriverInfo, path)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
index 22f7605beee..035c9041e68 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go
@@ -25,7 +25,7 @@ func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error
err = expandSandboxSize(&stdDriverInfo, path, size)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
// Manually expand the volume now in order to work around bugs in 19H1 and
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
index 09f0de1a446..97b27eb7d6b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go
@@ -35,7 +35,7 @@ func ExportLayer(ctx context.Context, path string, exportFolderPath string, pare
err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
index 4d22d0ecf3d..8d213f5871a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go
@@ -27,7 +27,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
log.G(ctx).Debug("Calling proc (1)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
if err != nil {
- return "", hcserror.New(err, title+" - failed", "(first call)")
+ return "", hcserror.New(err, title, "(first call)")
}
// Allocate a mount path of the returned length.
@@ -41,7 +41,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
log.G(ctx).Debug("Calling proc (2)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0])
if err != nil {
- return "", hcserror.New(err, title+" - failed", "(second call)")
+ return "", hcserror.New(err, title, "(second call)")
}
mountPath := syscall.UTF16ToString(mountPathp[0:])
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
index bcc8fbd4239..ae1fff84036 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go
@@ -21,7 +21,7 @@ func GetSharedBaseImages(ctx context.Context) (_ string, err error) {
var buffer *uint16
err = getBaseImages(&buffer)
if err != nil {
- return "", hcserror.New(err, title+" - failed", "")
+ return "", hcserror.New(err, title, "")
}
imageData := interop.ConvertAndFreeCoTaskMemString(buffer)
span.AddAttributes(trace.StringAttribute("imageData", imageData))
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
index 3eaca27808e..4b282fef9db 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go
@@ -20,7 +20,7 @@ func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error
err = grantVmAccess(vmid, filepath)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
index b3c150d66fe..687550f0be3 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go
@@ -36,7 +36,7 @@ func ImportLayer(ctx context.Context, path string, importFolderPath string, pare
err = importLayer(&stdDriverInfo, path, importFolderPath, layers)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
index c6999973cb8..01e67233939 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go
@@ -21,7 +21,7 @@ func LayerExists(ctx context.Context, path string) (_ bool, err error) {
var exists uint32
err = layerExists(&stdDriverInfo, path, &exists)
if err != nil {
- return false, hcserror.New(err, title+" - failed", "")
+ return false, hcserror.New(err, title, "")
}
span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0))
return exists != 0, nil
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
index 83ba72cfada..b7f3064f26b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go
@@ -76,7 +76,7 @@ func readTombstones(path string) (map[string]([]string), error) {
defer tf.Close()
s := bufio.NewScanner(tf)
if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
- return nil, errors.New("Invalid tombstones file")
+ return nil, errors.New("invalid tombstones file")
}
ts := make(map[string]([]string))
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
index bcf39c6b86c..09950297cef 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
@@ -17,12 +17,12 @@ func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) {
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
- span.AddAttributes(trace.StringAttribute("name", name))
+ span.AddAttributes(trace.StringAttribute("objectName", name))
var id guid.GUID
err = nameToGuid(name, &id)
if err != nil {
- return guid.GUID{}, hcserror.New(err, title+" - failed", "")
+ return guid.GUID{}, hcserror.New(err, title, "")
}
span.AddAttributes(trace.StringAttribute("guid", id.String()))
return id, nil
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
index 55f7730d0c1..90129faefbb 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
@@ -38,7 +38,7 @@ func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (
defer prepareLayerLock.Unlock()
err = prepareLayer(&stdDriverInfo, path, layers)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
index 79fb986786c..71b130c525f 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go
@@ -19,7 +19,7 @@ func UnprepareLayer(ctx context.Context, path string) (err error) {
err = unprepareLayer(&stdDriverInfo, path)
if err != nil {
- return hcserror.New(err, title+" - failed", "")
+ return hcserror.New(err, title, "")
}
return nil
}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
new file mode 100644
index 00000000000..def9525417e
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
@@ -0,0 +1,44 @@
+package winapi
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
+
+// CreatePseudoConsole creates a windows pseudo console.
+func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error {
+ // We need this wrapper because the raw syscall takes the COORD struct by value rather than by pointer, so we reinterpret it as a uint32 for the call.
+ return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, dwFlags, hpcon)
+}
+
+// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
+func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error {
+ // We need this wrapper because the raw syscall takes the COORD struct by value rather than by pointer, so we reinterpret it as a uint32 for the call.
+ return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size))))
+}
+
+// HRESULT WINAPI CreatePseudoConsole(
+// _In_ COORD size,
+// _In_ HANDLE hInput,
+// _In_ HANDLE hOutput,
+// _In_ DWORD dwFlags,
+// _Out_ HPCON* phPC
+// );
+//
+//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole
+
+// void WINAPI ClosePseudoConsole(
+// _In_ HPCON hPC
+// );
+//
+//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole
+
+// HRESULT WINAPI ResizePseudoConsole(
+// _In_ HPCON hPC ,
+// _In_ COORD size
+// );
+//
+//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
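For orientation, the wrappers above compose like this. This is a hedged sketch with the pipe plumbing trimmed; in practice the pseudo console handle is wired into a child process via the PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE attribute.

```go
var (
	inR, inW   windows.Handle
	outR, outW windows.Handle
	hpcon      windows.Handle
)
// Create the pipes that back the pseudo console's input and output.
if err := windows.CreatePipe(&inR, &inW, nil, 0); err != nil {
	return err
}
if err := windows.CreatePipe(&outR, &outW, nil, 0); err != nil {
	return err
}
size := windows.Coord{X: 80, Y: 25}
if err := winapi.CreatePseudoConsole(size, inR, outW, 0, &hpcon); err != nil {
	return err
}
defer winapi.ClosePseudoConsole(hpcon)

// Later, e.g. on a terminal resize event:
if err := winapi.ResizePseudoConsole(hpcon, windows.Coord{X: 120, Y: 40}); err != nil {
	return err
}
```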
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
deleted file mode 100644
index 4e609cbf1cd..00000000000
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package winapi
-
-//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
index ba12b1ad92e..7eb13f8f0a8 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
@@ -24,7 +24,10 @@ const (
// Access rights for creating or opening job objects.
//
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
-const JOB_OBJECT_ALL_ACCESS = 0x1F001F
+const (
+ JOB_OBJECT_QUERY = 0x0004
+ JOB_OBJECT_ALL_ACCESS = 0x1F001F
+)
// IO limit flags
//
@@ -93,7 +96,7 @@ type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {
// AllPids returns all the process Ids in the job object.
func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr {
- return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList]
+ return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList]
}
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information
@@ -162,7 +165,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// PBOOL Result
// );
//
-//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob
+//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob
// BOOL QueryInformationJobObject(
// HANDLE hJob,
@@ -172,7 +175,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// LPDWORD lpReturnLength
// );
//
-//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
+//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
// HANDLE OpenJobObjectW(
// DWORD dwDesiredAccess,
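The `AllPids` change above adds the capacity index to the slice expression, so an `append` on the returned slice reallocates instead of writing into the memory that follows the PID list. A self-contained illustration of the three-index slice semantics:

```go
package main

import "fmt"

func main() {
	backing := []uintptr{101, 102, 999} // pretend 999 is memory past the valid PIDs
	pids := backing[:2:2]               // len 2, cap 2: appends must reallocate

	pids = append(pids, 103)
	fmt.Println(backing) // [101 102 999]: the backing array is untouched
	fmt.Println(pids)    // [101 102 103]: append copied to a new array
}
```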
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go
index 83f70406446..53f62948c90 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go
@@ -1,27 +1,4 @@
package winapi
-// VOID RtlMoveMemory(
-// _Out_ VOID UNALIGNED *Destination,
-// _In_ const VOID UNALIGNED *Source,
-// _In_ SIZE_T Length
-// );
-//sys RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) = kernel32.RtlMoveMemory
-
//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
//sys LocalFree(ptr uintptr) = kernel32.LocalFree
-
-// BOOL QueryWorkingSet(
-// HANDLE hProcess,
-// PVOID pv,
-// DWORD cb
-// );
-//sys QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSet
-
-type PSAPI_WORKING_SET_INFORMATION struct {
- NumberOfEntries uintptr
- WorkingSetInfo [1]PSAPI_WORKING_SET_BLOCK
-}
-
-type PSAPI_WORKING_SET_BLOCK struct {
- Flags uintptr
-}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
index b87068327cc..222529f433a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
@@ -2,9 +2,64 @@ package winapi
const PROCESS_ALL_ACCESS uint32 = 2097151
-// DWORD GetProcessImageFileNameW(
-// HANDLE hProcess,
-// LPWSTR lpImageFileName,
-// DWORD nSize
+const (
+ PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
+ PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
+)
+
+// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures.
+const ProcessVmCounters = 3
+
+// __kernel_entry NTSTATUS NtQueryInformationProcess(
+// [in] HANDLE ProcessHandle,
+// [in] PROCESSINFOCLASS ProcessInformationClass,
+// [out] PVOID ProcessInformation,
+// [in] ULONG ProcessInformationLength,
+// [out, optional] PULONG ReturnLength
// );
-//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW
+//
+//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
+
+// typedef struct _VM_COUNTERS_EX
+// {
+// SIZE_T PeakVirtualSize;
+// SIZE_T VirtualSize;
+// ULONG PageFaultCount;
+// SIZE_T PeakWorkingSetSize;
+// SIZE_T WorkingSetSize;
+// SIZE_T QuotaPeakPagedPoolUsage;
+// SIZE_T QuotaPagedPoolUsage;
+// SIZE_T QuotaPeakNonPagedPoolUsage;
+// SIZE_T QuotaNonPagedPoolUsage;
+// SIZE_T PagefileUsage;
+// SIZE_T PeakPagefileUsage;
+// SIZE_T PrivateUsage;
+// } VM_COUNTERS_EX, *PVM_COUNTERS_EX;
+//
+type VM_COUNTERS_EX struct {
+ PeakVirtualSize uintptr
+ VirtualSize uintptr
+ PageFaultCount uint32
+ PeakWorkingSetSize uintptr
+ WorkingSetSize uintptr
+ QuotaPeakPagedPoolUsage uintptr
+ QuotaPagedPoolUsage uintptr
+ QuotaPeakNonPagedPoolUsage uintptr
+ QuotaNonPagedPoolUsage uintptr
+ PagefileUsage uintptr
+ PeakPagefileUsage uintptr
+ PrivateUsage uintptr
+}
+
+// typedef struct _VM_COUNTERS_EX2
+// {
+// VM_COUNTERS_EX CountersEx;
+// SIZE_T PrivateWorkingSetSize;
+// SIZE_T SharedCommitUsage;
+// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2;
+//
+type VM_COUNTERS_EX2 struct {
+ CountersEx VM_COUNTERS_EX
+ PrivateWorkingSetSize uintptr
+ SharedCommitUsage uintptr
+}
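For reference, the new syscall wrapper and structs combine as below when sampling a process's private working set; this mirrors how `jobobject.QueryPrivateWorkingSet` uses them. Hedged sketch: `hProc` is assumed to be an open handle with PROCESS_QUERY_LIMITED_INFORMATION access.

```go
var counters winapi.VM_COUNTERS_EX2
status := winapi.NtQueryInformationProcess(
	hProc,
	winapi.ProcessVmCounters,
	unsafe.Pointer(&counters),
	uint32(unsafe.Sizeof(counters)),
	nil,
)
if !winapi.NTSuccess(status) {
	return winapi.RtlNtStatusToDosError(status)
}
fmt.Printf("private working set: %d bytes\n", counters.PrivateWorkingSetSize)
```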
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go
index 327f57d7c29..78fe01a4b41 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go
@@ -12,7 +12,8 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
// ULONG SystemInformationLength,
// PULONG ReturnLength
// );
-//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
+//
+//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
type SYSTEM_PROCESS_INFORMATION struct {
NextEntryOffset uint32 // ULONG
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
index db59567d089..859b753c246 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
@@ -20,36 +20,41 @@ func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) {
return
}
+// UnicodeString corresponds to the UNICODE_STRING win32 struct defined here:
+// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string
type UnicodeString struct {
Length uint16
MaximumLength uint16
Buffer *uint16
}
+// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value
+// denotes the maximum number of wide chars a path can have.
+const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767
+
+// String converts a UnicodeString to a Go string.
func (uni UnicodeString) String() string {
// UnicodeString is not guaranteed to be null terminated, therefore
// use the UnicodeString's Length field
- return syscall.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2)))
+ return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2)))
}
// NewUnicodeString allocates a new UnicodeString and copies `s` into
// the buffer of the new UnicodeString.
func NewUnicodeString(s string) (*UnicodeString, error) {
- // Get length of original `s` to use in the UnicodeString since the `buf`
- // created later will have an additional trailing null character
- length := len(s)
- if length > 32767 {
- return nil, syscall.ENAMETOOLONG
- }
-
buf, err := windows.UTF16FromString(s)
if err != nil {
return nil, err
}
+
+ if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH {
+ return nil, syscall.ENAMETOOLONG
+ }
+
uni := &UnicodeString{
- Length: uint16(length * 2),
- MaximumLength: uint16(length * 2),
+ // The length is in bytes and should not include the trailing null character.
+ Length: uint16((len(buf) - 1) * 2),
+ MaximumLength: uint16((len(buf) - 1) * 2),
Buffer: &buf[0],
}
return uni, nil
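
Editor's note on the fix above: the old code measured len(s), which counts UTF-8 bytes, while UNICODE_STRING lengths count UTF-16 bytes; converting with windows.UTF16FromString first makes the two agree for non-ASCII input, and the overflow check now runs against the real UTF-16 length. A minimal usage sketch (illustrative only, since the winapi package is internal to hcsshim):

```go
// Length is the UTF-16 byte count and excludes the trailing NUL that
// windows.UTF16FromString appends, exactly as the new code computes it.
u, err := winapi.NewUnicodeString(`C:\Windows`)
if err != nil {
	panic(err)
}
fmt.Println(u.Length)   // 20: ten UTF-16 code units, two bytes each
fmt.Println(u.String()) // C:\Windows
```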
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
index ec88c0d2128..d2cc9d9fba6 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
@@ -2,4 +2,4 @@
// be thought of as an extension to golang.org/x/sys/windows.
package winapi
-//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
+//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
index 2941b0f9809..1f16cf0b8e1 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
@@ -37,18 +37,19 @@ func errnoErr(e syscall.Errno) error {
}
var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
- modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
- modpsapi = windows.NewLazySystemDLL("psapi.dll")
modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
+ procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole")
+ procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole")
+ procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole")
procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation")
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
procSearchPathW = modkernel32.NewProc("SearchPathW")
procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread")
- procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procIsProcessInJob = modkernel32.NewProc("IsProcessInJob")
procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW")
@@ -57,11 +58,9 @@ var (
procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject")
procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject")
procLogonUserW = modadvapi32.NewProc("LogonUserW")
- procRtlMoveMemory = modkernel32.NewProc("RtlMoveMemory")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree")
- procQueryWorkingSet = modpsapi.NewProc("QueryWorkingSet")
- procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW")
+ procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess")
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
@@ -74,7 +73,34 @@ var (
procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError")
)
-func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
+func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) {
+ r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0)
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
+
+func ClosePseudoConsole(hpc windows.Handle) {
+ syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0)
+ return
+}
+
+func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
+ r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0)
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
+
+func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) {
r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
status = uint32(r0)
return
@@ -114,19 +140,7 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes,
return
}
-func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
-
-func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) {
+func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) {
r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
if r1 == 0 {
if e1 != 0 {
@@ -138,7 +152,7 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result
return
}
-func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
+func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
if r1 == 0 {
if e1 != 0 {
@@ -219,18 +233,6 @@ func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uin
return
}
-func RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procRtlMoveMemory.Addr(), 3, uintptr(unsafe.Pointer(destination)), uintptr(unsafe.Pointer(source)), uintptr(length))
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
-
func LocalAlloc(flags uint32, size int) (ptr uintptr) {
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
ptr = uintptr(r0)
@@ -242,28 +244,9 @@ func LocalFree(ptr uintptr) {
return
}
-func QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procQueryWorkingSet.Addr(), 3, uintptr(handle), uintptr(pv), uintptr(cb))
- if r1 == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
-
-func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize))
- size = uint32(r0)
- if size == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
+func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) {
+ r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0)
+ status = uint32(r0)
return
}
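
The switch from uintptr to unsafe.Pointer in these signatures matters for the garbage collector: an unsafe.Pointer argument keeps the buffer it points at reachable across the syscall, whereas a bare uintptr does not. The usual calling pattern around NtQuerySystemInformation is a grow-and-retry loop; a sketch, assuming the SystemProcessInformation class value (5, from winternl.h), which this diff does not itself define:

```go
// Sketch of the grow-and-retry pattern around the new unsafe.Pointer
// signature; STATUS_INFO_LENGTH_MISMATCH is declared in system.go above.
const systemProcessInformation = 5 // assumption: winternl.h value

func querySystemProcesses() ([]byte, error) {
	buf := make([]byte, 4096)
	for {
		var needed uint32
		status := winapi.NtQuerySystemInformation(
			systemProcessInformation,
			unsafe.Pointer(&buf[0]),
			uint32(len(buf)),
			&needed,
		)
		if status == winapi.STATUS_INFO_LENGTH_MISMATCH {
			buf = make([]byte, needed+1024) // process list grew; retry larger
			continue
		}
		if status != 0 {
			return nil, winapi.RtlNtStatusToDosError(status)
		}
		return buf, nil
	}
}
```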
diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
index e9267b9554f..75dce5d821d 100644
--- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
+++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
@@ -35,4 +35,16 @@ const (
// V20H2 corresponds to Windows Server 20H2 (semi-annual channel).
V20H2 = 19042
+
+ // V21H1 corresponds to Windows Server 21H1 (semi-annual channel).
+ V21H1 = 19043
+
+ // V21H2Win10 corresponds to Windows 10 (November 2021 Update).
+ V21H2Win10 = 19044
+
+ // V21H2Server corresponds to Windows Server 2022 (ltsc2022).
+ V21H2Server = 20348
+
+ // V21H2Win11 corresponds to Windows 11 (original release).
+ V21H2Win11 = 22000
)
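
These constants are meant to be compared against the running build number. A one-line sketch, assuming the osversion package's Build() helper (which reads the host's kernel build in upstream hcsshim) is available:

```go
// Gate ltsc2022-only behavior on the newly added constant.
if osversion.Build() >= osversion.V21H2Server {
	// Windows Server 2022 or later.
}
```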
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
deleted file mode 100644
index 339177be663..00000000000
--- a/vendor/github.com/beorn7/perks/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013 Blake Mizerany
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
deleted file mode 100644
index 1602287d7ce..00000000000
--- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt
+++ /dev/null
@@ -1,2388 +0,0 @@
-... (exampledata.txt: 2,388 integer sample values elided; the entire data file is deleted)
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
deleted file mode 100644
index d7d14f8eb63..00000000000
--- a/vendor/github.com/beorn7/perks/quantile/stream.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Package quantile computes approximate quantiles over an unbounded data
-// stream within low memory and CPU bounds.
-//
-// A small amount of accuracy is traded to achieve the above properties.
-//
-// Multiple streams can be merged before calling Query to generate a single set
-// of results. This is meaningful when the streams represent the same type of
-// data. See Merge and Samples.
-//
-// For more detailed information about the algorithm used, see:
-//
-// Effective Computation of Biased Quantiles over Data Streams
-//
-// http://www.cs.rutgers.edu/~muthu/bquant.pdf
-package quantile
-
-import (
- "math"
- "sort"
-)
-
-// Sample holds an observed value and meta information for compression. JSON
-// tags have been added for convenience.
-type Sample struct {
- Value float64 `json:",string"`
- Width float64 `json:",string"`
- Delta float64 `json:",string"`
-}
-
-// Samples represents a slice of samples. It implements sort.Interface.
-type Samples []Sample
-
-func (a Samples) Len() int { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
- ƒ := func(s *stream, r float64) float64 {
- return 2 * epsilon * r
- }
- return newStream(ƒ)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewHighBiased(epsilon float64) *Stream {
- ƒ := func(s *stream, r float64) float64 {
- return 2 * epsilon * (s.n - r)
- }
- return newStream(ƒ)
-}
-
-// NewTargeted returns an initialized Stream concerned with a particular set of
-// quantile values that are supplied a priori. Knowing these a priori reduces
-// space and computation time. The targets map maps the desired quantiles to
-// their absolute errors, i.e. the true quantile of a value returned by a query
-// is guaranteed to be within (Quantile±Epsilon).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targetMap map[float64]float64) *Stream {
- // Convert map to slice to avoid slow iterations on a map.
- // ƒ is called on the hot path, so converting the map to a slice
- // beforehand results in significant CPU savings.
- targets := targetMapToSlice(targetMap)
-
- ƒ := func(s *stream, r float64) float64 {
- var m = math.MaxFloat64
- var f float64
- for _, t := range targets {
- if t.quantile*s.n <= r {
- f = (2 * t.epsilon * r) / t.quantile
- } else {
- f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
- }
- if f < m {
- m = f
- }
- }
- return m
- }
- return newStream(ƒ)
-}
-
-type target struct {
- quantile float64
- epsilon float64
-}
-
-func targetMapToSlice(targetMap map[float64]float64) []target {
- targets := make([]target, 0, len(targetMap))
-
- for quantile, epsilon := range targetMap {
- t := target{
- quantile: quantile,
- epsilon: epsilon,
- }
- targets = append(targets, t)
- }
-
- return targets
-}
-
-// Stream computes quantiles for a stream of float64s. It is not thread-safe by
-// design. Take care when using across multiple goroutines.
-type Stream struct {
- *stream
- b Samples
- sorted bool
-}
-
-func newStream(ƒ invariant) *Stream {
- x := &stream{ƒ: ƒ}
- return &Stream{x, make(Samples, 0, 500), true}
-}
-
-// Insert inserts v into the stream.
-func (s *Stream) Insert(v float64) {
- s.insert(Sample{Value: v, Width: 1})
-}
-
-func (s *Stream) insert(sample Sample) {
- s.b = append(s.b, sample)
- s.sorted = false
- if len(s.b) == cap(s.b) {
- s.flush()
- }
-}
-
-// Query returns the computed qth percentile value. If s was created with
-// NewTargeted, and q is not in the set of quantiles provided a priori, Query
-// will return an unspecified result.
-func (s *Stream) Query(q float64) float64 {
- if !s.flushed() {
- // Fast path when there hasn't been enough data for a flush;
- // this also yields better accuracy for small sets of data.
- l := len(s.b)
- if l == 0 {
- return 0
- }
- i := int(math.Ceil(float64(l) * q))
- if i > 0 {
- i -= 1
- }
- s.maybeSort()
- return s.b[i].Value
- }
- s.flush()
- return s.stream.query(q)
-}
-
-// Merge merges samples into the underlying streams samples. This is handy when
-// merging multiple streams from separate threads, database shards, etc.
-//
-// ATTENTION: This method is broken and does not yield correct results. The
-// underlying algorithm is not capable of merging streams correctly.
-func (s *Stream) Merge(samples Samples) {
- sort.Sort(samples)
- s.stream.merge(samples)
-}
-
-// Reset reinitializes and clears the list reusing the samples buffer memory.
-func (s *Stream) Reset() {
- s.stream.reset()
- s.b = s.b[:0]
-}
-
-// Samples returns stream samples held by s.
-func (s *Stream) Samples() Samples {
- if !s.flushed() {
- return s.b
- }
- s.flush()
- return s.stream.samples()
-}
-
-// Count returns the total number of samples observed in the stream
-// since initialization.
-func (s *Stream) Count() int {
- return len(s.b) + s.stream.count()
-}
-
-func (s *Stream) flush() {
- s.maybeSort()
- s.stream.merge(s.b)
- s.b = s.b[:0]
-}
-
-func (s *Stream) maybeSort() {
- if !s.sorted {
- s.sorted = true
- sort.Sort(s.b)
- }
-}
-
-func (s *Stream) flushed() bool {
- return len(s.stream.l) > 0
-}
-
-type stream struct {
- n float64
- l []Sample
- ƒ invariant
-}
-
-func (s *stream) reset() {
- s.l = s.l[:0]
- s.n = 0
-}
-
-func (s *stream) insert(v float64) {
- s.merge(Samples{{v, 1, 0}})
-}
-
-func (s *stream) merge(samples Samples) {
- // TODO(beorn7): This tries to merge not only individual samples, but
- // whole summaries. The paper doesn't mention merging summaries at
- // all. Unittests show that the merging is inaccurate. Find out how to
- // do merges properly.
- var r float64
- i := 0
- for _, sample := range samples {
- for ; i < len(s.l); i++ {
- c := s.l[i]
- if c.Value > sample.Value {
- // Insert at position i.
- s.l = append(s.l, Sample{})
- copy(s.l[i+1:], s.l[i:])
- s.l[i] = Sample{
- sample.Value,
- sample.Width,
- math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
- // TODO(beorn7): How to calculate delta correctly?
- }
- i++
- goto inserted
- }
- r += c.Width
- }
- s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
- i++
- inserted:
- s.n += sample.Width
- r += sample.Width
- }
- s.compress()
-}
-
-func (s *stream) count() int {
- return int(s.n)
-}
-
-func (s *stream) query(q float64) float64 {
- t := math.Ceil(q * s.n)
- t += math.Ceil(s.ƒ(s, t) / 2)
- p := s.l[0]
- var r float64
- for _, c := range s.l[1:] {
- r += p.Width
- if r+c.Width+c.Delta > t {
- return p.Value
- }
- p = c
- }
- return p.Value
-}
-
-func (s *stream) compress() {
- if len(s.l) < 2 {
- return
- }
- x := s.l[len(s.l)-1]
- xi := len(s.l) - 1
- r := s.n - 1 - x.Width
-
- for i := len(s.l) - 2; i >= 0; i-- {
- c := s.l[i]
- if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
- x.Width += c.Width
- s.l[xi] = x
- // Remove element at i.
- copy(s.l[i:], s.l[i+1:])
- s.l = s.l[:len(s.l)-1]
- xi -= 1
- } else {
- x = c
- xi = i
- }
- r -= c.Width
- }
-}
-
-func (s *stream) samples() Samples {
- samples := make(Samples, len(s.l))
- copy(samples, s.l)
- return samples
-}
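
For context: the package comment above describes the CKMS biased-quantiles algorithm, and removing the package from vendor/ means nothing in the module imports it any longer (it was historically pulled in by the Prometheus client libraries). Typical use of the deleted API, following its own NewTargeted documentation:

```go
// Track the median and the 99th percentile with absolute error bounds.
q := quantile.NewTargeted(map[float64]float64{
	0.50: 0.005,
	0.99: 0.001,
})
for _, ms := range []float64{12, 8, 41, 7, 9, 230, 11} {
	q.Insert(ms)
}
fmt.Println(q.Query(0.50), q.Query(0.99))
```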
diff --git a/vendor/github.com/bits-and-blooms/bitset/.gitignore b/vendor/github.com/bits-and-blooms/bitset/.gitignore
deleted file mode 100644
index 5c204d28b0e..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/.gitignore
+++ /dev/null
@@ -1,26 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-target
diff --git a/vendor/github.com/bits-and-blooms/bitset/.travis.yml b/vendor/github.com/bits-and-blooms/bitset/.travis.yml
deleted file mode 100644
index 094aa5ce070..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/.travis.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-language: go
-
-sudo: false
-
-branches:
- except:
- - release
-
-branches:
- only:
- - master
- - travis
-
-go:
- - "1.11.x"
- - tip
-
-matrix:
- allow_failures:
- - go: tip
-
-before_install:
- - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi;
- - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi;
- - go get github.com/mattn/goveralls
-
-before_script:
- - make deps
-
-script:
- - make qa
-
-after_failure:
- - cat ./target/test/report.xml
-
-after_success:
- - if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi;
diff --git a/vendor/github.com/bits-and-blooms/bitset/README.md b/vendor/github.com/bits-and-blooms/bitset/README.md
deleted file mode 100644
index 97e83071e41..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/README.md
+++ /dev/null
@@ -1,93 +0,0 @@
-# bitset
-
-*Go language library to map between non-negative integers and boolean values*
-
-[Tests](https://github.com/willf/bitset/actions?query=workflow%3ATest)
-[Go Report Card](https://goreportcard.com/report/github.com/willf/bitset)
-[PkgGoDev](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc)
-
-
-## Description
-
-Package bitset implements bitsets, a mapping between non-negative integers and boolean values.
-It should be more efficient than map[uint] bool.
-
-It provides methods for setting, clearing, flipping, and testing individual integers.
-
-But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits.
-
-BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used.
-
-Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining.
-
-### Example use:
-
-```go
-package main
-
-import (
- "fmt"
- "math/rand"
-
- "github.com/bits-and-blooms/bitset"
-)
-
-func main() {
- fmt.Printf("Hello from BitSet!\n")
- var b bitset.BitSet
- // play some Go Fish
- for i := 0; i < 100; i++ {
- card1 := uint(rand.Intn(52))
- card2 := uint(rand.Intn(52))
- b.Set(card1)
- if b.Test(card2) {
- fmt.Println("Go Fish!")
- }
- b.Clear(card1)
- }
-
- // Chaining
- b.Set(10).Set(11)
-
- for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) {
- fmt.Println("The following bit is set:", i)
- }
- if b.Intersection(bitset.New(100).Set(10)).Count() == 1 {
- fmt.Println("Intersection works.")
- } else {
- fmt.Println("Intersection doesn't work???")
- }
-}
-```
-
-As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets.
-
-Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc
-
-## Memory Usage
-
-The memory usage of a bitset using N bits is at least N/8 bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring).
-
-## Implementation Note
-
-Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed.
-
-It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `uint64`). If so, the version will be bumped.
-
-## Installation
-
-```bash
-go get github.com/bits-and-blooms/bitset
-```
-
-## Contributing
-
-If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)")
-
-## Running all tests
-
-Before committing the code, please check if it passes tests, has adequate coverage, etc.
-```bash
-go test
-go test -cover
-```
diff --git a/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml
deleted file mode 100644
index f9b29591840..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-# Go
-# Build your Go project.
-# Add steps that test, save build artifacts, deploy, and more:
-# https://docs.microsoft.com/azure/devops/pipelines/languages/go
-
-trigger:
-- master
-
-pool:
- vmImage: 'Ubuntu-16.04'
-
-variables:
- GOBIN: '$(GOPATH)/bin' # Go binaries path
- GOROOT: '/usr/local/go1.11' # Go installation path
- GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path
- modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code
-
-steps:
-- script: |
- mkdir -p '$(GOBIN)'
- mkdir -p '$(GOPATH)/pkg'
- mkdir -p '$(modulePath)'
- shopt -s extglob
- shopt -s dotglob
- mv !(gopath) '$(modulePath)'
- echo '##vso[task.prependpath]$(GOBIN)'
- echo '##vso[task.prependpath]$(GOROOT)/bin'
- displayName: 'Set up the Go workspace'
-
-- script: |
- go version
- go get -v -t -d ./...
- if [ -f Gopkg.toml ]; then
- curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
- dep ensure
- fi
- go build -v .
- workingDirectory: '$(modulePath)'
- displayName: 'Get dependencies, then build'
diff --git a/vendor/github.com/bits-and-blooms/bitset/bitset.go b/vendor/github.com/bits-and-blooms/bitset/bitset.go
deleted file mode 100644
index d688806a54b..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/bitset.go
+++ /dev/null
@@ -1,952 +0,0 @@
-/*
-Package bitset implements bitsets, a mapping
-between non-negative integers and boolean values. It should be more
-efficient than map[uint] bool.
-
-It provides methods for setting, clearing, flipping, and testing
-individual integers.
-
-But it also provides set intersection, union, difference,
-complement, and symmetric operations, as well as tests to
-check whether any, all, or no bits are set, and querying a
-bitset's current length and number of positive bits.
-
-BitSets are expanded to the size of the largest set bit; the
-memory allocation is approximately Max bits, where Max is
-the largest set bit. BitSets are never shrunk. On creation,
-a hint can be given for the number of bits that will be used.
-
-Many of the methods, including Set, Clear, and Flip, return
-a BitSet pointer, which allows for chaining.
-
-Example use:
-
- import "bitset"
- var b BitSet
- b.Set(10).Set(11)
- if b.Test(1000) {
- b.Clear(1000)
- }
-	if b.Intersection(bitset.New(100).Set(10)).Count() > 1 {
- fmt.Println("Intersection works.")
- }
-
-As an alternative to BitSets, one should check out the 'big' package,
-which provides a (less set-theoretical) view of bitsets.
-
-*/
-package bitset
-
-import (
- "bufio"
- "bytes"
- "encoding/base64"
- "encoding/binary"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "strconv"
-)
-
-// the wordSize of a bit set
-const wordSize = uint(64)
-
-// log2WordSize is lg(wordSize)
-const log2WordSize = uint(6)
-
-// allBits has every bit set
-const allBits uint64 = 0xffffffffffffffff
-
-// default binary BigEndian
-var binaryOrder binary.ByteOrder = binary.BigEndian
-
-// default json encoding base64.URLEncoding
-var base64Encoding = base64.URLEncoding
-
-// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding)
-func Base64StdEncoding() { base64Encoding = base64.StdEncoding }
-
-// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian)
-func LittleEndian() { binaryOrder = binary.LittleEndian }
-
-// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0.
-type BitSet struct {
- length uint
- set []uint64
-}
-
-// Error is used to distinguish errors (panics) generated in this package.
-type Error string
-
-// safeSet will fixup b.set to be non-nil and return the field value
-func (b *BitSet) safeSet() []uint64 {
- if b.set == nil {
- b.set = make([]uint64, wordsNeeded(0))
- }
- return b.set
-}
-
-// From is a constructor used to create a BitSet from an array of integers
-func From(buf []uint64) *BitSet {
- return &BitSet{uint(len(buf)) * 64, buf}
-}
-
-// Bytes returns the bitset as array of integers
-func (b *BitSet) Bytes() []uint64 {
- return b.set
-}
-
-// wordsNeeded calculates the number of words needed for i bits
-func wordsNeeded(i uint) int {
- if i > (Cap() - wordSize + 1) {
- return int(Cap() >> log2WordSize)
- }
- return int((i + (wordSize - 1)) >> log2WordSize)
-}
-
-// New creates a new BitSet with a hint that length bits will be required
-func New(length uint) (bset *BitSet) {
- defer func() {
- if r := recover(); r != nil {
- bset = &BitSet{
- 0,
- make([]uint64, 0),
- }
- }
- }()
-
- bset = &BitSet{
- length,
- make([]uint64, wordsNeeded(length)),
- }
-
- return bset
-}
-
-// Cap returns the total possible capacity, or number of bits
-func Cap() uint {
- return ^uint(0)
-}
-
-// Len returns the number of bits in the BitSet.
-// Note the difference to method Count, see example.
-func (b *BitSet) Len() uint {
- return b.length
-}
-
-// extendSetMaybe adds additional words to incorporate new bits if needed
-func (b *BitSet) extendSetMaybe(i uint) {
- if i >= b.length { // if we need more bits, make 'em
- if i >= Cap() {
- panic("You are exceeding the capacity")
- }
- nsize := wordsNeeded(i + 1)
- if b.set == nil {
- b.set = make([]uint64, nsize)
- } else if cap(b.set) >= nsize {
- b.set = b.set[:nsize] // fast resize
- } else if len(b.set) < nsize {
- newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x
- copy(newset, b.set)
- b.set = newset
- }
- b.length = i + 1
- }
-}
-
-// Test whether bit i is set.
-func (b *BitSet) Test(i uint) bool {
- if i >= b.length {
- return false
- }
- return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0
-}
-
-// Set bit i to 1, the capacity of the bitset is automatically
-// increased accordingly.
-// If i>= Cap(), this function will panic.
-// Warning: using a very large value for 'i'
-// may lead to a memory shortage and a panic: the caller is responsible
-// for providing sensible parameters in line with their memory capacity.
-func (b *BitSet) Set(i uint) *BitSet {
- b.extendSetMaybe(i)
- b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1))
- return b
-}
-
-// Clear bit i to 0
-func (b *BitSet) Clear(i uint) *BitSet {
- if i >= b.length {
- return b
- }
- b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1))
- return b
-}
-
-// SetTo sets bit i to value.
-// If i>= Cap(), this function will panic.
-// Warning: using a very large value for 'i'
-// may lead to a memory shortage and a panic: the caller is responsible
-// for providing sensible parameters in line with their memory capacity.
-func (b *BitSet) SetTo(i uint, value bool) *BitSet {
- if value {
- return b.Set(i)
- }
- return b.Clear(i)
-}
-
-// Flip bit at i.
-// If i>= Cap(), this function will panic.
-// Warning: using a very large value for 'i'
-// may lead to a memory shortage and a panic: the caller is responsible
-// for providing sensible parameters in line with their memory capacity.
-func (b *BitSet) Flip(i uint) *BitSet {
- if i >= b.length {
- return b.Set(i)
- }
- b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1))
- return b
-}
-
-// FlipRange bit in [start, end).
-// If end>= Cap(), this function will panic.
-// Warning: using a very large value for 'end'
-// may lead to a memory shortage and a panic: the caller is responsible
-// for providing sensible parameters in line with their memory capacity.
-func (b *BitSet) FlipRange(start, end uint) *BitSet {
- if start >= end {
- return b
- }
-
- b.extendSetMaybe(end - 1)
- var startWord uint = start >> log2WordSize
- var endWord uint = end >> log2WordSize
- b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1)))
- for i := startWord; i < endWord; i++ {
- b.set[i] = ^b.set[i]
- }
- b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1))
- return b
-}
-
-// Shrink shrinks BitSet so that the provided value is the last possible
-// set value. It clears all bits > the provided index and reduces the size
-// and length of the set.
-//
-// Note that the parameter value is not the new length in bits: it is the
-// maximal value that can be stored in the bitset after the function call.
-// The new length in bits is the parameter value + 1. Thus it is not possible
-// to use this function to set the length to 0, the minimal value of the length
-// after this function call is 1.
-//
-// A new slice is allocated to store the new bits, so you may see an increase in
-// memory usage until the GC runs. Normally this should not be a problem, but if you
-// have an extremely large BitSet its important to understand that the old BitSet will
-// remain in memory until the GC frees it.
-func (b *BitSet) Shrink(lastbitindex uint) *BitSet {
- length := lastbitindex + 1
- idx := wordsNeeded(length)
- if idx > len(b.set) {
- return b
- }
- shrunk := make([]uint64, idx)
- copy(shrunk, b.set[:idx])
- b.set = shrunk
- b.length = length
- b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1))))
- return b
-}
-
-// Compact shrinks BitSet so that we preserve all set bits, while minimizing
-// memory usage. Compact calls Shrink.
-func (b *BitSet) Compact() *BitSet {
- idx := len(b.set) - 1
- for ; idx >= 0 && b.set[idx] == 0; idx-- {
- }
- newlength := uint((idx + 1) << log2WordSize)
- if newlength >= b.length {
- return b // nothing to do
- }
- if newlength > 0 {
- return b.Shrink(newlength - 1)
- }
- // We preserve one word
- return b.Shrink(63)
-}
-
-// InsertAt takes an index which indicates where a bit should be
-// inserted. Then it shifts all the bits in the set to the left by 1, starting
-// from the given index position, and sets the index position to 0.
-//
-// Depending on the size of your BitSet, and where you are inserting the new entry,
-// this method could be extremely slow and in some cases might cause the entire BitSet
-// to be recopied.
-func (b *BitSet) InsertAt(idx uint) *BitSet {
- insertAtElement := (idx >> log2WordSize)
-
- // if length of set is a multiple of wordSize we need to allocate more space first
- if b.isLenExactMultiple() {
- b.set = append(b.set, uint64(0))
- }
-
- var i uint
- for i = uint(len(b.set) - 1); i > insertAtElement; i-- {
-	// all elements above the position where we want to insert can simply be shifted
- b.set[i] <<= 1
-
- // we take the most significant bit of the previous element and set it as
- // the least significant bit of the current element
- b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63
- }
-
- // generate a mask to extract the data that we need to shift left
- // within the element where we insert a bit
-	dataMask := ^(uint64(1)<<uint64(idx&(wordSize-1)) - 1)
-
-	// extract the data that we'll shift left
-	data := b.set[insertAtElement] & dataMask
-
-	// set the positions of the data mask to 0 in the element where we insert
-	b.set[insertAtElement] &= ^dataMask
-
-	// shift the extracted data to the left and put it back in the element
-	b.set[insertAtElement] |= data << 1
-
-	// add 1 to length of BitSet
-	b.length++
-
-	return b
-}
-
-// String creates a string representation of the Bitmap
-func (b *BitSet) String() string {
-	// follows code from https://github.com/RoaringBitmap/roaring
-	var buffer bytes.Buffer
-	start := []byte("{")
-	buffer.Write(start)
-	counter := 0
-	i, e := b.NextSet(0)
-	for e {
-		counter = counter + 1
-		// to avoid exhausting the memory
-		if counter > 0x40000 {
- buffer.WriteString("...")
- break
- }
- buffer.WriteString(strconv.FormatInt(int64(i), 10))
- i, e = b.NextSet(i + 1)
- if e {
- buffer.WriteString(",")
- }
- }
- buffer.WriteString("}")
- return buffer.String()
-}
-
-// DeleteAt deletes the bit at the given index position from
-// within the bitset
-// All the bits residing on the left of the deleted bit get
-// shifted right by 1
-// The running time of this operation may potentially be
-// relatively slow, O(length)
-func (b *BitSet) DeleteAt(i uint) *BitSet {
- // the index of the slice element where we'll delete a bit
- deleteAtElement := i >> log2WordSize
-
- // generate a mask for the data that needs to be shifted right
- // within that slice element that gets modified
- dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1)
-
- // extract the data that we'll shift right from the slice element
- data := b.set[deleteAtElement] & dataMask
-
- // set the masked area to 0 while leaving the rest as it is
- b.set[deleteAtElement] &= ^dataMask
-
- // shift the previously extracted data to the right and then
- // set it in the previously masked area
- b.set[deleteAtElement] |= (data >> 1) & dataMask
-
- // loop over all the consecutive slice elements to copy each
- // lowest bit into the highest position of the previous element,
- // then shift the entire content to the right by 1
- for i := int(deleteAtElement) + 1; i < len(b.set); i++ {
- b.set[i-1] |= (b.set[i] & 1) << 63
- b.set[i] >>= 1
- }
-
- b.length = b.length - 1
-
- return b
-}
-
-// NextSet returns the next bit set from the specified index,
-// including possibly the current index
-// along with an error code (true = valid, false = no set bit found)
-// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...}
-//
-// Users concerned with performance may want to use NextSetMany to
-// retrieve several values at once.
-func (b *BitSet) NextSet(i uint) (uint, bool) {
- x := int(i >> log2WordSize)
- if x >= len(b.set) {
- return 0, false
- }
- w := b.set[x]
- w = w >> (i & (wordSize - 1))
- if w != 0 {
- return i + trailingZeroes64(w), true
- }
- x = x + 1
- for x < len(b.set) {
- if b.set[x] != 0 {
- return uint(x)*wordSize + trailingZeroes64(b.set[x]), true
- }
- x = x + 1
-
- }
- return 0, false
-}
-
-// NextSetMany returns many next bit sets from the specified index,
-// including possibly the current index and up to cap(buffer).
-// If the returned slice has len zero, then no more set bits were found
-//
-// buffer := make([]uint, 256) // this should be reused
-// j := uint(0)
-// j, buffer = bitmap.NextSetMany(j, buffer)
-// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) {
-// for k := range buffer {
-// do something with buffer[k]
-// }
-// j += 1
-// }
-//
-//
-// It is possible to retrieve all set bits as follow:
-//
-// indices := make([]uint, bitmap.Count())
-// bitmap.NextSetMany(0, indices)
-//
-// However if bitmap.Count() is large, it might be preferable to
-// use several calls to NextSetMany, for performance reasons.
-func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) {
- myanswer := buffer
- capacity := cap(buffer)
- x := int(i >> log2WordSize)
- if x >= len(b.set) || capacity == 0 {
- return 0, myanswer[:0]
- }
- skip := i & (wordSize - 1)
- word := b.set[x] >> skip
- myanswer = myanswer[:capacity]
- size := int(0)
- for word != 0 {
- r := trailingZeroes64(word)
- t := word & ((^word) + 1)
- myanswer[size] = r + i
- size++
- if size == capacity {
- goto End
- }
- word = word ^ t
- }
- x++
- for idx, word := range b.set[x:] {
- for word != 0 {
- r := trailingZeroes64(word)
- t := word & ((^word) + 1)
- myanswer[size] = r + (uint(x+idx) << 6)
- size++
- if size == capacity {
- goto End
- }
- word = word ^ t
- }
- }
-End:
- if size > 0 {
- return myanswer[size-1], myanswer[:size]
- }
- return 0, myanswer[:0]
-}
-
-// NextClear returns the next clear bit from the specified index,
-// including possibly the current index
-// along with an error code (true = valid, false = no bit found i.e. all bits are set)
-func (b *BitSet) NextClear(i uint) (uint, bool) {
- x := int(i >> log2WordSize)
- if x >= len(b.set) {
- return 0, false
- }
- w := b.set[x]
- w = w >> (i & (wordSize - 1))
- wA := allBits >> (i & (wordSize - 1))
- index := i + trailingZeroes64(^w)
- if w != wA && index < b.length {
- return index, true
- }
- x++
- for x < len(b.set) {
- index = uint(x)*wordSize + trailingZeroes64(^b.set[x])
- if b.set[x] != allBits && index < b.length {
- return index, true
- }
- x++
- }
- return 0, false
-}
-
-// ClearAll clears the entire BitSet
-func (b *BitSet) ClearAll() *BitSet {
- if b != nil && b.set != nil {
- for i := range b.set {
- b.set[i] = 0
- }
- }
- return b
-}
-
-// wordCount returns the number of words used in a bit set
-func (b *BitSet) wordCount() int {
- return len(b.set)
-}
-
-// Clone this BitSet
-func (b *BitSet) Clone() *BitSet {
- c := New(b.length)
- if b.set != nil { // Clone should not modify current object
- copy(c.set, b.set)
- }
- return c
-}
-
-// Copy into a destination BitSet
-// Returning the size of the destination BitSet
-// like array copy
-func (b *BitSet) Copy(c *BitSet) (count uint) {
- if c == nil {
- return
- }
- if b.set != nil { // Copy should not modify current object
- copy(c.set, b.set)
- }
- count = c.length
- if b.length < c.length {
- count = b.length
- }
- return
-}
-
-// Count (number of set bits).
-// Also known as "popcount" or "population count".
-func (b *BitSet) Count() uint {
- if b != nil && b.set != nil {
- return uint(popcntSlice(b.set))
- }
- return 0
-}
-
-// Equal tests the equivalence of two BitSets.
-// False if they are of different sizes, otherwise true
-// only if all the same bits are set
-func (b *BitSet) Equal(c *BitSet) bool {
- if c == nil || b == nil {
- return c == b
- }
- if b.length != c.length {
- return false
- }
- if b.length == 0 { // if they have both length == 0, then could have nil set
- return true
- }
-	// testing for equality should not transform the bitset (no call to safeSet)
-
- for p, v := range b.set {
- if c.set[p] != v {
- return false
- }
- }
- return true
-}
-
-func panicIfNull(b *BitSet) {
- if b == nil {
- panic(Error("BitSet must not be null"))
- }
-}
-
-// Difference of base set and other set
-// This is the BitSet equivalent of &^ (and not)
-func (b *BitSet) Difference(compare *BitSet) (result *BitSet) {
- panicIfNull(b)
- panicIfNull(compare)
- result = b.Clone() // clone b (in case b is bigger than compare)
- l := int(compare.wordCount())
- if l > int(b.wordCount()) {
- l = int(b.wordCount())
- }
- for i := 0; i < l; i++ {
- result.set[i] = b.set[i] &^ compare.set[i]
- }
- return
-}
-
-// DifferenceCardinality computes the cardinality of the difference
-func (b *BitSet) DifferenceCardinality(compare *BitSet) uint {
- panicIfNull(b)
- panicIfNull(compare)
- l := int(compare.wordCount())
- if l > int(b.wordCount()) {
- l = int(b.wordCount())
- }
- cnt := uint64(0)
- cnt += popcntMaskSlice(b.set[:l], compare.set[:l])
- cnt += popcntSlice(b.set[l:])
- return uint(cnt)
-}
-
-// InPlaceDifference computes the difference of base set and other set
-// This is the BitSet equivalent of &^ (and not)
-func (b *BitSet) InPlaceDifference(compare *BitSet) {
- panicIfNull(b)
- panicIfNull(compare)
- l := int(compare.wordCount())
- if l > int(b.wordCount()) {
- l = int(b.wordCount())
- }
- for i := 0; i < l; i++ {
- b.set[i] &^= compare.set[i]
- }
-}
-
-// Convenience function: return two bitsets ordered by
-// increasing length. Note: neither can be nil
-func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) {
- if a.length <= b.length {
- ap, bp = a, b
- } else {
- ap, bp = b, a
- }
- return
-}
-
-// Intersection of base set and other set
-// This is the BitSet equivalent of & (and)
-func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) {
- panicIfNull(b)
- panicIfNull(compare)
- b, compare = sortByLength(b, compare)
- result = New(b.length)
- for i, word := range b.set {
- result.set[i] = word & compare.set[i]
- }
- return
-}
-
-// IntersectionCardinality computes the cardinality of the intersection
-func (b *BitSet) IntersectionCardinality(compare *BitSet) uint {
- panicIfNull(b)
- panicIfNull(compare)
- b, compare = sortByLength(b, compare)
- cnt := popcntAndSlice(b.set, compare.set)
- return uint(cnt)
-}
-
-// InPlaceIntersection destructively computes the intersection of
-// base set and the compare set.
-// This is the BitSet equivalent of & (and)
-func (b *BitSet) InPlaceIntersection(compare *BitSet) {
- panicIfNull(b)
- panicIfNull(compare)
- l := int(compare.wordCount())
- if l > int(b.wordCount()) {
- l = int(b.wordCount())
- }
- for i := 0; i < l; i++ {
- b.set[i] &= compare.set[i]
- }
- for i := l; i < len(b.set); i++ {
- b.set[i] = 0
- }
- if compare.length > 0 {
- b.extendSetMaybe(compare.length - 1)
- }
-}
-
-// Union of base set and other set
-// This is the BitSet equivalent of | (or)
-func (b *BitSet) Union(compare *BitSet) (result *BitSet) {
- panicIfNull(b)
- panicIfNull(compare)
- b, compare = sortByLength(b, compare)
- result = compare.Clone()
- for i, word := range b.set {
- result.set[i] = word | compare.set[i]
- }
- return
-}
-
-// UnionCardinality computes the cardinality of the union of the base set
-// and the compare set.
-func (b *BitSet) UnionCardinality(compare *BitSet) uint {
- panicIfNull(b)
- panicIfNull(compare)
- b, compare = sortByLength(b, compare)
- cnt := popcntOrSlice(b.set, compare.set)
- if len(compare.set) > len(b.set) {
- cnt += popcntSlice(compare.set[len(b.set):])
- }
- return uint(cnt)
-}
-
-// InPlaceUnion creates the destructive union of base set and compare set.
-// This is the BitSet equivalent of | (or).
-func (b *BitSet) InPlaceUnion(compare *BitSet) {
- panicIfNull(b)
- panicIfNull(compare)
- l := int(compare.wordCount())
- if l > int(b.wordCount()) {
- l = int(b.wordCount())
- }
- if compare.length > 0 {
- b.extendSetMaybe(compare.length - 1)
- }
- for i := 0; i < l; i++ {
- b.set[i] |= compare.set[i]
- }
- if len(compare.set) > l {
- for i := l; i < len(compare.set); i++ {
- b.set[i] = compare.set[i]
- }
- }
-}
-
-// SymmetricDifference of base set and other set
-// This is the BitSet equivalent of ^ (xor)
-func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) {
- panicIfNull(b)
- panicIfNull(compare)
- b, compare = sortByLength(b, compare)
- // compare is bigger, so clone it
- result = compare.Clone()
- for i, word := range b.set {
- result.set[i] = word ^ compare.set[i]
- }
- return
-}
-
-// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference
-func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint {
- panicIfNull(b)
- panicIfNull(compare)
- b, compare = sortByLength(b, compare)
- cnt := popcntXorSlice(b.set, compare.set)
- if len(compare.set) > len(b.set) {
- cnt += popcntSlice(compare.set[len(b.set):])
- }
- return uint(cnt)
-}
-
-// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set
-// This is the BitSet equivalent of ^ (xor)
-func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) {
- panicIfNull(b)
- panicIfNull(compare)
- l := int(compare.wordCount())
- if l > int(b.wordCount()) {
- l = int(b.wordCount())
- }
- if compare.length > 0 {
- b.extendSetMaybe(compare.length - 1)
- }
- for i := 0; i < l; i++ {
- b.set[i] ^= compare.set[i]
- }
- if len(compare.set) > l {
- for i := l; i < len(compare.set); i++ {
- b.set[i] = compare.set[i]
- }
- }
-}
-
-// Is the length an exact multiple of word sizes?
-func (b *BitSet) isLenExactMultiple() bool {
- return b.length%wordSize == 0
-}
-
-// Clean last word by setting unused bits to 0
-func (b *BitSet) cleanLastWord() {
- if !b.isLenExactMultiple() {
- b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize)
- }
-}
-
-// Complement computes the (local) complement of a bitset (up to length bits)
-func (b *BitSet) Complement() (result *BitSet) {
- panicIfNull(b)
- result = New(b.length)
- for i, word := range b.set {
- result.set[i] = ^word
- }
- result.cleanLastWord()
- return
-}
-
-// All returns true if all bits are set, false otherwise. Returns true for
-// empty sets.
-func (b *BitSet) All() bool {
- panicIfNull(b)
- return b.Count() == b.length
-}
-
-// None returns true if no bit is set, false otherwise. Returns true for
-// empty sets.
-func (b *BitSet) None() bool {
- panicIfNull(b)
- if b != nil && b.set != nil {
- for _, word := range b.set {
- if word > 0 {
- return false
- }
- }
- return true
- }
- return true
-}
-
-// Any returns true if any bit is set, false otherwise
-func (b *BitSet) Any() bool {
- panicIfNull(b)
- return !b.None()
-}
-
-// IsSuperSet returns true if this is a superset of the other set
-func (b *BitSet) IsSuperSet(other *BitSet) bool {
- for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) {
- if !b.Test(i) {
- return false
- }
- }
- return true
-}
-
-// IsStrictSuperSet returns true if this is a strict superset of the other set
-func (b *BitSet) IsStrictSuperSet(other *BitSet) bool {
- return b.Count() > other.Count() && b.IsSuperSet(other)
-}
-
-// DumpAsBits dumps a bit set as a string of bits
-func (b *BitSet) DumpAsBits() string {
- if b.set == nil {
- return "."
- }
- buffer := bytes.NewBufferString("")
- i := len(b.set) - 1
- for ; i >= 0; i-- {
- fmt.Fprintf(buffer, "%064b.", b.set[i])
- }
- return buffer.String()
-}
-
-// BinaryStorageSize returns the binary storage requirements
-func (b *BitSet) BinaryStorageSize() int {
- return binary.Size(uint64(0)) + binary.Size(b.set)
-}
-
-// WriteTo writes a BitSet to a stream
-func (b *BitSet) WriteTo(stream io.Writer) (int64, error) {
- length := uint64(b.length)
-
- // Write length
- err := binary.Write(stream, binaryOrder, length)
- if err != nil {
- return 0, err
- }
-
- // Write set
- err = binary.Write(stream, binaryOrder, b.set)
- return int64(b.BinaryStorageSize()), err
-}
-
-// ReadFrom reads a BitSet from a stream written using WriteTo
-func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) {
- var length uint64
-
- // Read length first
- err := binary.Read(stream, binaryOrder, &length)
- if err != nil {
- return 0, err
- }
- newset := New(uint(length))
-
- if uint64(newset.length) != length {
- return 0, errors.New("unmarshalling error: type mismatch")
- }
-
- // Read remaining bytes as set
- err = binary.Read(stream, binaryOrder, newset.set)
- if err != nil {
- return 0, err
- }
-
- *b = *newset
- return int64(b.BinaryStorageSize()), nil
-}
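
The stream format used by WriteTo/ReadFrom is simply an 8-byte bit-length followed by the 64-bit words. A round-trip sketch, under the assumption that `binaryOrder` is big-endian (as in upstream bits-and-blooms/bitset):

```
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	length := uint64(128) // bit length, written first
	words := []uint64{0b1011, 42}

	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, length) // errors elided in this sketch
	binary.Write(&buf, binary.BigEndian, words)

	r := bytes.NewReader(buf.Bytes())
	var gotLen uint64
	binary.Read(r, binary.BigEndian, &gotLen)
	gotWords := make([]uint64, (gotLen+63)/64) // same rounding New() applies
	binary.Read(r, binary.BigEndian, gotWords)
	fmt.Println(gotLen, gotWords) // 128 [11 42]
}
```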
-
-// MarshalBinary encodes a BitSet into a binary form and returns the result.
-func (b *BitSet) MarshalBinary() ([]byte, error) {
- var buf bytes.Buffer
- writer := bufio.NewWriter(&buf)
-
- _, err := b.WriteTo(writer)
- if err != nil {
- return []byte{}, err
- }
-
- err = writer.Flush()
-
- return buf.Bytes(), err
-}
-
-// UnmarshalBinary decodes the binary form generated by MarshalBinary.
-func (b *BitSet) UnmarshalBinary(data []byte) error {
- buf := bytes.NewReader(data)
- reader := bufio.NewReader(buf)
-
- _, err := b.ReadFrom(reader)
-
- return err
-}
-
-// MarshalJSON marshals a BitSet as a JSON structure
-func (b *BitSet) MarshalJSON() ([]byte, error) {
- buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize()))
- _, err := b.WriteTo(buffer)
- if err != nil {
- return nil, err
- }
-
- // URLEncode all bytes
- return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes()))
-}
-
-// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON
-func (b *BitSet) UnmarshalJSON(data []byte) error {
- // Unmarshal as string
- var s string
- err := json.Unmarshal(data, &s)
- if err != nil {
- return err
- }
-
- // URLDecode string
- buf, err := base64Encoding.DecodeString(s)
- if err != nil {
- return err
- }
-
- _, err = b.ReadFrom(bytes.NewReader(buf))
- return err
-}
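
So the JSON form is nothing more than the binary form wrapped in a base64 JSON string. A tiny round trip, assuming `base64Encoding` is the URL-safe alphabet (which the "URLEncode"/"URLDecode" comments above suggest):

```
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte{0, 0, 0, 0, 0, 0, 0, 64, 1} // stand-in for WriteTo output
	data, _ := json.Marshal(base64.URLEncoding.EncodeToString(raw))

	var s string
	_ = json.Unmarshal(data, &s)
	decoded, _ := base64.URLEncoding.DecodeString(s)
	fmt.Println(string(data), decoded) // a quoted base64 string, then the original bytes
}
```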
diff --git a/vendor/github.com/bits-and-blooms/bitset/go.mod b/vendor/github.com/bits-and-blooms/bitset/go.mod
deleted file mode 100644
index c43e4522b7f..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/bits-and-blooms/bitset
-
-go 1.14
diff --git a/vendor/github.com/bits-and-blooms/bitset/go.sum b/vendor/github.com/bits-and-blooms/bitset/go.sum
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt.go b/vendor/github.com/bits-and-blooms/bitset/popcnt.go
deleted file mode 100644
index 76577a83828..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/popcnt.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package bitset
-
-// bit population count, taken from
-// https://code.google.com/p/go/issues/detail?id=4988#c11
-// credit: https://code.google.com/u/arnehormann/
-func popcount(x uint64) (n uint64) {
- x -= (x >> 1) & 0x5555555555555555
- x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
- x += x >> 4
- x &= 0x0f0f0f0f0f0f0f0f
- x *= 0x0101010101010101
- return x >> 56
-}
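
This is the classic SWAR popcount: the first line turns each 2-bit pair into its own count, the next two fold those into 4-bit and then 8-bit counts, and the final multiply by 0x0101010101010101 sums all eight byte counts into the top byte. A quick check against the standard library:

```
package main

import (
	"fmt"
	"math/bits"
)

func popcount(x uint64) uint64 {
	x -= (x >> 1) & 0x5555555555555555                   // per-pair counts
	x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 // per-nibble counts
	x += x >> 4
	x &= 0x0f0f0f0f0f0f0f0f // per-byte counts
	x *= 0x0101010101010101 // sum of all bytes lands in the top byte
	return x >> 56
}

func main() {
	for _, v := range []uint64{0, 1, 0xdeadbeef, ^uint64(0)} {
		fmt.Println(popcount(v) == uint64(bits.OnesCount64(v))) // true, true, true, true
	}
}
```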
-
-func popcntSliceGo(s []uint64) uint64 {
- cnt := uint64(0)
- for _, x := range s {
- cnt += popcount(x)
- }
- return cnt
-}
-
-func popcntMaskSliceGo(s, m []uint64) uint64 {
- cnt := uint64(0)
- for i := range s {
- cnt += popcount(s[i] &^ m[i])
- }
- return cnt
-}
-
-func popcntAndSliceGo(s, m []uint64) uint64 {
- cnt := uint64(0)
- for i := range s {
- cnt += popcount(s[i] & m[i])
- }
- return cnt
-}
-
-func popcntOrSliceGo(s, m []uint64) uint64 {
- cnt := uint64(0)
- for i := range s {
- cnt += popcount(s[i] | m[i])
- }
- return cnt
-}
-
-func popcntXorSliceGo(s, m []uint64) uint64 {
- cnt := uint64(0)
- for i := range s {
- cnt += popcount(s[i] ^ m[i])
- }
- return cnt
-}
diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go
deleted file mode 100644
index fc8ff4f367c..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build go1.9
-
-package bitset
-
-import "math/bits"
-
-func popcntSlice(s []uint64) uint64 {
- var cnt int
- for _, x := range s {
- cnt += bits.OnesCount64(x)
- }
- return uint64(cnt)
-}
-
-func popcntMaskSlice(s, m []uint64) uint64 {
- var cnt int
- for i := range s {
- cnt += bits.OnesCount64(s[i] &^ m[i])
- }
- return uint64(cnt)
-}
-
-func popcntAndSlice(s, m []uint64) uint64 {
- var cnt int
- for i := range s {
- cnt += bits.OnesCount64(s[i] & m[i])
- }
- return uint64(cnt)
-}
-
-func popcntOrSlice(s, m []uint64) uint64 {
- var cnt int
- for i := range s {
- cnt += bits.OnesCount64(s[i] | m[i])
- }
- return uint64(cnt)
-}
-
-func popcntXorSlice(s, m []uint64) uint64 {
- var cnt int
- for i := range s {
- cnt += bits.OnesCount64(s[i] ^ m[i])
- }
- return uint64(cnt)
-}
diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go
deleted file mode 100644
index 4cf64f24ad0..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// +build !go1.9
-// +build amd64,!appengine
-
-package bitset
-
-// *** the following functions are defined in popcnt_amd64.s
-
-//go:noescape
-
-func hasAsm() bool
-
-// useAsm is a flag used to select the GO or ASM implementation of the popcnt function
-var useAsm = hasAsm()
-
-//go:noescape
-
-func popcntSliceAsm(s []uint64) uint64
-
-//go:noescape
-
-func popcntMaskSliceAsm(s, m []uint64) uint64
-
-//go:noescape
-
-func popcntAndSliceAsm(s, m []uint64) uint64
-
-//go:noescape
-
-func popcntOrSliceAsm(s, m []uint64) uint64
-
-//go:noescape
-
-func popcntXorSliceAsm(s, m []uint64) uint64
-
-func popcntSlice(s []uint64) uint64 {
- if useAsm {
- return popcntSliceAsm(s)
- }
- return popcntSliceGo(s)
-}
-
-func popcntMaskSlice(s, m []uint64) uint64 {
- if useAsm {
- return popcntMaskSliceAsm(s, m)
- }
- return popcntMaskSliceGo(s, m)
-}
-
-func popcntAndSlice(s, m []uint64) uint64 {
- if useAsm {
- return popcntAndSliceAsm(s, m)
- }
- return popcntAndSliceGo(s, m)
-}
-
-func popcntOrSlice(s, m []uint64) uint64 {
- if useAsm {
- return popcntOrSliceAsm(s, m)
- }
- return popcntOrSliceGo(s, m)
-}
-
-func popcntXorSlice(s, m []uint64) uint64 {
- if useAsm {
- return popcntXorSliceAsm(s, m)
- }
- return popcntXorSliceGo(s, m)
-}
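
The pattern here is one-time runtime dispatch: `useAsm` is computed once from CPUID and every call branches on it. On go1.9+ this whole file is superseded by `math/bits`, whose OnesCount64 the compiler lowers to POPCNT when the CPU supports it. A hedged sketch of the same dispatch idea using golang.org/x/sys/cpu for the feature check (an assumption; the deleted code does its own CPUID in assembly):

```
package main

import (
	"fmt"
	"math/bits"

	"golang.org/x/sys/cpu" // assumed available; exposes X86.HasPOPCNT
)

var hasPopcnt = cpu.X86.HasPOPCNT // decided once, like useAsm above

func popcntSlice(s []uint64) (n int) {
	// bits.OnesCount64 already compiles down to POPCNT on supporting
	// CPUs, so no hand-written assembly path is needed any more.
	for _, x := range s {
		n += bits.OnesCount64(x)
	}
	return n
}

func main() {
	fmt.Println(hasPopcnt, popcntSlice([]uint64{7, 8})) // e.g. true 4
}
```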
diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s
deleted file mode 100644
index 666c0dcc17f..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s
+++ /dev/null
@@ -1,104 +0,0 @@
-// +build !go1.9
-// +build amd64,!appengine
-
-TEXT ·hasAsm(SB),4,$0-1
-MOVQ $1, AX
-CPUID
-SHRQ $23, CX
-ANDQ $1, CX
-MOVB CX, ret+0(FP)
-RET
-
-#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2
-
-TEXT ·popcntSliceAsm(SB),4,$0-32
-XORQ AX, AX
-MOVQ s+0(FP), SI
-MOVQ s_len+8(FP), CX
-TESTQ CX, CX
-JZ popcntSliceEnd
-popcntSliceLoop:
-BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX
-ADDQ DX, AX
-ADDQ $8, SI
-LOOP popcntSliceLoop
-popcntSliceEnd:
-MOVQ AX, ret+24(FP)
-RET
-
-TEXT ·popcntMaskSliceAsm(SB),4,$0-56
-XORQ AX, AX
-MOVQ s+0(FP), SI
-MOVQ s_len+8(FP), CX
-TESTQ CX, CX
-JZ popcntMaskSliceEnd
-MOVQ m+24(FP), DI
-popcntMaskSliceLoop:
-MOVQ (DI), DX
-NOTQ DX
-ANDQ (SI), DX
-POPCNTQ_DX_DX
-ADDQ DX, AX
-ADDQ $8, SI
-ADDQ $8, DI
-LOOP popcntMaskSliceLoop
-popcntMaskSliceEnd:
-MOVQ AX, ret+48(FP)
-RET
-
-TEXT ·popcntAndSliceAsm(SB),4,$0-56
-XORQ AX, AX
-MOVQ s+0(FP), SI
-MOVQ s_len+8(FP), CX
-TESTQ CX, CX
-JZ popcntAndSliceEnd
-MOVQ m+24(FP), DI
-popcntAndSliceLoop:
-MOVQ (DI), DX
-ANDQ (SI), DX
-POPCNTQ_DX_DX
-ADDQ DX, AX
-ADDQ $8, SI
-ADDQ $8, DI
-LOOP popcntAndSliceLoop
-popcntAndSliceEnd:
-MOVQ AX, ret+48(FP)
-RET
-
-TEXT ·popcntOrSliceAsm(SB),4,$0-56
-XORQ AX, AX
-MOVQ s+0(FP), SI
-MOVQ s_len+8(FP), CX
-TESTQ CX, CX
-JZ popcntOrSliceEnd
-MOVQ m+24(FP), DI
-popcntOrSliceLoop:
-MOVQ (DI), DX
-ORQ (SI), DX
-POPCNTQ_DX_DX
-ADDQ DX, AX
-ADDQ $8, SI
-ADDQ $8, DI
-LOOP popcntOrSliceLoop
-popcntOrSliceEnd:
-MOVQ AX, ret+48(FP)
-RET
-
-TEXT ·popcntXorSliceAsm(SB),4,$0-56
-XORQ AX, AX
-MOVQ s+0(FP), SI
-MOVQ s_len+8(FP), CX
-TESTQ CX, CX
-JZ popcntXorSliceEnd
-MOVQ m+24(FP), DI
-popcntXorSliceLoop:
-MOVQ (DI), DX
-XORQ (SI), DX
-POPCNTQ_DX_DX
-ADDQ DX, AX
-ADDQ $8, SI
-ADDQ $8, DI
-LOOP popcntXorSliceLoop
-popcntXorSliceEnd:
-MOVQ AX, ret+48(FP)
-RET
diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go
deleted file mode 100644
index 21e0ff7b4fc..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build !go1.9
-// +build !amd64 appengine
-
-package bitset
-
-func popcntSlice(s []uint64) uint64 {
- return popcntSliceGo(s)
-}
-
-func popcntMaskSlice(s, m []uint64) uint64 {
- return popcntMaskSliceGo(s, m)
-}
-
-func popcntAndSlice(s, m []uint64) uint64 {
- return popcntAndSliceGo(s, m)
-}
-
-func popcntOrSlice(s, m []uint64) uint64 {
- return popcntOrSliceGo(s, m)
-}
-
-func popcntXorSlice(s, m []uint64) uint64 {
- return popcntXorSliceGo(s, m)
-}
diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go
deleted file mode 100644
index c52b61be9fc..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build !go1.9
-
-package bitset
-
-var deBruijn = [...]byte{
- 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
- 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
- 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
- 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
-}
-
-func trailingZeroes64(v uint64) uint {
- return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58])
-}
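
`v & -v` isolates the lowest set bit, and multiplying by the de Bruijn constant 0x03f79d71b4ca8b09 places a distinct 6-bit pattern in the top bits for each of the 64 possible positions, which then indexes the table. A sketch verifying it agrees with math/bits:

```
package main

import (
	"fmt"
	"math/bits"
)

var deBruijn = [...]byte{
	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}

func trailingZeroes64(v uint64) uint {
	return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58])
}

func main() {
	ok := true
	for i := 0; i < 64; i++ {
		v := uint64(1) << i
		ok = ok && trailingZeroes64(v) == uint(bits.TrailingZeros64(v))
	}
	fmt.Println(ok) // true
}
```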
diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go
deleted file mode 100644
index 36a988e714d..00000000000
--- a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.9
-
-package bitset
-
-import "math/bits"
-
-func trailingZeroes64(v uint64) uint {
- return uint(bits.TrailingZeros64(v))
-}
diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml
deleted file mode 100644
index c516ea88da7..00000000000
--- a/vendor/github.com/cespare/xxhash/v2/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-go:
- - "1.x"
- - master
-env:
- - TAGS=""
- - TAGS="-tags purego"
-script: go test $TAGS -v ./...
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
deleted file mode 100644
index 24b53065f40..00000000000
--- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2016 Caleb Spare
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
deleted file mode 100644
index 2fd8693c21b..00000000000
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# xxhash
-
-[GoDoc](https://godoc.org/github.com/cespare/xxhash)
-[Build Status](https://travis-ci.org/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-This package provides a straightforward API:
-
-```
-func Sum64(b []byte) uint64
-func Sum64String(s string) uint64
-type Digest struct{ ... }
- func New() *Digest
-```
-
-The `Digest` type implements hash.Hash64. Its key methods are:
-
-```
-func (*Digest) Write([]byte) (int, error)
-func (*Digest) WriteString(string) (int, error)
-func (*Digest) Sum64() uint64
-```
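
A minimal usage sketch of that API; the streaming writes and the one-shot helper should produce the same digest:

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	d := xxhash.New()
	d.Write([]byte("hel")) // Write never returns an error
	d.WriteString("lo")
	fmt.Printf("%016x\n", d.Sum64()) // matches Sum64String("hello")
}
```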
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
-
-## Compatibility
-
-This package is in a module and the latest code is in version 2 of the module.
-You need a version of Go with at least "minimal module compatibility" to use
-github.com/cespare/xxhash/v2:
-
-* 1.9.7+ for Go 1.9
-* 1.10.3+ for Go 1.10
-* Go 1.11 or later
-
-I recommend using the latest release of Go.
-
-## Benchmarks
-
-Here are some quick benchmarks comparing the pure-Go and assembly
-implementations of Sum64.
-
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
-
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
-
-```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
-```
-
-## Projects using this package
-
-- [InfluxDB](https://github.com/influxdata/influxdb)
-- [Prometheus](https://github.com/prometheus/prometheus)
-- [FreeCache](https://github.com/coocood/freecache)
diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod
deleted file mode 100644
index 49f67608bf6..00000000000
--- a/vendor/github.com/cespare/xxhash/v2/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/cespare/xxhash/v2
-
-go 1.11
diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
deleted file mode 100644
index db0b35fbe39..00000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-// at http://cyan4973.github.io/xxHash/.
-package xxhash
-
-import (
- "encoding/binary"
- "errors"
- "math/bits"
-)
-
-const (
- prime1 uint64 = 11400714785074694791
- prime2 uint64 = 14029467366897019727
- prime3 uint64 = 1609587929392839161
- prime4 uint64 = 9650029242287828579
- prime5 uint64 = 2870177450012600261
-)
-
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
-
-// Digest implements hash.Hash64.
-type Digest struct {
- v1 uint64
- v2 uint64
- v3 uint64
- v4 uint64
- total uint64
- mem [32]byte
- n int // how much of mem is used
-}
-
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
-func New() *Digest {
- var d Digest
- d.Reset()
- return &d
-}
-
-// Reset clears the Digest's state so that it can be reused.
-func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
- d.v2 = prime2
- d.v3 = 0
- d.v4 = -prime1v
- d.total = 0
- d.n = 0
-}
-
-// Size always returns 8 bytes.
-func (d *Digest) Size() int { return 8 }
-
-// BlockSize always returns 32 bytes.
-func (d *Digest) BlockSize() int { return 32 }
-
-// Write adds more data to d. It always returns len(b), nil.
-func (d *Digest) Write(b []byte) (n int, err error) {
- n = len(b)
- d.total += uint64(n)
-
- if d.n+n < 32 {
- // This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
- d.n += n
- return
- }
-
- if d.n > 0 {
- // Finish off the partial block.
- copy(d.mem[d.n:], b)
- d.v1 = round(d.v1, u64(d.mem[0:8]))
- d.v2 = round(d.v2, u64(d.mem[8:16]))
- d.v3 = round(d.v3, u64(d.mem[16:24]))
- d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
- d.n = 0
- }
-
- if len(b) >= 32 {
- // One or more full blocks left.
- nw := writeBlocks(d, b)
- b = b[nw:]
- }
-
- // Store any remaining partial block.
- copy(d.mem[:], b)
- d.n = len(b)
-
- return
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-func (d *Digest) Sum(b []byte) []byte {
- s := d.Sum64()
- return append(
- b,
- byte(s>>56),
- byte(s>>48),
- byte(s>>40),
- byte(s>>32),
- byte(s>>24),
- byte(s>>16),
- byte(s>>8),
- byte(s),
- )
-}
-
-// Sum64 returns the current hash.
-func (d *Digest) Sum64() uint64 {
- var h uint64
-
- if d.total >= 32 {
- v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = d.v3 + prime5
- }
-
- h += d.total
-
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
- h = rol11(h) * prime1
- i++
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-const (
- magic = "xxh\x06"
- marshaledSize = len(magic) + 8*5 + 32
-)
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (d *Digest) MarshalBinary() ([]byte, error) {
- b := make([]byte, 0, marshaledSize)
- b = append(b, magic...)
- b = appendUint64(b, d.v1)
- b = appendUint64(b, d.v2)
- b = appendUint64(b, d.v3)
- b = appendUint64(b, d.v4)
- b = appendUint64(b, d.total)
- b = append(b, d.mem[:d.n]...)
- b = b[:len(b)+len(d.mem)-d.n]
- return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-func (d *Digest) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic) || string(b[:len(magic)]) != magic {
- return errors.New("xxhash: invalid hash state identifier")
- }
- if len(b) != marshaledSize {
- return errors.New("xxhash: invalid hash state size")
- }
- b = b[len(magic):]
- b, d.v1 = consumeUint64(b)
- b, d.v2 = consumeUint64(b)
- b, d.v3 = consumeUint64(b)
- b, d.v4 = consumeUint64(b)
- b, d.total = consumeUint64(b)
- copy(d.mem[:], b)
- b = b[len(d.mem):]
- d.n = int(d.total % uint64(len(d.mem)))
- return nil
-}
-
-func appendUint64(b []byte, x uint64) []byte {
- var a [8]byte
- binary.LittleEndian.PutUint64(a[:], x)
- return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
- x := u64(b)
- return b[8:], x
-}
-
-func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
-func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
-
-func round(acc, input uint64) uint64 {
- acc += input * prime2
- acc = rol31(acc)
- acc *= prime1
- return acc
-}
-
-func mergeRound(acc, val uint64) uint64 {
- val = round(0, val)
- acc ^= val
- acc = acc*prime1 + prime4
- return acc
-}
-
-func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
-func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
-func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
-func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
-func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
-func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
-func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
-func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
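
Each round is `acc = rol31(acc + input*prime2) * prime1`, mergeRound folds the four lane accumulators into one, and the closing shift-xor-multiply cascade is the avalanche step. A quick sanity check against the published XXH64 test vector for empty input with seed 0 (0xef46db3751d8e999):

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// Well-known XXH64 vector: hash of the empty input, seed 0.
	fmt.Println(xxhash.Sum64(nil) == 0xef46db3751d8e999) // true
}
```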
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
deleted file mode 100644
index d580e32aed4..00000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
+++ /dev/null
@@ -1,215 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-#include "textflag.h"
-
-// Register allocation:
-// AX h
-// CX pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// R15 prime4v
-
-// round reads from and advances the buffer pointer in CX.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (CX), R12 \
- ADDQ $8, CX \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ R15, acc
-
-// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
- // Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), R15
-
- // Load slice.
- MOVQ b_base+0(FP), CX
- MOVQ b_len+8(FP), DX
- LEAQ (CX)(DX*1), BX
-
- // The first loop limit will be len(b)-32.
- SUBQ $32, BX
-
- // Check whether we have at least one block.
- CMPQ DX, $32
- JLT noBlocks
-
- // Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until CX > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ CX, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
-
- JMP afterBlocks
-
-noBlocks:
- MOVQ ·prime5v(SB), AX
-
-afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
- ADDQ $24, BX
-
- CMPQ CX, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (CX), R8
- ADDQ $8, CX
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ R15, AX
-
- CMPQ CX, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ CX, BX
- JG singles
-
- MOVL (CX), R8
- ADDQ $4, CX
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ CX, BX
- JGE finalize
-
-singlesLoop:
- MOVBQZX (CX), R12
- ADDQ $1, CX
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
-
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ CX, BX
- JL singlesLoop
-
-finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
- RET
-
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
-// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
- // Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
-
- // Load slice.
- MOVQ b_base+8(FP), CX
- MOVQ b_len+16(FP), DX
- LEAQ (CX)(DX*1), BX
- SUBQ $32, BX
-
- // Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
-
- // We don't need to check the loop condition here; this function is
- // always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ CX, BX
- JLE blockLoop
-
- // Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // The number of bytes written is CX minus the old base pointer.
- SUBQ b_base+8(FP), CX
- MOVQ CX, ret+32(FP)
-
- RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
deleted file mode 100644
index 4a5a821603e..00000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// +build !amd64 appengine !gc purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-func Sum64(b []byte) uint64 {
- // A simpler version would be
- // d := New()
- // d.Write(b)
- // return d.Sum64()
- // but this is faster, particularly for small inputs.
-
- n := len(b)
- var h uint64
-
- if n >= 32 {
- v1 := prime1v + prime2
- v2 := prime2
- v3 := uint64(0)
- v4 := -prime1v
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = prime5
- }
-
- h += uint64(n)
-
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
- h = rol11(h) * prime1
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-func writeBlocks(d *Digest, b []byte) int {
- v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
- n := len(b)
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
- return n - len(b)
-}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
deleted file mode 100644
index fc9bea7a31f..00000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build appengine
-
-// This file contains the safe implementations of otherwise unsafe-using code.
-
-package xxhash
-
-// Sum64String computes the 64-bit xxHash digest of s.
-func Sum64String(s string) uint64 {
- return Sum64([]byte(s))
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-func (d *Digest) WriteString(s string) (n int, err error) {
- return d.Write([]byte(s))
-}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
deleted file mode 100644
index 53bf76efbc2..00000000000
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build !appengine
-
-// This file encapsulates usage of unsafe.
-// xxhash_safe.go contains the safe implementations.
-
-package xxhash
-
-import (
- "reflect"
- "unsafe"
-)
-
-// Notes:
-//
-// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-// for some discussion about these unsafe conversions.
-//
-// In the future it's possible that compiler optimizations will make these
-// unsafe operations unnecessary: https://golang.org/issue/2205.
-//
-// Both of these wrapper functions still incur function call overhead since they
-// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
-// for strings to squeeze out a bit more speed. Mid-stack inlining should
-// eventually fix this.
-
-// Sum64String computes the 64-bit xxHash digest of s.
-// It may be faster than Sum64([]byte(s)) by avoiding a copy.
-func Sum64String(s string) uint64 {
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
- return Sum64(b)
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-// It may be faster than Write([]byte(s)) by avoiding a copy.
-func (d *Digest) WriteString(s string) (n int, err error) {
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
- return d.Write(b)
-}
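
These header manipulations build a []byte that aliases the string's memory without copying. Since Go 1.20 the supported way to express the same thing is unsafe.StringData/unsafe.Slice; a hedged sketch (the alias must never be written through):

```
package main

import (
	"fmt"
	"unsafe"
)

// stringToBytes is the Go 1.20+ equivalent of the reflect.SliceHeader
// trick above: a zero-copy view of s. Mutating b is undefined behavior.
func stringToBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := stringToBytes("hello")
	fmt.Println(len(b), string(b)) // 5 hello
}
```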
diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md
index fab974b7f34..4b0a5ff5818 100644
--- a/vendor/github.com/chzyer/readline/README.md
+++ b/vendor/github.com/chzyer/readline/README.md
@@ -11,7 +11,7 @@
-A powerful readline library in `Linux` `macOS` `Windows` `Solaris`
+A powerful readline library in `Linux` `macOS` `Windows` `Solaris` `AIX`
## Guide
diff --git a/vendor/github.com/chzyer/readline/go.mod b/vendor/github.com/chzyer/readline/go.mod
new file mode 100644
index 00000000000..66180f6b1f6
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/go.mod
@@ -0,0 +1,10 @@
+module github.com/chzyer/readline
+
+go 1.15
+
+require (
+ github.com/chzyer/test v1.0.0
+ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5
+)
+
+require github.com/chzyer/logex v1.2.1
diff --git a/vendor/github.com/chzyer/readline/go.sum b/vendor/github.com/chzyer/readline/go.sum
new file mode 100644
index 00000000000..2358df08830
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/go.sum
@@ -0,0 +1,6 @@
+github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5 h1:y/woIyUBFbpQGKS0u1aHF/40WUDnek3fPOyD08H5Vng=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go
index 4c31624f806..b60939a91f1 100644
--- a/vendor/github.com/chzyer/readline/operation.go
+++ b/vendor/github.com/chzyer/readline/operation.go
@@ -109,10 +109,12 @@ func (o *Operation) ioloop() {
keepInSearchMode := false
keepInCompleteMode := false
r := o.t.ReadRune()
+
if o.GetConfig().FuncFilterInputRune != nil {
var process bool
r, process = o.GetConfig().FuncFilterInputRune(r)
if !process {
+ o.t.KickRead()
o.buf.Refresh(nil) // to refresh the line
continue // ignore this rune
}
@@ -434,6 +436,10 @@ func (o *Operation) Slice() ([]byte, error) {
}
func (o *Operation) Close() {
+ select {
+ case o.errchan <- io.EOF:
+ default:
+ }
o.history.Close()
}
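
The new select makes Close deliver io.EOF to a pending Readline and fall through harmlessly when nobody is reading. The non-blocking-send idiom in isolation:

```
package main

import (
	"fmt"
	"io"
)

func main() {
	errchan := make(chan error) // unbuffered, as Operation.errchan presumably is
	select {
	case errchan <- io.EOF:
		fmt.Println("delivered to a waiting reader")
	default:
		fmt.Println("no reader; dropped instead of blocking Close") // printed here
	}
}
```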
diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go
index 0e7aca06d5a..63b9171012e 100644
--- a/vendor/github.com/chzyer/readline/readline.go
+++ b/vendor/github.com/chzyer/readline/readline.go
@@ -17,7 +17,9 @@
//
package readline
-import "io"
+import (
+ "io"
+)
type Instance struct {
Config *Config
@@ -270,14 +272,24 @@ func (i *Instance) ReadSlice() ([]byte, error) {
}
 // We must make sure to call Close() before the process exits.
+// If there is a pending read operation, it will be interrupted,
+// so you can capture the exit signal and call Instance.Close(); it's thread-safe.
func (i *Instance) Close() error {
+ i.Config.Stdin.Close()
+ i.Operation.Close()
if err := i.Terminal.Close(); err != nil {
return err
}
- i.Config.Stdin.Close()
- i.Operation.Close()
return nil
}
+
+// Call CaptureExitSignal when you want readline to exit gracefully.
+func (i *Instance) CaptureExitSignal() {
+ CaptureExitSignal(func() {
+ i.Close()
+ })
+}
+
func (i *Instance) Clean() {
i.Operation.Clean()
}
diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go
index 81d2da50ccb..d95df1e36b6 100644
--- a/vendor/github.com/chzyer/readline/runebuf.go
+++ b/vendor/github.com/chzyer/readline/runebuf.go
@@ -35,7 +35,7 @@ type RuneBuffer struct {
sync.Mutex
}
-func (r* RuneBuffer) pushKill(text []rune) {
+func (r *RuneBuffer) pushKill(text []rune) {
r.lastKill = append([]rune{}, text...)
}
@@ -221,7 +221,7 @@ func (r *RuneBuffer) DeleteWord() {
}
for i := init + 1; i < len(r.buf); i++ {
if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
- r.pushKill(r.buf[r.idx:i-1])
+ r.pushKill(r.buf[r.idx : i-1])
r.Refresh(func() {
r.buf = append(r.buf[:r.idx], r.buf[i-1:]...)
})
@@ -350,7 +350,7 @@ func (r *RuneBuffer) Yank() {
return
}
r.Refresh(func() {
- buf := make([]rune, 0, len(r.buf) + len(r.lastKill))
+ buf := make([]rune, 0, len(r.buf)+len(r.lastKill))
buf = append(buf, r.buf[:r.idx]...)
buf = append(buf, r.lastKill...)
buf = append(buf, r.buf[r.idx:]...)
diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go
index 133993ca8ea..ea5db9346e8 100644
--- a/vendor/github.com/chzyer/readline/term.go
+++ b/vendor/github.com/chzyer/readline/term.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd os400 solaris
// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_nosyscall6.go
similarity index 96%
rename from vendor/github.com/chzyer/readline/term_solaris.go
rename to vendor/github.com/chzyer/readline/term_nosyscall6.go
index 4c27273c7ab..df923393790 100644
--- a/vendor/github.com/chzyer/readline/term_solaris.go
+++ b/vendor/github.com/chzyer/readline/term_nosyscall6.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build solaris
+// +build aix os400 solaris
package readline
diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go
index 1078631c14a..38413d0cf68 100644
--- a/vendor/github.com/chzyer/readline/terminal.go
+++ b/vendor/github.com/chzyer/readline/terminal.go
@@ -125,6 +125,7 @@ func (t *Terminal) ioloop() {
var (
isEscape bool
isEscapeEx bool
+ isEscapeSS3 bool
expectNextChar bool
)
@@ -152,9 +153,15 @@ func (t *Terminal) ioloop() {
if isEscape {
isEscape = false
if r == CharEscapeEx {
+ // ^][
expectNextChar = true
isEscapeEx = true
continue
+ } else if r == CharO {
+ // ^]O
+ expectNextChar = true
+ isEscapeSS3 = true
+ continue
}
r = escapeKey(r, buf)
} else if isEscapeEx {
@@ -177,6 +184,15 @@ func (t *Terminal) ioloop() {
expectNextChar = true
continue
}
+ } else if isEscapeSS3 {
+ isEscapeSS3 = false
+ if key := readEscKey(r, buf); key != nil {
+ r = escapeSS3Key(key)
+ }
+ if r == 0 {
+ expectNextChar = true
+ continue
+ }
}
expectNextChar = true
diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go
index af4e005216f..0706dd4ec51 100644
--- a/vendor/github.com/chzyer/readline/utils.go
+++ b/vendor/github.com/chzyer/readline/utils.go
@@ -6,9 +6,11 @@ import (
"container/list"
"fmt"
"os"
+ "os/signal"
"strconv"
"strings"
"sync"
+ "syscall"
"time"
"unicode"
)
@@ -41,6 +43,7 @@ const (
CharCtrlY = 25
CharCtrlZ = 26
CharEsc = 27
+ CharO = 79
CharEscapeEx = 91
CharBackspace = 127
)
@@ -121,6 +124,27 @@ func escapeExKey(key *escapeKeyPair) rune {
return r
}
+// translate ESC-O-<X> (SS3) escape codes for up/down/etc.
+func escapeSS3Key(key *escapeKeyPair) rune {
+ var r rune
+ switch key.typ {
+ case 'D':
+ r = CharBackward
+ case 'C':
+ r = CharForward
+ case 'A':
+ r = CharPrev
+ case 'B':
+ r = CharNext
+ case 'H':
+ r = CharLineStart
+ case 'F':
+ r = CharLineEnd
+ default:
+ }
+ return r
+}
+
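Terminals in application-cursor mode emit SS3 sequences (ESC O X) instead of CSI ones (ESC [ X) for the arrow and Home/End keys; this new branch maps them onto the same movement runes. A standalone sketch of the decoding (the names here are illustrative, not readline's):

```
package main

import "fmt"

// decodeSS3 maps the final byte of an ESC O <X> sequence the way
// escapeSS3Key above does, but to plain strings for demonstration.
func decodeSS3(seq []byte) string {
	if len(seq) != 3 || seq[0] != 0x1b || seq[1] != 'O' {
		return "not an SS3 sequence"
	}
	switch seq[2] {
	case 'A':
		return "up"
	case 'B':
		return "down"
	case 'C':
		return "forward"
	case 'D':
		return "backward"
	case 'H':
		return "line start"
	case 'F':
		return "line end"
	}
	return "unknown"
}

func main() {
	fmt.Println(decodeSS3([]byte{0x1b, 'O', 'A'})) // up
}
```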
type escapeKeyPair struct {
attr string
typ rune
@@ -275,3 +299,13 @@ func Debug(o ...interface{}) {
fmt.Fprintln(f, o...)
f.Close()
}
+
+func CaptureExitSignal(f func()) {
+ cSignal := make(chan os.Signal, 1)
+ signal.Notify(cSignal, os.Interrupt, syscall.SIGTERM)
+ go func() {
+ for range cSignal {
+ f()
+ }
+ }()
+}
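
A sketch of how a caller would wire this up so Ctrl-C restores the terminal instead of leaving it raw (the prompt string is illustrative):

```
package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	rl, err := readline.NewEx(&readline.Config{Prompt: "> "})
	if err != nil {
		panic(err)
	}
	defer rl.Close()
	rl.CaptureExitSignal() // close (and restore the terminal) on SIGINT/SIGTERM

	for {
		line, err := rl.Readline()
		if err != nil { // io.EOF or readline.ErrInterrupt once closed
			break
		}
		fmt.Println("read:", line)
	}
}
```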
diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go
index f88dac97bd7..fc49492326e 100644
--- a/vendor/github.com/chzyer/readline/utils_unix.go
+++ b/vendor/github.com/chzyer/readline/utils_unix.go
@@ -1,4 +1,4 @@
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd os400 solaris
package readline
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
index 708b2668990..0da3efe4c21 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -26,9 +26,10 @@ import (
"archive/tar"
"bytes"
"compress/gzip"
+ "context"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path"
"runtime"
@@ -38,7 +39,6 @@ import (
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
"github.com/klauspost/compress/zstd"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
@@ -48,6 +48,7 @@ type options struct {
prioritizedFiles []string
missedPrioritizedFiles *[]string
compression Compression
+ ctx context.Context
}
type Option func(o *options) error
@@ -104,6 +105,14 @@ func WithCompression(compression Compression) Option {
}
}
+// WithContext specifies a context that can be used for clean cancellation.
+func WithContext(ctx context.Context) Option {
+ return func(o *options) error {
+ o.ctx = ctx
+ return nil
+ }
+}
+
// Blob is an eStargz blob.
type Blob struct {
io.ReadCloser
@@ -139,12 +148,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
}
layerFiles := newTempFiles()
+ ctx := opts.ctx
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ done := make(chan struct{})
+ defer close(done)
+ go func() {
+ select {
+ case <-done:
+ // nop
+ case <-ctx.Done():
+ layerFiles.CleanupAll()
+ }
+ }()
defer func() {
if rErr != nil {
if err := layerFiles.CleanupAll(); err != nil {
- rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err)
+ rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
}
}
+ if cErr := ctx.Err(); cErr != nil {
+ rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
+ }
}()
tarBlob, err := decompressBlob(tarBlob, layerFiles)
if err != nil {
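The watcher goroutine added above is a small but careful pattern: it waits on either normal completion (`done`, closed by the deferred `close`) or context cancellation, and only the latter triggers cleanup. The same structure in isolation, under hypothetical names:

```
package main

import (
	"context"
	"fmt"
	"time"
)

func run(ctx context.Context, cleanup func()) {
	done := make(chan struct{})
	defer close(done) // normal completion: the watcher exits without cleanup
	go func() {
		select {
		case <-done:
		case <-ctx.Done():
			cleanup() // cancellation: reclaim temp files mid-build
		}
	}()
	time.Sleep(10 * time.Millisecond) // stand-in for the real work
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() { time.Sleep(time.Millisecond); cancel() }()
	run(ctx, func() { fmt.Println("canceled: cleaning up") })
	time.Sleep(20 * time.Millisecond) // give the watcher time to print
}
```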
@@ -307,7 +333,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
// Import tar file.
intar, err := importTar(in)
if err != nil {
- return nil, errors.Wrap(err, "failed to sort")
+ return nil, fmt.Errorf("failed to sort: %w", err)
}
// Sort the tar file respecting to the prioritized files list.
@@ -318,7 +344,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
*missedPrioritized = append(*missedPrioritized, l)
continue // allow not found
}
- return nil, errors.Wrap(err, "failed to sort tar entries")
+ return nil, fmt.Errorf("failed to sort tar entries: %w", err)
}
}
if len(prioritized) == 0 {
@@ -371,7 +397,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
tf := &tarFile{}
pw, err := newCountReader(in)
if err != nil {
- return nil, errors.Wrap(err, "failed to make position watcher")
+ return nil, fmt.Errorf("failed to make position watcher: %w", err)
}
tr := tar.NewReader(pw)
@@ -383,7 +409,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
if err == io.EOF {
break
} else {
- return nil, errors.Wrap(err, "failed to parse tar file")
+ return nil, fmt.Errorf("failed to parse tar file: %w", err)
}
}
switch cleanEntryName(h.Name) {
@@ -420,7 +446,7 @@ func moveRec(name string, in *tarFile, out *tarFile) error {
_, okIn := in.get(name)
_, okOut := out.get(name)
if !okIn && !okOut {
- return errors.Wrapf(errNotFound, "file: %q", name)
+ return fmt.Errorf("file: %q: %w", name, errNotFound)
}
parent, _ := path.Split(strings.TrimSuffix(name, "/"))
@@ -506,12 +532,13 @@ func newTempFiles() *tempFiles {
}
type tempFiles struct {
- files []*os.File
- filesMu sync.Mutex
+ files []*os.File
+ filesMu sync.Mutex
+ cleanupOnce sync.Once
}
func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
- f, err := ioutil.TempFile(dir, pattern)
+ f, err := os.CreateTemp(dir, pattern)
if err != nil {
return nil, err
}
@@ -521,7 +548,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
return f, nil
}
-func (tf *tempFiles) CleanupAll() error {
+func (tf *tempFiles) CleanupAll() (err error) {
+ tf.cleanupOnce.Do(func() {
+ err = tf.cleanupAll()
+ })
+ return
+}
+
+func (tf *tempFiles) cleanupAll() error {
tf.filesMu.Lock()
defer tf.filesMu.Unlock()
var allErr []error
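With cleanup now reachable from two paths (the deferred error handler and the context watcher), `sync.Once` makes it idempotent. A reduced sketch; note that the second caller observes a nil error even if the first cleanup failed, which is the trade-off of this pattern:

```
package main

import (
	"fmt"
	"sync"
)

type tempFiles struct {
	cleanupOnce sync.Once
}

func (tf *tempFiles) CleanupAll() (err error) {
	tf.cleanupOnce.Do(func() {
		fmt.Println("removing temp files")
		// err = tf.cleanupAll() in the real code
	})
	return
}

func main() {
	var tf tempFiles
	tf.CleanupAll()
	tf.CleanupAll() // second call is a no-op
}
```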
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
index e997d9cceb3..921e59ec6ef 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -23,15 +23,14 @@
package estargz
import (
- "archive/tar"
"bufio"
"bytes"
"compress/gzip"
"crypto/sha256"
+ "errors"
"fmt"
"hash"
"io"
- "io/ioutil"
"os"
"path"
"sort"
@@ -41,7 +40,7 @@ import (
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
+ "github.com/vbatts/tar-split/archive/tar"
)
// A Reader permits random access reads from a stargz file.
@@ -95,10 +94,10 @@ func WithTelemetry(telemetry *Telemetry) OpenOption {
}
}
-// A func which takes start time and records the diff
+// MeasureLatencyHook is a func which takes a start time and records the elapsed time.
type MeasureLatencyHook func(time.Time)
-// A struct which defines telemetry hooks. By implementing these hooks you should be able to record
+// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record
// the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...)
type Telemetry struct {
GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
@@ -107,7 +106,7 @@ type Telemetry struct {
}
// Open opens a stargz file for reading.
-// The behaviour is configurable using options.
+// The behavior is configurable using options.
//
// Note that each entry name is normalized as the path that is relative to root.
func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
@@ -118,7 +117,7 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
}
}
- gzipCompressors := []Decompressor{new(GzipDecompressor), new(legacyGzipDecompressor)}
+ gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)}
decompressors := append(gzipCompressors, opts.decompressors...)
// Determine the size to fetch. Try to fetch as many bytes as possible.
@@ -146,7 +145,7 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
fSize := d.FooterSize()
fOffset := positive(int64(len(footer)) - fSize)
maybeTocBytes := footer[:fOffset]
- tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
+ _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
if err != nil {
allErr = append(allErr, err)
continue
@@ -184,10 +183,10 @@ func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr e
return 0, 0, fmt.Errorf("error reading footer: %v", err)
}
var allErr []error
- for _, d := range []Decompressor{new(GzipDecompressor), new(legacyGzipDecompressor)} {
+ for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} {
fSize := d.FooterSize()
fOffset := positive(int64(len(footer)) - fSize)
- tocOffset, _, err := d.ParseFooter(footer[fOffset:])
+ _, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
if err == nil {
return tocOffset, fSize, err
}
@@ -279,12 +278,12 @@ func (r *Reader) initFields() error {
pdir := r.getOrCreateDir(pdirName)
ent.NumLink++ // at least one name(ent.Name) references this entry.
if ent.Type == "hardlink" {
- if org, ok := r.m[cleanEntryName(ent.LinkName)]; ok {
- org.NumLink++ // original entry is referenced by this ent.Name.
- ent = org
- } else {
- return fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+ org, err := r.getSource(ent)
+ if err != nil {
+ return err
}
+ org.NumLink++ // original entry is referenced by this ent.Name.
+ ent = org
}
pdir.addChild(path.Base(name), ent)
}
@@ -303,6 +302,20 @@ func (r *Reader) initFields() error {
return nil
}
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+ if ent.Type == "hardlink" {
+ org, ok := r.m[cleanEntryName(ent.LinkName)]
+ if !ok {
+ return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+ }
+ ent, err = r.getSource(org)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ent, nil
+}
+
func parentDir(p string) string {
dir, _ := path.Split(p)
return strings.TrimSuffix(dir, "/")
@@ -326,6 +339,10 @@ func (r *Reader) getOrCreateDir(d string) *TOCEntry {
return e
}
+func (r *Reader) TOCDigest() digest.Digest {
+ return r.tocDigest
+}
+
// VerifyTOC checks that the TOC JSON in the passed blob matches the
// passed digests and that the TOC JSON contains digests for all chunks
 // contained in the blob. If the verification succeeds, this function
@@ -335,7 +352,12 @@ func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
if r.tocDigest != tocDigest {
return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
}
+ return r.Verifiers()
+}
+// Verifiers returns a TOCEntryVerifier of this blob. Use VerifyTOC instead in most cases
+// because this doesn't verify the TOC itself.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest
var chunkDigestMapIncomplete bool
@@ -362,8 +384,7 @@ func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
if e.Digest != "" {
d, err := digest.Parse(e.Digest)
if err != nil {
- return nil, errors.Wrapf(err,
- "failed to parse regular file digest %q", e.Digest)
+ return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
}
regDigestMap[e.Offset] = d
} else {
@@ -378,8 +399,7 @@ func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
if e.ChunkDigest != "" {
d, err := digest.Parse(e.ChunkDigest)
if err != nil {
- return nil, errors.Wrapf(err,
- "failed to parse chunk digest %q", e.ChunkDigest)
+ return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
}
chunkDigestMap[e.Offset] = d
} else {
@@ -455,7 +475,11 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
}
e, ok = r.m[path]
if ok && e.Type == "hardlink" {
- e, ok = r.m[e.LinkName]
+ var err error
+ e, err = r.getSource(e)
+ if err != nil {
+ return nil, false
+ }
}
return
}
@@ -554,7 +578,7 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
}
defer dr.Close()
- if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil {
+ if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
}
return io.ReadFull(dr, p)
@@ -591,6 +615,11 @@ type currentCompressionWriter struct{ w *Writer }
func (ccw currentCompressionWriter) Write(p []byte) (int, error) {
ccw.w.diffHash.Write(p)
+ if ccw.w.gz == nil {
+ if err := ccw.w.condOpenGz(); err != nil {
+ return 0, err
+ }
+ }
return ccw.w.gz.Write(p)
}
@@ -601,6 +630,25 @@ func (w *Writer) chunkSize() int {
return w.ChunkSize
}
+// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob.
+// TOC JSON and footer are removed.
+func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
+ footerSize := c.FooterSize()
+ if sr.Size() < footerSize {
+ return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize)
+ }
+ footerOffset := sr.Size() - footerSize
+ footer := make([]byte, footerSize)
+ if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+ return nil, err
+ }
+ blobPayloadSize, _, _, err := c.ParseFooter(footer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse footer: %w", err)
+ }
+ return c.Reader(io.LimitReader(sr, blobPayloadSize))
+}
+
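Unpack works because ParseFooter now also reports blobPayloadSize: limiting the reader to that many bytes drops the TOC and footer before handing the rest to the decompressor. A hedged usage sketch (the file name is hypothetical):

```
package main

import (
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	f, err := os.Open("layer.estargz") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fi, _ := f.Stat()
	sr := io.NewSectionReader(f, 0, fi.Size())

	// Recover the plain tar stream; TOC JSON and footer are stripped.
	rc, err := estargz.Unpack(sr, new(estargz.GzipDecompressor))
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc) // raw tar bytes
}
```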
// NewWriter returns a new stargz writer (gzip-based) writing to w.
//
// The writer must be closed to write its trailing table of contents.
@@ -616,7 +664,7 @@ func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel))
}
-// NewWriterLevel returns a new stargz writer writing to w.
+// NewWriterWithCompressor returns a new stargz writer writing to w.
// The compression method is configurable.
//
// The writer must be closed to write its trailing table of contents.
@@ -696,29 +744,71 @@ func (w *Writer) condOpenGz() (err error) {
// each of its contents to w.
//
// The input r can optionally be gzip compressed but the output will
-// always be gzip compressed.
+// always be compressed by the specified compressor.
func (w *Writer) AppendTar(r io.Reader) error {
+ return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference from AppendTar is that this writes the input tar stream
+// into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns an
+// error, because w cannot replace the existing TOC JSON with the one it generates
+// without lossy modification. To avoid this error, if the input stream is known
+// to be stargz/estargz, you should decompress it and remove the TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+ return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+ var src io.Reader
br := bufio.NewReader(r)
- var tr *tar.Reader
if isGzip(br) {
- // NewReader can't fail if isGzip returned true.
zr, _ := gzip.NewReader(br)
- tr = tar.NewReader(zr)
+ src = zr
} else {
- tr = tar.NewReader(br)
+ src = io.Reader(br)
+ }
+ dst := currentCompressionWriter{w}
+ var tw *tar.Writer
+ if !lossless {
+ tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+ }
+ tr := tar.NewReader(src)
+ if lossless {
+ tr.RawAccounting = true
}
for {
h, err := tr.Next()
if err == io.EOF {
+ if lossless {
+ if remain := tr.RawBytes(); len(remain) > 0 {
+ // Collect the remaining null bytes.
+ // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
+ if _, err := dst.Write(remain); err != nil {
+ return err
+ }
+ }
+ }
break
}
if err != nil {
return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
}
- if h.Name == TOCTarName {
+ if cleanEntryName(h.Name) == TOCTarName {
// It is possible for a layer to be "stargzified" twice during the
// distribution lifecycle. So we reserve "TOCTarName" here to avoid
// duplicated entries in the resulting layer.
+ if lossless {
+ // We cannot handle this in lossless way.
+ return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
+ }
continue
}
@@ -744,9 +834,14 @@ func (w *Writer) AppendTar(r io.Reader) error {
if err := w.condOpenGz(); err != nil {
return err
}
- tw := tar.NewWriter(currentCompressionWriter{w})
- if err := tw.WriteHeader(h); err != nil {
- return err
+ if tw != nil {
+ if err := tw.WriteHeader(h); err != nil {
+ return err
+ }
+ } else {
+ if _, err := dst.Write(tr.RawBytes()); err != nil {
+ return err
+ }
}
switch h.Typeflag {
case tar.TypeLink:
@@ -808,7 +903,13 @@ func (w *Writer) AppendTar(r io.Reader) error {
}
teeChunk := io.TeeReader(tee, chunkDigest.Hash())
- if _, err := io.CopyN(tw, teeChunk, chunkSize); err != nil {
+ var out io.Writer
+ if tw != nil {
+ out = tw
+ } else {
+ out = dst
+ }
+ if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
return fmt.Errorf("error copying %q: %v", h.Name, err)
}
ent.ChunkDigest = chunkDigest.Digest().String()
@@ -825,11 +926,18 @@ func (w *Writer) AppendTar(r io.Reader) error {
if payloadDigest != nil {
regFileEntry.Digest = payloadDigest.Digest().String()
}
- if err := tw.Flush(); err != nil {
- return err
+ if tw != nil {
+ if err := tw.Flush(); err != nil {
+ return err
+ }
}
}
- return nil
+ remainDest := io.Discard
+ if lossless {
+ remainDest = dst // Preserve the remaining bytes in lossless mode
+ }
+ _, err := io.Copy(remainDest, src)
+ return err
}
// DiffID returns the SHA-256 of the uncompressed tar bytes.
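The lossless path leans on tar-split's RawAccounting: RawBytes() hands back the exact header (and padding) bytes the reader consumed, so they can be copied through unmodified while file payloads are streamed separately. A reduced round-trip sketch of that mechanism, which should reproduce the input byte-for-byte:

```
package main

import (
	"archive/tar" // stdlib, used only to build the sample input
	"bytes"
	"fmt"
	"io"

	tarsplit "github.com/vbatts/tar-split/archive/tar"
)

func main() {
	// Build a one-file tar stream.
	var src bytes.Buffer
	tw := tar.NewWriter(&src)
	tw.WriteHeader(&tar.Header{Name: "a.txt", Size: 2, Mode: 0644})
	tw.Write([]byte("hi"))
	tw.Close()

	// Re-read it with raw accounting, copying bytes through verbatim,
	// the way appendTar does in lossless mode.
	var dst bytes.Buffer
	tr := tarsplit.NewReader(bytes.NewReader(src.Bytes()))
	tr.RawAccounting = true
	for {
		_, err := tr.Next()
		if err == io.EOF {
			dst.Write(tr.RawBytes()) // trailing padding and end-of-archive blocks
			break
		}
		if err != nil {
			panic(err)
		}
		dst.Write(tr.RawBytes()) // exact header bytes (plus any preceding padding)
		io.Copy(&dst, tr)        // file payload
	}
	fmt.Println(bytes.Equal(src.Bytes(), dst.Bytes())) // true
}
```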
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
index ee6b2e17f40..ca138382314 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
@@ -3,8 +3,8 @@ module github.com/containerd/stargz-snapshotter/estargz
go 1.16
require (
- github.com/klauspost/compress v1.13.5
+ github.com/klauspost/compress v1.15.7
github.com/opencontainers/go-digest v1.0.0
- github.com/pkg/errors v0.9.1
+ github.com/vbatts/tar-split v0.11.2
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
)
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
index 66cd2d69c3d..493da935654 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
@@ -1,8 +1,20 @@
-github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
-github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
+github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
index efc435e099e..591d7a62e11 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
@@ -34,34 +34,37 @@ import (
"strconv"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
type gzipCompression struct {
- *gzipCompressor
+ *GzipCompressor
*GzipDecompressor
}
func newGzipCompressionWithLevel(level int) Compression {
return &gzipCompression{
- &gzipCompressor{level},
+ &GzipCompressor{level},
&GzipDecompressor{},
}
}
-func NewGzipCompressorWithLevel(level int) Compressor {
- return &gzipCompressor{level}
+func NewGzipCompressor() *GzipCompressor {
+ return &GzipCompressor{gzip.BestCompression}
}
-type gzipCompressor struct {
+func NewGzipCompressorWithLevel(level int) *GzipCompressor {
+ return &GzipCompressor{level}
+}
+
+type GzipCompressor struct {
compressionLevel int
}
-func (gc *gzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
+func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
return gzip.NewWriterLevel(w, gc.compressionLevel)
}
-func (gc *gzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
+func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
tocJSON, err := json.MarshalIndent(toc, "", "\t")
if err != nil {
return "", err
@@ -124,93 +127,111 @@ func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Dig
return parseTOCEStargz(r)
}
-func (gz *GzipDecompressor) ParseFooter(p []byte) (tocOffset, tocSize int64, err error) {
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
if len(p) != FooterSize {
- return 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+ return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
- return 0, 0, err
+ return 0, 0, 0, err
}
defer zr.Close()
extra := zr.Header.Extra
si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
if si1 != 'S' || si2 != 'G' {
- return 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
+ return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
}
if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
- return 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+ return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
}
if string(subfield[16:]) != "STARGZ" {
- return 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+ return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
}
tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
if err != nil {
- return 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+ return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
}
- return tocOffset, 0, nil
+ return tocOffset, tocOffset, 0, nil
}
func (gz *GzipDecompressor) FooterSize() int64 {
return FooterSize
}
-type legacyGzipDecompressor struct{}
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
-func (gz *legacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
return gzip.NewReader(r)
}
-func (gz *legacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
return parseTOCEStargz(r)
}
-func (gz *legacyGzipDecompressor) ParseFooter(p []byte) (tocOffset, tocSize int64, err error) {
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
if len(p) != legacyFooterSize {
- return 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+ return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
- return 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
+ return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
}
defer zr.Close()
extra := zr.Header.Extra
if len(extra) != 16+len("STARGZ") {
- return 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+ return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
}
if string(extra[16:]) != "STARGZ" {
- return 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+ return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
}
tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
if err != nil {
- return 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+ return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
}
- return tocOffset, 0, nil
+ return tocOffset, tocOffset, 0, nil
}
-func (gz *legacyGzipDecompressor) FooterSize() int64 {
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
return legacyFooterSize
}
+func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+ return decompressTOCEStargz(r)
+}
+
func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ tr, err := decompressTOCEStargz(r)
+ if err != nil {
+ return nil, "", err
+ }
+ dgstr := digest.Canonical.Digester()
+ toc = new(JTOC)
+ if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+ return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+ }
+ if err := tr.Close(); err != nil {
+ return nil, "", err
+ }
+ return toc, dgstr.Digest(), nil
+}
+
+func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
zr, err := gzip.NewReader(r)
if err != nil {
- return nil, "", fmt.Errorf("malformed TOC gzip header: %v", err)
+ return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
}
- defer zr.Close()
zr.Multistream(false)
tr := tar.NewReader(zr)
h, err := tr.Next()
if err != nil {
- return nil, "", fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+ return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
}
if h.Name != TOCTarName {
- return nil, "", fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+ return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
}
- dgstr := digest.Canonical.Digester()
- toc = new(JTOC)
- if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
- return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
- }
- return toc, dgstr.Digest(), nil
+ return readCloser{tr, zr.Close}, nil
}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
index 020729b7edb..37448cae085 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -28,10 +28,11 @@ import (
"compress/gzip"
"crypto/sha256"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"os"
+ "path/filepath"
"reflect"
"sort"
"strings"
@@ -41,7 +42,6 @@ import (
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
"github.com/klauspost/compress/zstd"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// TestingController is Compression with some helper methods necessary for testing.
@@ -148,93 +148,96 @@ func testBuild(t *testing.T, controllers ...TestingController) {
srcCompression := srcCompression
for _, cl := range controllers {
cl := cl
- for _, prefix := range allowedPrefix {
- prefix := prefix
- t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q-src=%d", cl, prefix, srcCompression), func(t *testing.T) {
- tarBlob := buildTarStatic(t, tt.in, prefix)
- // Test divideEntries()
- entries, err := sortEntries(tarBlob, nil, nil) // identical order
- if err != nil {
- t.Fatalf("faield to parse tar: %v", err)
- }
- var merged []*entry
- for _, part := range divideEntries(entries, 4) {
- merged = append(merged, part...)
- }
- if !reflect.DeepEqual(entries, merged) {
- for _, e := range entries {
- t.Logf("Original: %v", e.header)
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) {
+ tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
+ // Test divideEntries()
+ entries, err := sortEntries(tarBlob, nil, nil) // identical order
+ if err != nil {
+ t.Fatalf("failed to parse tar: %v", err)
}
- for _, e := range merged {
- t.Logf("Merged: %v", e.header)
+ var merged []*entry
+ for _, part := range divideEntries(entries, 4) {
+ merged = append(merged, part...)
+ }
+ if !reflect.DeepEqual(entries, merged) {
+ for _, e := range entries {
+ t.Logf("Original: %v", e.header)
+ }
+ for _, e := range merged {
+ t.Logf("Merged: %v", e.header)
+ }
+ t.Errorf("divided entries couldn't be merged")
+ return
}
- t.Errorf("divided entries couldn't be merged")
- return
- }
- // Prepare sample data
- wantBuf := new(bytes.Buffer)
- sw := NewWriterWithCompressor(wantBuf, cl)
- sw.ChunkSize = tt.chunkSize
- if err := sw.AppendTar(tarBlob); err != nil {
- t.Fatalf("faield to append tar to want stargz: %v", err)
- }
- if _, err := sw.Close(); err != nil {
- t.Fatalf("faield to prepare want stargz: %v", err)
- }
- wantData := wantBuf.Bytes()
- want, err := Open(io.NewSectionReader(
- bytes.NewReader(wantData), 0, int64(len(wantData))),
- WithDecompressors(cl),
- )
- if err != nil {
- t.Fatalf("failed to parse the want stargz: %v", err)
- }
+ // Prepare sample data
+ wantBuf := new(bytes.Buffer)
+ sw := NewWriterWithCompressor(wantBuf, cl)
+ sw.ChunkSize = tt.chunkSize
+ if err := sw.AppendTar(tarBlob); err != nil {
+ t.Fatalf("failed to append tar to want stargz: %v", err)
+ }
+ if _, err := sw.Close(); err != nil {
+ t.Fatalf("failed to prepare want stargz: %v", err)
+ }
+ wantData := wantBuf.Bytes()
+ want, err := Open(io.NewSectionReader(
+ bytes.NewReader(wantData), 0, int64(len(wantData))),
+ WithDecompressors(cl),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the want stargz: %v", err)
+ }
- // Prepare testing data
- rc, err := Build(compressBlob(t, tarBlob, srcCompression),
- WithChunkSize(tt.chunkSize), WithCompression(cl))
- if err != nil {
- t.Fatalf("faield to build stargz: %v", err)
- }
- defer rc.Close()
- gotBuf := new(bytes.Buffer)
- if _, err := io.Copy(gotBuf, rc); err != nil {
- t.Fatalf("failed to copy built stargz blob: %v", err)
- }
- gotData := gotBuf.Bytes()
- got, err := Open(io.NewSectionReader(
- bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
- WithDecompressors(cl),
- )
- if err != nil {
- t.Fatalf("failed to parse the got stargz: %v", err)
- }
+ // Prepare testing data
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ WithChunkSize(tt.chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to build stargz: %v", err)
+ }
+ defer rc.Close()
+ gotBuf := new(bytes.Buffer)
+ if _, err := io.Copy(gotBuf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ gotData := gotBuf.Bytes()
+ got, err := Open(io.NewSectionReader(
+ bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
+ WithDecompressors(cl),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the got stargz: %v", err)
+ }
- // Check DiffID is properly calculated
- rc.Close()
- diffID := rc.DiffID()
- wantDiffID := cl.DiffIDOf(t, gotData)
- if diffID.String() != wantDiffID {
- t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
- }
+ // Check DiffID is properly calculated
+ rc.Close()
+ diffID := rc.DiffID()
+ wantDiffID := cl.DiffIDOf(t, gotData)
+ if diffID.String() != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
- // Compare as stargz
- if !isSameVersion(t, cl, wantData, gotData) {
- t.Errorf("built stargz hasn't same json")
- return
- }
- if !isSameEntries(t, want, got) {
- t.Errorf("built stargz isn't same as the original")
- return
- }
+ // Compare as stargz
+ if !isSameVersion(t, cl, wantData, gotData) {
+ t.Errorf("built stargz hasn't same json")
+ return
+ }
+ if !isSameEntries(t, want, got) {
+ t.Errorf("built stargz isn't same as the original")
+ return
+ }
- // Compare as tar.gz
- if !isSameTarGz(t, cl, wantData, gotData) {
- t.Errorf("built stargz isn't same tar.gz")
- return
- }
- })
+ // Compare as tar.gz
+ if !isSameTarGz(t, cl, wantData, gotData) {
+ t.Errorf("built stargz isn't same tar.gz")
+ return
+ }
+ })
+ }
}
}
}
@@ -284,11 +287,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
return false
}
- aFile, err := ioutil.ReadAll(aTar)
+ aFile, err := io.ReadAll(aTar)
if err != nil {
t.Fatal("failed to read tar payload of A")
}
- bFile, err := ioutil.ReadAll(bTar)
+ bFile, err := io.ReadAll(bTar)
if err != nil {
t.Fatal("failed to read tar payload of B")
}
@@ -526,7 +529,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
checks: []check{
checkStargzTOC,
checkVerifyTOC,
- checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
dir("test2/"), // modified
), allowedPrefix[0])),
},
@@ -544,7 +547,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
checks: []check{
checkStargzTOC,
checkVerifyTOC,
- checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
file("baz.txt", ""),
file("foo.txt", "M"), // modified
dir("test/"),
@@ -567,7 +570,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
checks: []check{
checkStargzTOC,
checkVerifyTOC,
- checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified
file("foo.txt", "a"),
dir("test/"),
@@ -593,7 +596,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
checks: []check{
checkStargzTOC,
checkVerifyTOC,
- checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
file("baz.txt", "bazbazbazbazbazbazbaz"),
file("foo.txt", "a"),
symlink("barlink", "test/bar.txt"),
@@ -615,30 +618,33 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
cl := cl
for _, prefix := range allowedPrefix {
prefix := prefix
- t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q", cl, prefix), func(t *testing.T) {
- // Get original tar file and chunk digests
- dgstMap := make(map[string]digest.Digest)
- tarBlob := buildTarStatic(t, tt.tarInit(t, dgstMap), prefix)
-
- rc, err := Build(compressBlob(t, tarBlob, srcCompression),
- WithChunkSize(chunkSize), WithCompression(cl))
- if err != nil {
- t.Fatalf("failed to convert stargz: %v", err)
- }
- tocDigest := rc.TOCDigest()
- defer rc.Close()
- buf := new(bytes.Buffer)
- if _, err := io.Copy(buf, rc); err != nil {
- t.Fatalf("failed to copy built stargz blob: %v", err)
- }
- newStargz := buf.Bytes()
- // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
- dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) {
+ // Get original tar file and chunk digests
+ dgstMap := make(map[string]digest.Digest)
+ tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
+
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ WithChunkSize(chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ tocDigest := rc.TOCDigest()
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ newStargz := buf.Bytes()
+ // NoPrefetchLandmark is added during `Build`, which is expected behaviour.
+ dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
- for _, check := range tt.checks {
- check(t, newStargz, tocDigest, dgstMap, cl)
- }
- })
+ for _, check := range tt.checks {
+ check(t, newStargz, tocDigest, dgstMap, cl)
+ }
+ })
+ }
}
}
}
@@ -1056,18 +1062,18 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
fSize := controller.FooterSize()
footer := make([]byte, fSize)
if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
- return nil, 0, errors.Wrap(err, "error reading footer")
+ return nil, 0, fmt.Errorf("error reading footer: %w", err)
}
- tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
+ _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
if err != nil {
- return nil, 0, errors.Wrapf(err, "failed to parse footer")
+ return nil, 0, fmt.Errorf("failed to parse footer: %w", err)
}
// Decode the TOC JSON
tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
decodedJTOC, _, err = controller.ParseTOC(tocReader)
if err != nil {
- return nil, 0, errors.Wrap(err, "failed to parse TOC")
+ return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
}
return decodedJTOC, tocOffset, nil
}
@@ -1085,11 +1091,15 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
in []tarEntry
want []stargzCheck
wantNumGz int // expected number of streams
+
+ wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
+ wantFailOnLossLess bool
}{
{
- name: "empty",
- in: tarOf(),
- wantNumGz: 2, // TOC + footer
+ name: "empty",
+ in: tarOf(),
+ wantNumGz: 2, // TOC + footer (the empty tar stream is dropped)
+ wantNumGzLossLess: 3, // empty tar + TOC + footer
want: checks(
numTOCEntries(0),
),
@@ -1224,26 +1234,29 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
{
name: "block_char_fifo",
in: tarOf(
- tarEntryFunc(func(w *tar.Writer, prefix string) error {
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
return w.WriteHeader(&tar.Header{
Name: prefix + "b",
Typeflag: tar.TypeBlock,
Devmajor: 123,
Devminor: 456,
+ Format: format,
})
}),
- tarEntryFunc(func(w *tar.Writer, prefix string) error {
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
return w.WriteHeader(&tar.Header{
Name: prefix + "c",
Typeflag: tar.TypeChar,
Devmajor: 111,
Devminor: 222,
+ Format: format,
})
}),
- tarEntryFunc(func(w *tar.Writer, prefix string) error {
+ tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
return w.WriteHeader(&tar.Header{
Name: prefix + "f",
Typeflag: tar.TypeFifo,
+ Format: format,
})
}),
),
@@ -1278,6 +1291,41 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
hasMode("foo3/bar5", os.FileMode(0755)),
),
},
+ {
+ name: "lossy",
+ in: tarOf(
+ dir("bar/", sampleOwner),
+ dir("foo/", sampleOwner),
+ file("foo/bar.txt", content, sampleOwner),
+ file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error)
+ ),
+ wantNumGz: 4, // both dirs, foo/bar.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ wantFailOnLossLess: true,
+ },
+ {
+ name: "hardlink should be replaced to the destination entry",
+ in: tarOf(
+ dir("foo/"),
+ file("foo/foo1", "test"),
+ link("foolink", "foo/foo1"),
+ ),
+ wantNumGz: 4, // dir, foo1 + link, TOC, footer
+ want: checks(
+ mustSameEntry("foo/foo1", "foolink"),
+ ),
+ },
}
for _, tt := range tests {
@@ -1285,47 +1333,90 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
cl := cl
for _, prefix := range allowedPrefix {
prefix := prefix
- t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q", cl, prefix), func(t *testing.T) {
- tr, cancel := buildTar(t, tt.in, prefix)
- defer cancel()
- var stargzBuf bytes.Buffer
- w := NewWriterWithCompressor(&stargzBuf, cl)
- w.ChunkSize = tt.chunkSize
- if err := w.AppendTar(tr); err != nil {
- t.Fatalf("Append: %v", err)
- }
- if _, err := w.Close(); err != nil {
- t.Fatalf("Writer.Close: %v", err)
- }
- b := stargzBuf.Bytes()
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, lossless := range []bool{true, false} {
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
+ var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+ origTarDgstr := digest.Canonical.Digester()
+ tr = io.TeeReader(tr, origTarDgstr.Hash())
+ var stargzBuf bytes.Buffer
+ w := NewWriterWithCompressor(&stargzBuf, cl)
+ w.ChunkSize = tt.chunkSize
+ if lossless {
+ err := w.AppendTarLossLess(tr)
+ if tt.wantFailOnLossLess {
+ if err != nil {
+ return // expected to fail
+ }
+ t.Fatalf("Append wanted to fail on lossless")
+ }
+ if err != nil {
+ t.Fatalf("Append(lossless): %v", err)
+ }
+ } else {
+ if err := w.AppendTar(tr); err != nil {
+ t.Fatalf("Append: %v", err)
+ }
+ }
+ if _, err := w.Close(); err != nil {
+ t.Fatalf("Writer.Close: %v", err)
+ }
+ b := stargzBuf.Bytes()
+
+ if lossless {
+ // Check if the result blob preserves original tar metadata
+ rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
+ if err != nil {
+ t.Errorf("failed to decompress blob: %v", err)
+ return
+ }
+ defer rc.Close()
+ resultDgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+ t.Errorf("failed to read result decompressed blob: %v", err)
+ return
+ }
+ if resultDgstr.Digest() != origTarDgstr.Digest() {
+ t.Errorf("lossy compression occurred: digest=%v; want %v",
+ resultDgstr.Digest(), origTarDgstr.Digest())
+ return
+ }
+ }
- diffID := w.DiffID()
- wantDiffID := cl.DiffIDOf(t, b)
- if diffID != wantDiffID {
- t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
- }
+ diffID := w.DiffID()
+ wantDiffID := cl.DiffIDOf(t, b)
+ if diffID != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
- got := cl.CountStreams(t, b)
- if got != tt.wantNumGz {
- t.Errorf("number of streams = %d; want %d", got, tt.wantNumGz)
- }
+ got := cl.CountStreams(t, b)
+ wantNumGz := tt.wantNumGz
+ if lossless && tt.wantNumGzLossLess > 0 {
+ wantNumGz = tt.wantNumGzLossLess
+ }
+ if got != wantNumGz {
+ t.Errorf("number of streams = %d; want %d", got, wantNumGz)
+ }
- telemetry, checkCalled := newCalledTelemetry()
- r, err := Open(
- io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
- WithDecompressors(cl),
- WithTelemetry(telemetry),
- )
- if err != nil {
- t.Fatalf("stargz.Open: %v", err)
- }
- if err := checkCalled(); err != nil {
- t.Errorf("telemetry failure: %v", err)
- }
- for _, want := range tt.want {
- want.check(t, r)
+ telemetry, checkCalled := newCalledTelemetry()
+ r, err := Open(
+ io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
+ WithDecompressors(cl),
+ WithTelemetry(telemetry),
+ )
+ if err != nil {
+ t.Fatalf("stargz.Open: %v", err)
+ }
+ if err := checkCalled(); err != nil {
+ t.Errorf("telemetry failure: %v", err)
+ }
+ for _, want := range tt.want {
+ want.check(t, r)
+ }
+ })
}
- })
+ }
}
}
}
@@ -1652,52 +1743,98 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
})
}
+func mustSameEntry(files ...string) stargzCheck {
+ return stargzCheckFn(func(t *testing.T, r *Reader) {
+ var first *TOCEntry
+ for _, f := range files {
+ if first == nil {
+ var ok bool
+ first, ok = r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown first file on Lookup: %q", f)
+ return
+ }
+ }
+
+ // Test Lookup
+ e, ok := r.Lookup(f)
+ if !ok {
+ t.Errorf("unknown file on Lookup: %q", f)
+ return
+ }
+ if e != first {
+ t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test LookupChild
+ pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get parent of %q", f)
+ return
+ }
+ e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
+ if !ok {
+ t.Errorf("failed to get %q as the child of %+v", f, pe)
+ return
+ }
+ if e != first {
+ t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return
+ }
+
+ // Test ForeachChild
+ pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
+ if baseName == filepath.Base(filepath.Clean(f)) {
+ if e != first {
+ t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
+ return false
+ }
+ }
+ return true
+ })
+ }
+ })
+}
+
func tarOf(s ...tarEntry) []tarEntry { return s }
type tarEntry interface {
- appendTar(tw *tar.Writer, prefix string) error
+ appendTar(tw *tar.Writer, prefix string, format tar.Format) error
}
-type tarEntryFunc func(*tar.Writer, string) error
+type tarEntryFunc func(*tar.Writer, string, tar.Format) error
-func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string) error { return f(tw, prefix) }
-
-func buildTar(t *testing.T, ents []tarEntry, prefix string) (r io.Reader, cancel func()) {
- pr, pw := io.Pipe()
- go func() {
- tw := tar.NewWriter(pw)
- for _, ent := range ents {
- if err := ent.appendTar(tw, prefix); err != nil {
- t.Errorf("building input tar: %v", err)
- pw.Close()
- return
- }
- }
- if err := tw.Close(); err != nil {
- t.Errorf("closing write of input tar: %v", err)
- }
- pw.Close()
- }()
- return pr, func() { go pr.Close(); go pw.Close() }
+func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
+ return f(tw, prefix, format)
}
-func buildTarStatic(t *testing.T, ents []tarEntry, prefix string) *io.SectionReader {
+func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+ format := tar.FormatUnknown
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case tar.Format:
+ format = v
+ default:
+ panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
+ }
+ }
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, ent := range ents {
- if err := ent.appendTar(tw, prefix); err != nil {
+ if err := ent.appendTar(tw, prefix, format); err != nil {
t.Fatalf("building input tar: %v", err)
}
}
if err := tw.Close(); err != nil {
t.Errorf("closing write of input tar: %v", err)
}
- data := buf.Bytes()
+ data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to check that lossless mode preserves them
return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
}
func dir(name string, opts ...interface{}) tarEntry {
- return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
var o owner
mode := os.FileMode(0755)
for _, opt := range opts {
@@ -1723,6 +1860,7 @@ func dir(name string, opts ...interface{}) tarEntry {
Mode: tm,
Uid: o.uid,
Gid: o.gid,
+ Format: format,
})
})
}
@@ -1737,7 +1875,7 @@ type owner struct {
}
func file(name, contents string, opts ...interface{}) tarEntry {
- return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
var xattrs xAttr
var o owner
mode := os.FileMode(0644)
@@ -1760,6 +1898,9 @@ func file(name, contents string, opts ...interface{}) tarEntry {
if err != nil {
return err
}
+ if len(xattrs) > 0 {
+ format = tar.FormatPAX // only PAX supports xattrs
+ }
if err := tw.WriteHeader(&tar.Header{
Typeflag: tar.TypeReg,
Name: prefix + name,
@@ -1768,6 +1909,7 @@ func file(name, contents string, opts ...interface{}) tarEntry {
Size: int64(len(contents)),
Uid: o.uid,
Gid: o.gid,
+ Format: format,
}); err != nil {
return err
}
@@ -1777,78 +1919,76 @@ func file(name, contents string, opts ...interface{}) tarEntry {
}
func symlink(name, target string) tarEntry {
- return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
+ return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
return tw.WriteHeader(&tar.Header{
Typeflag: tar.TypeSymlink,
Name: prefix + name,
Linkname: target,
Mode: 0644,
+ Format: format,
})
})
}
func link(name string, linkname string) tarEntry {
now := time.Now()
- return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
return w.WriteHeader(&tar.Header{
- Typeflag: tar.TypeLink,
- Name: prefix + name,
- Linkname: linkname,
- ModTime: now,
- AccessTime: now,
- ChangeTime: now,
+ Typeflag: tar.TypeLink,
+ Name: prefix + name,
+ Linkname: linkname,
+ ModTime: now,
+ Format: format,
})
})
}
func chardev(name string, major, minor int64) tarEntry {
now := time.Now()
- return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
return w.WriteHeader(&tar.Header{
- Typeflag: tar.TypeChar,
- Name: prefix + name,
- Devmajor: major,
- Devminor: minor,
- ModTime: now,
- AccessTime: now,
- ChangeTime: now,
+ Typeflag: tar.TypeChar,
+ Name: prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ Format: format,
})
})
}
func blockdev(name string, major, minor int64) tarEntry {
now := time.Now()
- return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
return w.WriteHeader(&tar.Header{
- Typeflag: tar.TypeBlock,
- Name: prefix + name,
- Devmajor: major,
- Devminor: minor,
- ModTime: now,
- AccessTime: now,
- ChangeTime: now,
+ Typeflag: tar.TypeBlock,
+ Name: prefix + name,
+ Devmajor: major,
+ Devminor: minor,
+ ModTime: now,
+ Format: format,
})
})
}
func fifo(name string) tarEntry {
now := time.Now()
- return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
return w.WriteHeader(&tar.Header{
- Typeflag: tar.TypeFifo,
- Name: prefix + name,
- ModTime: now,
- AccessTime: now,
- ChangeTime: now,
+ Typeflag: tar.TypeFifo,
+ Name: prefix + name,
+ ModTime: now,
+ Format: format,
})
})
}
func prefetchLandmark() tarEntry {
- return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
if err := w.WriteHeader(&tar.Header{
Name: PrefetchLandmark,
Typeflag: tar.TypeReg,
Size: int64(len([]byte{landmarkContents})),
+ Format: format,
}); err != nil {
return err
}
@@ -1861,11 +2001,12 @@ func prefetchLandmark() tarEntry {
}
func noPrefetchLandmark() tarEntry {
- return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
if err := w.WriteHeader(&tar.Header{
Name: NoPrefetchLandmark,
Typeflag: tar.TypeReg,
Size: int64(len([]byte{landmarkContents})),
+ Format: format,
}); err != nil {
return err
}
@@ -1899,11 +2040,12 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin
n += size
}
- return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+ return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
if err := w.WriteHeader(&tar.Header{
Typeflag: tar.TypeReg,
Name: prefix + name,
Size: int64(len(content)),
+ Format: format,
}); err != nil {
return err
}
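
The reworked helpers thread an explicit tar.Format through every entry so the tests cover USTAR, PAX and GNU sources. Forcing a format comes down to setting tar.Header.Format before WriteHeader; a minimal sketch under that assumption (the file name is illustrative):

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

func main() {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	// Setting Format on the header forces archive/tar to emit that format;
	// PAX is required when xattrs are present, which is why file() above
	// upgrades to tar.FormatPAX in that case.
	hdr := &tar.Header{
		Typeflag: tar.TypeReg,
		Name:     "foo.txt",
		Mode:     0644,
		Size:     int64(len("hello")),
		Format:   tar.FormatPAX,
	}
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	if _, err := tw.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d bytes of PAX tar\n", buf.Len())
}
```
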
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
index 1b1075a6466..3bc74463ecf 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
@@ -159,7 +159,8 @@ type TOCEntry struct {
// NumLink is the number of entry names pointing to this entry.
// Zero means one name references this entry.
- NumLink int
+ // This field is calculated at runtime and is not recorded in the TOC JSON.
+ NumLink int `json:"-"`
// Xattrs are the extended attribute for the entry.
Xattrs map[string][]byte `json:"xattrs,omitempty"`
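
The json:"-" tag is what keeps NumLink out of the serialized TOC: encoding/json skips such fields in both directions. A tiny sketch of that behavior:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// entry mimics the TOCEntry change: a field tagged `json:"-"` is ignored by
// encoding/json both when marshaling and when unmarshaling, so it stays a
// runtime-only value and never appears in the TOC JSON.
type entry struct {
	Name    string `json:"name"`
	NumLink int    `json:"-"`
}

func main() {
	b, _ := json.Marshal(entry{Name: "foo", NumLink: 3})
	fmt.Println(string(b)) // {"name":"foo"}

	var e entry
	_ = json.Unmarshal([]byte(`{"name":"bar","NumLink":7}`), &e)
	fmt.Println(e.NumLink) // 0 — ignored on decode as well
}
```
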
@@ -290,7 +291,7 @@ type Compressor interface {
WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
}
-// Deompressor represents the helper mothods to be used for parsing eStargz.
+// Decompressor represents the helper methods to be used for parsing eStargz.
type Decompressor interface {
// Reader returns ReadCloser to be used for decompressing file payload.
Reader(r io.Reader) (io.ReadCloser, error)
@@ -299,10 +300,12 @@ type Decompressor interface {
FooterSize() int64
// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
+ // blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from
+ // the top of the blob up to the TOC JSON).
//
// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
- ParseFooter(p []byte) (tocOffset, tocSize int64, err error)
+ ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
// of the underlying blob that has the range specified by ParseFooter method.
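
Out-of-tree Decompressor implementations must adopt the widened four-value signature. For formats without a separate payload region, reporting blobPayloadSize == tocOffset, as both gzip decompressors above now do, is one reasonable adaptation; a hypothetical sketch (myDecompressor and legacyParse are made-up names):

```go
package main

import "fmt"

// myDecompressor stands in for an out-of-tree Decompressor implementation;
// legacyParse is a hypothetical stand-in for the footer parser it already
// had before this interface change.
type myDecompressor struct{}

func (d *myDecompressor) legacyParse(p []byte) (int64, error) {
	return int64(len(p)), nil // placeholder: a real parser decodes the footer bytes
}

// ParseFooter adopts the new four-value signature. Formats without a separate
// payload region can report blobPayloadSize == tocOffset, exactly as both
// gzip decompressors above now do; tocSize <= 0 asks the caller to fall back
// to the default size calculation.
func (d *myDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
	tocOffset, err = d.legacyParse(p)
	if err != nil {
		return 0, 0, 0, err
	}
	return tocOffset, tocOffset, 0, nil
}

func main() {
	bps, off, size, err := (&myDecompressor{}).ParseFooter(make([]byte, 51))
	fmt.Println(bps, off, size, err) // 51 51 0 <nil>
}
```
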
diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go
new file mode 100644
index 00000000000..cfac3e6d583
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/blob.go
@@ -0,0 +1,171 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest,
+// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
+// perhaps (de/re/)compressing it if canModifyBlob,
+// and returns a complete blobInfo of the copied blob.
+func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo,
+ getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
+ isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
+ // The copying happens through a pipeline of connected io.Readers;
+ // that pipeline is built by updating stream.
+ // === Input: srcReader
+ stream := sourceStream{
+ reader: srcReader,
+ info: srcInfo,
+ }
+
+ // === Process input through digestingReader to validate against the expected digest.
+ // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+ // use a separate validation failure indicator.
+ // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
+ // dest.PutBlob may detect that the layer already exists, in which case we don't
+ // read stream to the end, and validation does not happen.
+ digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("preparing to verify blob %s: %w", srcInfo.Digest, err)
+ }
+ stream.reader = digestingReader
+
+ // === Update progress bars
+ stream.reader = bar.ProxyReader(stream.reader)
+
+ // === Decrypt the stream, if required.
+ decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Detect compression of the input stream.
+ // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
+ detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
+ var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
+ if getOriginalLayerCopyWriter != nil {
+ stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor))
+ originalLayerReader = stream.reader
+ }
+
+ // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
+ // short-circuit conditions
+ canModifyBlob := !isConfig && ic.cannotModifyManifestReason == ""
+ // === Deal with layer compression/decompression if necessary
+ compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer compressionStep.close()
+
+ // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
+ if decryptionStep.decrypting && toEncrypt {
+ // If nothing else, we can only set uploadedInfo.CryptoOperation to a single value.
+ // Before relaxing this, see the original pull request’s review if there are other reasons to reject this.
+ return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
+ }
+ encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Report progress using the ic.c.progress channel, if required.
+ if ic.c.progress != nil && ic.c.progressInterval > 0 {
+ progressReader := newProgressReader(
+ stream.reader,
+ ic.c.progress,
+ ic.c.progressInterval,
+ srcInfo,
+ )
+ defer progressReader.reportDone()
+ stream.reader = progressReader
+ }
+
+ // === Finally, send the layer stream to dest.
+ options := private.PutBlobOptions{
+ Cache: ic.c.blobInfoCache,
+ IsConfig: isConfig,
+ EmptyLayer: emptyLayer,
+ }
+ if !isConfig {
+ options.LayerIndex = &layerIndex
+ }
+ uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err)
+ }
+
+ uploadedInfo.Annotations = stream.info.Annotations
+
+ compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
+ decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
+ if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+ // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+ // So, read everything from originalLayerReader, which will cause the rest to be
+ // sent there if we are not already at EOF.
+ if getOriginalLayerCopyWriter != nil {
+ logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+ _, err := io.Copy(io.Discard, originalLayerReader)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("reading input blob %s: %w", srcInfo.Digest, err)
+ }
+ }
+
+ if digestingReader.validationFailed { // Coverage: This should never happen.
+ return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+ }
+ if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest {
+ return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
+ }
+ if digestingReader.validationSucceeded {
+ if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil {
+ return types.BlobInfo{}, err
+ }
+ }
+
+ return uploadedInfo, nil
+}
+
+// sourceStream encapsulates an input consumed by copyBlobFromStream, in progress of being built.
+// This allows handlers of individual aspects to build the copy pipeline without _too much_
+// specific cooperation by the caller.
+//
+// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline
+// without specific knowledge of various aspects in copyBlobFromStream; that may come one day.
+type sourceStream struct {
+ reader io.Reader
+ info types.BlobInfo // corresponding to the data available in reader.
+}
+
+// errorAnnotationReader wraps the io.Reader passed to PutBlob to annotate errors that happen during reads.
+// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
+type errorAnnotationReader struct {
+ reader io.Reader
+}
+
+// Read annotates any error that happens during the read
+func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
+ n, err = r.reader.Read(b)
+ if err != nil && err != io.EOF {
+ return n, fmt.Errorf("happened during read: %w", err)
+ }
+ return n, err
+}
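
copyBlobFromStream assembles the copy as a chain of wrapping readers, so digesting, progress reporting, (de)compression and encryption all happen in one pass over the blob. A reduced sketch of just the digest-verification stage — verifyingCopy is illustrative, not the package's digestingReader:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// verifyingCopy copies src to dst while feeding every byte through a digester
// via io.TeeReader, then compares the computed digest with the expected one —
// the same single-pass wrapping idea copyBlobFromStream builds on.
func verifyingCopy(dst io.Writer, src io.Reader, want digest.Digest) error {
	dgstr := digest.Canonical.Digester()
	if _, err := io.Copy(dst, io.TeeReader(src, dgstr.Hash())); err != nil {
		return err
	}
	if got := dgstr.Digest(); got != want {
		return fmt.Errorf("digest mismatch: got %s, want %s", got, want)
	}
	return nil
}

func main() {
	data := "layer bytes"
	want := digest.FromString(data)
	var sink strings.Builder
	fmt.Println(verifyingCopy(&sink, strings.NewReader(data), want)) // <nil>
}
```
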
diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go
new file mode 100644
index 00000000000..ff0e7945dd7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/compression.go
@@ -0,0 +1,321 @@
+package copy
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
+type bpDetectCompressionStepData struct {
+ isCompressed bool
+ format compressiontypes.Algorithm // Valid if isCompressed
+ decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
+ srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob.
+}
+
+// blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
+// srcInfo is only used for error messages.
+// Returns data for other steps.
+func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) {
+ // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
+ format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
+ if err != nil {
+ return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
+ }
+ stream.reader = reader
+
+ res := bpDetectCompressionStepData{
+ isCompressed: decompressor != nil,
+ format: format,
+ decompressor: decompressor,
+ }
+ if res.isCompressed {
+ res.srcCompressorName = format.Name()
+ } else {
+ res.srcCompressorName = internalblobinfocache.Uncompressed
+ }
+
+ if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() {
+ logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name())
+ }
+ return res, nil
+}
+
+// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
+type bpCompressionStepData struct {
+ operation types.LayerCompression // Operation to use for updating the blob metadata.
+ uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
+ uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
+ srcCompressorName string // Compressor name to record in the blob info cache for the source blob.
+ uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob.
+ closers []io.Closer // Objects to close after the upload is done, if any.
+}
+
+// blobPipelineCompressionStep updates *stream to compress and/or decompress it.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedDigestData,
+// and must eventually call close.
+func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo,
+ detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
+ // short-circuit conditions
+ layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType)
+ if !layerCompressionChangeSupported {
+ logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType)
+ }
+ if canModifyBlob && layerCompressionChangeSupported {
+ for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){
+ ic.bpcPreserveEncrypted,
+ ic.bpcCompressUncompressed,
+ ic.bpcRecompressCompressed,
+ ic.bpcDecompressCompressed,
+ } {
+ res, err := fn(stream, detected)
+ if err != nil {
+ return nil, err
+ }
+ if res != nil {
+ return res, nil
+ }
+ }
+ }
+ return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil
+}
+
+// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if isOciEncrypted(stream.info.MediaType) {
+ logrus.Debugf("Using original blob without modification for encrypted blob")
+ // PreserveOriginal because compression cannot be applied to an encrypted blob unless it is decrypted first
+ return &bpCompressionStepData{
+ operation: types.PreserveOriginal,
+ uploadedAlgorithm: nil,
+ srcCompressorName: internalblobinfocache.UnknownCompression,
+ uploadedCompressorName: internalblobinfocache.UnknownCompression,
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
+ logrus.Debugf("Compressing blob on the fly")
+ var uploadedAlgorithm *compressiontypes.Algorithm
+ if ic.c.compressionFormat != nil {
+ uploadedAlgorithm = ic.c.compressionFormat
+ } else {
+ uploadedAlgorithm = defaultCompressionFormat
+ }
+
+ reader, annotations := ic.c.compressedStream(stream.reader, *uploadedAlgorithm)
+ // Note: reader must be closed on all return paths.
+ stream.reader = reader
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
+ Digest: "",
+ Size: -1,
+ }
+ return &bpCompressionStepData{
+ operation: types.Compress,
+ uploadedAlgorithm: uploadedAlgorithm,
+ uploadedAnnotations: annotations,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: uploadedAlgorithm.Name(),
+ closers: []io.Closer{reader},
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
+ ic.c.compressionFormat != nil && ic.c.compressionFormat.Name() != detected.format.Name() {
+ // When the blob is compressed, but the desired format is different, it first needs to be decompressed and then
+ // re-compressed using the desired format.
+ logrus.Debugf("Blob will be converted")
+
+ decompressed, err := detected.decompressor(stream.reader)
+ if err != nil {
+ return nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ decompressed.Close()
+ }
+ }()
+
+ recompressed, annotations := ic.c.compressedStream(decompressed, *ic.c.compressionFormat)
+ // Note: recompressed must be closed on all return paths.
+ stream.reader = recompressed
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
+ Digest: "",
+ Size: -1,
+ }
+ succeeded = true
+ return &bpCompressionStepData{
+ operation: types.PreserveOriginal,
+ uploadedAlgorithm: ic.c.compressionFormat,
+ uploadedAnnotations: annotations,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: ic.c.compressionFormat.Name(),
+ closers: []io.Closer{decompressed, recompressed},
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed {
+ logrus.Debugf("Blob will be decompressed")
+ s, err := detected.decompressor(stream.reader)
+ if err != nil {
+ return nil, err
+ }
+ // Note: s must be closed on all return paths.
+ stream.reader = s
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
+ Digest: "",
+ Size: -1,
+ }
+ return &bpCompressionStepData{
+ operation: types.Decompress,
+ uploadedAlgorithm: nil,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: internalblobinfocache.Uncompressed,
+ closers: []io.Closer{s},
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
+func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData,
+ layerCompressionChangeSupported bool) *bpCompressionStepData {
+ logrus.Debugf("Using original blob without modification")
+ // Remember if the original blob was compressed, and if so how, so that if
+ // LayerInfosForCopy() returned something that differs from what was in the
+ // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
+ // it will be able to correctly derive the MediaType for the copied blob.
+ //
+ // But don’t touch blobs in objects where we can’t change compression,
+ // so that src.UpdatedImage() doesn’t fail; assume that for such blobs
+ // LayerInfosForCopy() should not be making any changes in the first place.
+ var algorithm *compressiontypes.Algorithm
+ if layerCompressionChangeSupported && detected.isCompressed {
+ algorithm = &detected.format
+ } else {
+ algorithm = nil
+ }
+ return &bpCompressionStepData{
+ operation: types.PreserveOriginal,
+ uploadedAlgorithm: algorithm,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: detected.srcCompressorName,
+ }
+}
+
+// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
+func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
+ *operation = d.operation
+ // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
+ *algorithm = d.uploadedAlgorithm
+ if *annotations == nil {
+ *annotations = map[string]string{}
+ }
+ for k, v := range d.uploadedAnnotations {
+ (*annotations)[k] = v
+ }
+}
+
+// recordValidatedDigestData updates c.blobInfoCache with data about the created uploadedInfo and the original srcInfo.
+// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
+func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
+ encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
+ // Don’t record any associations that involve encrypted data. This is a bit crude,
+ // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+ // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+ // This crude approach also means we don’t need to record whether a blob is encrypted
+ // in the blob info cache (which would probably be necessary for any more complex logic),
+ // and the simplicity is attractive.
+ if !encryptionStep.encrypting && !decryptionStep.decrypting {
+ // If d.operation != types.PreserveOriginal, we now have two reliable digest values:
+ // srcinfo.Digest describes the pre-d.operation input, verified by digestingReader
+ // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
+ // (because stream.info.Digest == "", this must have been computed afresh).
+ switch d.operation {
+ case types.PreserveOriginal:
+ break // Do nothing, we have only one digest and we might not have even verified it.
+ case types.Compress:
+ c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+ case types.Decompress:
+ c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+ default:
+ return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
+ }
+ }
+ if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
+ c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
+ }
+ if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
+ c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
+ }
+ return nil
+}
+
+// close closes objects that carry state throughout the compression/decompression operation.
+func (d *bpCompressionStepData) close() {
+ for _, c := range d.closers {
+ c.Close()
+ }
+}
+
+// doCompression reads all input from src and writes its compressed equivalent to dest.
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
+ compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, compressionBufferSize)
+
+ _, err = io.CopyBuffer(compressor, src, buf) // On success err stays nil, so compressGoroutine's deferred CloseWithError(nil) closes dest cleanly
+ if err != nil {
+ compressor.Close()
+ return err
+ }
+
+ return compressor.Close()
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
+ err := errors.New("Internal error: unexpected panic in compressGoroutine")
+ defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+
+ err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
+}
+
+// compressedStream returns a stream the input reader compressed using format, and a metadata map.
+// The caller must close the returned reader.
+// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
+func (c *copier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
+ pipeReader, pipeWriter := io.Pipe()
+ annotations := map[string]string{}
+ // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+ // e.g. because the consumer has closed pipeReader and further writes to the pipe fail,
+ // we don’t care.
+ go c.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
+ return pipeReader, annotations
+}
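
compressedStream keeps the pipeline streaming by running the compressor in a goroutine behind an io.Pipe; failures surface to the reading side through CloseWithError. A minimal sketch of the same shape using plain gzip — gzipStream is illustrative, not part of the package:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// gzipStream mirrors the compressedStream/compressGoroutine shape: a
// goroutine compresses into the write end of a pipe and reports failures via
// CloseWithError, so the reader side observes them as read errors.
func gzipStream(src io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		zw := gzip.NewWriter(pw)
		_, err := io.Copy(zw, src)
		if cerr := zw.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // CloseWithError(nil) behaves like Close()
	}()
	return pr
}

func main() {
	rc := gzipStream(strings.NewReader("hello pipeline"))
	defer rc.Close()
	compressed, _ := io.ReadAll(rc)

	zr, _ := gzip.NewReader(bytes.NewReader(compressed))
	out, _ := io.ReadAll(zr)
	fmt.Println(string(out)) // hello pipeline
}
```
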
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index e1649ba8e18..6758d4de133 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -3,9 +3,9 @@ package copy
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"os"
"reflect"
"strings"
@@ -13,10 +13,12 @@ import (
"time"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagesource"
"github.com/containers/image/v5/internal/pkg/platform"
- internalTypes "github.com/containers/image/v5/internal/types"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
@@ -24,14 +26,11 @@ import (
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/containers/ocicrypt"
encconfig "github.com/containers/ocicrypt/config"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb/v7"
- "github.com/vbauerster/mpb/v7/decor"
"golang.org/x/sync/semaphore"
"golang.org/x/term"
)
@@ -63,8 +62,8 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
- dest types.ImageDestination
- rawSource types.ImageSource
+ dest private.ImageDestination
+ rawSource private.ImageSource
reportWriter io.Writer
progressOutput io.Writer
progressInterval time.Duration
@@ -80,13 +79,13 @@ type copier struct {
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
- c *copier
- manifestUpdates *types.ManifestUpdateOptions
- src types.Image
- diffIDsAreNeeded bool
- canModifyManifest bool
- canSubstituteBlobs bool
- ociEncryptLayers *[]int
+ c *copier
+ manifestUpdates *types.ManifestUpdateOptions
+ src *image.SourcedImage
+ diffIDsAreNeeded bool
+ cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+ canSubstituteBlobs bool
+ ociEncryptLayers *[]int
}
const (
@@ -122,17 +121,25 @@ type ImageListSelection int
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
type Options struct {
- RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
- SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
- ReportWriter io.Writer
- SourceCtx *types.SystemContext
- DestinationCtx *types.SystemContext
- ProgressInterval time.Duration // time to wait between reports to signal the progress channel
- Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+ RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
+ SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+ SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`.
+ SignBySigstorePrivateKeyFile string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path.
+ SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`.
+ SignIdentity reference.Named // Identity to use when signing, defaults to the docker reference of the destination
+ ReportWriter io.Writer
+ SourceCtx *types.SystemContext
+ DestinationCtx *types.SystemContext
+ ProgressInterval time.Duration // time to wait between reports to signal the progress channel
+ Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+
+ // Preserve digests, and fail if we cannot.
+ PreserveDigests bool
// manifest MIME type of image set by user. "" is default and means use the autodetection to determine the manifest MIME type
ForceManifestMIMEType string
ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
+
// If OciEncryptConfig is non-nil, it indicates that an image should be encrypted.
// The encryption options is derived from the construction of EncryptConfig object.
// Note: During initial encryption process of a layer, the resultant digest is not known
@@ -170,7 +177,7 @@ func validateImageListSelection(selection ImageListSelection) error {
case CopySystemImage, CopyAllImages, CopySpecificImages:
return nil
default:
- return errors.Errorf("Invalid value for options.ImageListSelection: %d", selection)
+ return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection)
}
}
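This hunk is representative of a change applied throughout the file: github.com/pkg/errors is dropped in favor of the standard library, with errors.Wrapf(err, "msg") becoming fmt.Errorf("msg: %w", err). The %w verb keeps the cause inspectable with errors.Is/errors.As, which later hunks depend on. A small check of that equivalence:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("/nonexistent")
	// Before: errors.Wrapf(err, "initializing source %s", name)
	// After:  fmt.Errorf("initializing source %s: %w", name, err)
	wrapped := fmt.Errorf("initializing source %s: %w", "docker://example", err)
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true: %w preserves the chain
	fmt.Println(wrapped)
}
```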
@@ -191,38 +198,48 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
return nil, err
}
- reportWriter := ioutil.Discard
+ reportWriter := io.Discard
if options.ReportWriter != nil {
reportWriter = options.ReportWriter
}
- dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
+ publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
if err != nil {
- return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef))
+ return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err)
}
+ dest := imagedestination.FromPublic(publicDest)
defer func() {
if err := dest.Close(); err != nil {
- retErr = errors.Wrapf(retErr, " (dest: %v)", err)
+ if retErr != nil {
+ retErr = fmt.Errorf(" (dest: %v): %w", err, retErr)
+ } else {
+ retErr = fmt.Errorf(" (dest: %v)", err)
+ }
}
}()
- rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
+ publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
if err != nil {
- return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err)
}
+ rawSource := imagesource.FromPublic(publicRawSource)
defer func() {
if err := rawSource.Close(); err != nil {
- retErr = errors.Wrapf(retErr, " (src: %v)", err)
+ if retErr != nil {
+ retErr = fmt.Errorf(" (src: %v): %w", err, retErr)
+ } else {
+ retErr = fmt.Errorf(" (src: %v)", err)
+ }
}
}()
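Both deferred blocks above implement the same pattern, replacing errors.Wrapf on a possibly-nil retErr: a Close failure must be reported whether or not the copy itself already failed, and when both fail, the close error is prepended while %w keeps the original retErr unwrappable. A condensed, runnable version of the pattern (with stand-in errors):

```go
package main

import (
	"errors"
	"fmt"
)

func copyWith(closeErr, copyErr error) (retErr error) {
	defer func() {
		if err := closeErr; err != nil { // stand-in for rawSource.Close()
			if retErr != nil {
				retErr = fmt.Errorf(" (src: %v): %w", err, retErr)
			} else {
				retErr = fmt.Errorf(" (src: %v)", err)
			}
		}
	}()
	return copyErr
}

func main() {
	fmt.Println(copyWith(errors.New("close failed"), errors.New("copy failed")))
	fmt.Println(copyWith(errors.New("close failed"), nil))
	fmt.Println(copyWith(nil, nil))
}
```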
// If reportWriter is not a TTY (e.g., when piping to a file), do not
// print the progress bars to avoid long and hard to parse output.
- // createProgressBar() will print a single line instead.
+ // Instead use printCopyInfo() to print single line "Copying ..." messages.
progressOutput := reportWriter
if !isTTY(reportWriter) {
- progressOutput = ioutil.Discard
+ progressOutput = io.Discard
}
c := &copier{
@@ -270,7 +287,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
multiImage, err := isMultiImage(ctx, unparsedToplevel)
if err != nil {
- return nil, errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err)
}
if !multiImage {
@@ -283,26 +300,26 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
// matches the current system to copy, and copy it.
mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "reading manifest for %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err)
}
manifestList, err := manifest.ListFromBlob(mfest, manifestType)
if err != nil {
- return nil, errors.Wrapf(err, "parsing primary manifest as list for %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err)
}
instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx
if err != nil {
- return nil, errors.Wrapf(err, "choosing an image from manifest list %s", transports.ImageName(srcRef))
+ return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err)
}
logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil {
- return nil, err
+ return nil, fmt.Errorf("copying system image from manifest list: %w", err)
}
} else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */
// If we were asked to copy multiple images and can't, that's an error.
if !supportsMultipleImages(c.dest) {
- return nil, errors.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
+ return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
}
// Copy some or all of the images.
switch options.ImageListSelection {
@@ -317,7 +334,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
- return nil, errors.Wrap(err, "committing the finished image")
+ return nil, fmt.Errorf("committing the finished image: %w", err)
}
return copiedManifest, nil
@@ -341,15 +358,10 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
// (possibly remote) destination). It returns true, plus the destination's manifest, MIME type, and digest, if they compare equal.
-func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
- srcManifest, _, err := src.Manifest(ctx)
+func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
+ srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
if err != nil {
- return false, nil, "", "", errors.Wrapf(err, "reading manifest from image")
- }
-
- srcManifestDigest, err := manifest.Digest(srcManifest)
- if err != nil {
- return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
+ return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
}
destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
@@ -366,7 +378,7 @@ func compareImageDestinationManifestEqual(ctx context.Context, options *Options,
destManifestDigest, err := manifest.Digest(destManifest)
if err != nil {
- return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
+ return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
}
logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
@@ -384,33 +396,50 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
// Parse the list and get a copy of the original value after it's re-encoded.
manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "reading manifest list")
+ return nil, fmt.Errorf("reading manifest list: %w", err)
}
originalList, err := manifest.ListFromBlob(manifestList, manifestType)
if err != nil {
- return nil, errors.Wrapf(err, "parsing manifest list %q", string(manifestList))
+ return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err)
}
updatedList := originalList.Clone()
- // Read and/or clear the set of signatures for this list.
- var sigs [][]byte
- if options.RemoveSignatures {
- sigs = [][]byte{}
- } else {
- c.Printf("Getting image list signatures\n")
- s, err := c.rawSource.GetSignatures(ctx, nil)
- if err != nil {
- return nil, errors.Wrap(err, "reading signatures")
- }
- sigs = s
+ sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options,
+ "Getting image list signatures",
+ "Checking if image list destination supports signatures")
+ if err != nil {
+ return nil, err
}
- if len(sigs) != 0 {
- c.Printf("Checking if image list destination supports signatures\n")
- if err := c.dest.SupportsSignatures(ctx); err != nil {
- return nil, errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
+
+ // If the destination is a digested reference, make a note of that, determine what digest value we're
+ // expecting, and check that the source manifest matches it.
+ destIsDigestedReference := false
+ if named := c.dest.Reference().DockerReference(); named != nil {
+ if digested, ok := named.(reference.Digested); ok {
+ destIsDigestedReference = true
+ matches, err := manifest.MatchesDigest(manifestList, digested.Digest())
+ if err != nil {
+ return nil, fmt.Errorf("computing digest of source image's manifest: %w", err)
+ }
+ if !matches {
+ return nil, errors.New("Digest of source image's manifest would not match destination reference")
+ }
}
}
- canModifyManifestList := (len(sigs) == 0)
+
+ // Determine if we're allowed to modify the manifest list.
+ // If we can, set to the empty string. If we can't, set to the reason why.
+ // Compare, and perhaps keep in sync with, the version in copyOneImage.
+ cannotModifyManifestListReason := ""
+ if len(sigs) > 0 {
+ cannotModifyManifestListReason = "Would invalidate signatures"
+ }
+ if destIsDigestedReference {
+ cannotModifyManifestListReason = "Destination specifies a digest"
+ }
+ if options.PreserveDigests {
+ cannotModifyManifestListReason = "Instructed to preserve digests"
+ }
// Determine if we'll need to convert the manifest list to a different format.
forceListMIMEType := options.ForceManifestMIMEType
@@ -422,11 +451,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
}
selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
if err != nil {
- return nil, errors.Wrapf(err, "determining manifest list type to write to destination")
+ return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err)
}
if selectedListType != originalList.MIMEType() {
- if !canModifyManifestList {
- return nil, errors.Errorf("manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType)
+ if cannotModifyManifestListReason != "" {
+ return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason)
}
}
@@ -464,7 +493,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err)
}
instancesCopied++
// Record the result of a possible conversion here.
@@ -478,7 +507,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
// Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
if err = updatedList.UpdateInstances(updates); err != nil {
- return nil, errors.Wrapf(err, "updating manifest list")
+ return nil, fmt.Errorf("updating manifest list: %w", err)
}
// Iterate through supported list types, preferred format first.
@@ -493,7 +522,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
if thisListType != updatedList.MIMEType() {
attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
if err != nil {
- return nil, errors.Wrapf(err, "converting manifest list to list with MIME type %q", thisListType)
+ return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err)
}
}
@@ -501,17 +530,17 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
// by serializing them both so that we can compare them.
attemptedManifestList, err := attemptedList.Serialize()
if err != nil {
- return nil, errors.Wrapf(err, "encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances())
+ return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err)
}
originalManifestList, err := originalList.Serialize()
if err != nil {
- return nil, errors.Wrapf(err, "encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances())
+ return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err)
}
// If we can't just use the original value, but we have to change it, flag an error.
if !bytes.Equal(attemptedManifestList, originalManifestList) {
- if !canModifyManifestList {
- return nil, errors.Errorf(" manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType)
+ if cannotModifyManifestListReason != "" {
+ return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason)
}
logrus.Debugf("Manifest list has been updated")
} else {
@@ -536,7 +565,14 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
// Sign the manifest list.
if options.SignBy != "" {
- newSig, err := c.createSignature(manifestList, options.SignBy)
+ newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase, options.SignIdentity)
+ if err != nil {
+ return nil, err
+ }
+ sigs = append(sigs, newSig)
+ }
+ if options.SignBySigstorePrivateKeyFile != "" {
+ newSig, err := c.createSigstoreSignature(manifestList, options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase, options.SignIdentity)
if err != nil {
return nil, err
}
@@ -544,8 +580,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
}
c.Printf("Storing list signatures\n")
- if err := c.dest.PutSignatures(ctx, sigs, nil); err != nil {
- return nil, errors.Wrap(err, "writing signatures")
+ if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
+ return nil, fmt.Errorf("writing signatures: %w", err)
}
return manifestList, nil
@@ -559,7 +595,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
multiImage, err := isMultiImage(ctx, unparsedImage)
if err != nil {
// FIXME FIXME: How to name a reference for the sub-image?
- return nil, "", "", errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
+ return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
}
if multiImage {
return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
@@ -569,11 +605,11 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// (The multiImage check above only matches the MIME type, which we have received anyway.
// Actual parsing of anything should be deferred.)
if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
- return nil, "", "", errors.Wrap(err, "Source image rejected")
+ return nil, "", "", fmt.Errorf("Source image rejected: %w", err)
}
src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
if err != nil {
- return nil, "", "", errors.Wrapf(err, "initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
+ return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
}
// If the destination is a digested reference, make a note of that, determine what digest value we're
@@ -583,22 +619,18 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
if named := c.dest.Reference().DockerReference(); named != nil {
if digested, ok := named.(reference.Digested); ok {
destIsDigestedReference = true
- sourceManifest, _, err := src.Manifest(ctx)
+ matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
if err != nil {
- return nil, "", "", errors.Wrapf(err, "reading manifest from source image")
- }
- matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
- if err != nil {
- return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
+ return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
manifestList, _, err := unparsedToplevel.Manifest(ctx)
if err != nil {
- return nil, "", "", errors.Wrapf(err, "reading manifest from source image")
+ return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err)
}
matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
if err != nil {
- return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
+ return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
@@ -611,22 +643,25 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
return nil, "", "", err
}
- var sigs [][]byte
- if options.RemoveSignatures {
- sigs = [][]byte{}
- } else {
- c.Printf("Getting image source signatures\n")
- s, err := src.Signatures(ctx)
- if err != nil {
- return nil, "", "", errors.Wrap(err, "reading signatures")
- }
- sigs = s
+ sigs, err := c.sourceSignatures(ctx, src, options,
+ "Getting image source signatures",
+ "Checking if image destination supports signatures")
+ if err != nil {
+ return nil, "", "", err
}
- if len(sigs) != 0 {
- c.Printf("Checking if image destination supports signatures\n")
- if err := c.dest.SupportsSignatures(ctx); err != nil {
- return nil, "", "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
- }
+
+ // Determine if we're allowed to modify the manifest.
+ // If we can, set to the empty string. If we can't, set to the reason why.
+ // Compare, and perhaps keep in sync with, the version in copyMultipleImages.
+ cannotModifyManifestReason := ""
+ if len(sigs) > 0 {
+ cannotModifyManifestReason = "Would invalidate signatures"
+ }
+ if destIsDigestedReference {
+ cannotModifyManifestReason = "Destination specifies a digest"
+ }
+ if options.PreserveDigests {
+ cannotModifyManifestReason = "Instructed to preserve digests"
}
ic := imageCopier{
@@ -634,16 +669,18 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
src: src,
// diffIDsAreNeeded is computed later
- canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
- ociEncryptLayers: options.OciEncryptLayers,
- }
- // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
- // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
- // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
- // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
- // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
- // and we would reuse and sign it.
- ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == ""
+ cannotModifyManifestReason: cannotModifyManifestReason,
+ ociEncryptLayers: options.OciEncryptLayers,
+ }
+ // Decide whether we can substitute blobs with semantic equivalents:
+ // - Don’t do that if we can’t modify the manifest at all
+ // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
+ // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
+ // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
+ // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
+ // and we would reuse and sign it.
+ ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" && options.SignBySigstorePrivateKeyFile == ""
if err := ic.updateEmbeddedDockerReference(); err != nil {
return nil, "", "", err
@@ -651,21 +688,30 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
- // We compute preferredManifestMIMEType only to show it in error messages.
- // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
- preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType, destRequiresOciEncryption)
+ manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
+ srcMIMEType: ic.src.ManifestMIMEType,
+ destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
+ forceManifestMIMEType: options.ForceManifestMIMEType,
+ requiresOCIEncryption: destRequiresOciEncryption,
+ cannotModifyManifestReason: ic.cannotModifyManifestReason,
+ })
if err != nil {
return nil, "", "", err
}
+ // We set up this part of ic.manifestUpdates quite early, not just around the
+ // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
+ // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
+ // on the expected destination format.
+ if manifestConversionPlan.preferredMIMETypeNeedsConversion {
+ ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType
+ }
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
- // If encrypted and decryption keys provided, we should try to decrypt
- ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil
// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
if options.OptimizeDestinationImageAlreadyExists {
- shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
+ shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" || options.SignBySigstorePrivateKeyFile != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
noPendingManifestUpdates := ic.noPendingManifestUpdates()
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
@@ -693,32 +739,34 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
// we're altering how they're compressed. If the process succeeds, fine…
manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
- retManifestType = preferredManifestMIMEType
+ retManifestType = manifestConversionPlan.preferredMIMEType
if err != nil {
- logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
+ logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
// … if it fails, and the failure is either because the manifest is rejected by the registry, or
// because we failed to create a manifest of the specified type because the specific manifest type
// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
// have other options available that could still succeed.
- _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError)
- _, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError)
- if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 {
+ var manifestTypeRejectedError types.ManifestTypeRejectedError
+ var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
+ isManifestRejected := errors.As(err, &manifestTypeRejectedError)
+ isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
+ if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
// We don’t have other options.
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
// Don’t bother the user with MIME types if we have no choice.
return nil, "", "", err
}
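The errors.As calls above are the behavioral heart of this hunk: the old errors.Cause(err).(types.ManifestTypeRejectedError) type assertion only unwraps pkg/errors wrappers, while errors.As walks any chain built with %w. A self-contained sketch with a local stand-in error type (not the real types.ManifestTypeRejectedError):

```go
package main

import (
	"errors"
	"fmt"
)

// manifestTypeRejectedError is a local stand-in for types.ManifestTypeRejectedError.
type manifestTypeRejectedError struct{ Err error }

func (e manifestTypeRejectedError) Error() string { return "manifest rejected: " + e.Err.Error() }

func main() {
	err := fmt.Errorf("writing manifest: %w", manifestTypeRejectedError{Err: errors.New("HTTP 415")})
	// Old style: `_, ok := errors.Cause(err).(manifestTypeRejectedError)` would
	// miss this, because the wrapper was built with fmt.Errorf and %w.
	var rejected manifestTypeRejectedError
	fmt.Println(errors.As(err, &rejected)) // true: errors.As walks the %w chain
}
```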
- // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
+ // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
// So if we are here, we will definitely be trying to convert the manifest.
- // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason,
+ // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
// so let’s bail out early and with a better error message.
- if !ic.canModifyManifest {
- return nil, "", "", errors.Wrap(err, "Writing manifest failed (and converting it is not possible, image is signed or the destination specifies a digest)")
+ if ic.cannotModifyManifestReason != "" {
+ return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
}
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
- errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)}
- for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
+ errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)}
+ for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates {
logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
@@ -744,7 +792,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
}
if options.SignBy != "" {
- newSig, err := c.createSignature(manifestBytes, options.SignBy)
+ newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase, options.SignIdentity)
+ if err != nil {
+ return nil, "", "", err
+ }
+ sigs = append(sigs, newSig)
+ }
+ if options.SignBySigstorePrivateKeyFile != "" {
+ newSig, err := c.createSigstoreSignature(manifestBytes, options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase, options.SignIdentity)
if err != nil {
return nil, "", "", err
}
@@ -752,8 +807,8 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
}
c.Printf("Storing signatures\n")
- if err := c.dest.PutSignatures(ctx, sigs, targetInstance); err != nil {
- return nil, "", "", errors.Wrap(err, "writing signatures")
+ if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
+ return nil, "", "", fmt.Errorf("writing signatures: %w", err)
}
return manifestBytes, retManifestType, retManifestDigest, nil
@@ -773,11 +828,11 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
if dest.MustMatchRuntimeOS() {
c, err := src.OCIConfig(ctx)
if err != nil {
- return errors.Wrapf(err, "parsing image configuration")
+ return fmt.Errorf("parsing image configuration: %w", err)
}
wantedPlatforms, err := platform.WantedPlatforms(sys)
if err != nil {
- return errors.Wrapf(err, "getting current platform information %#v", sys)
+ return fmt.Errorf("getting current platform information %#v: %w", sys, err)
}
options := newOrderedSet()
@@ -813,9 +868,9 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
return nil // No reference embedded in the manifest, or it matches destRef already.
}
- if !ic.canModifyManifest {
- return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which is not possible (image is signed or the destination specifies a digest)",
- transports.ImageName(ic.c.dest.Reference()), destRef.String())
+ if ic.cannotModifyManifestReason != "" {
+ return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q",
+ transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason)
}
ic.manifestUpdates.EmbeddedDockerReference = destRef
return nil
@@ -833,7 +888,7 @@ func isTTY(w io.Writer) bool {
return false
}
-// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
+// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
func (ic *imageCopier) copyLayers(ctx context.Context) error {
srcInfos := ic.src.LayerInfos()
numLayers := len(srcInfos)
@@ -844,8 +899,8 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
srcInfosUpdated := false
// If we only need to check authorization, no updates required.
if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
- if !ic.canModifyManifest {
- return errors.Errorf("Copying this image requires changing layer representation, which is not possible (image is signed or the destination specifies a digest)")
+ if ic.cannotModifyManifestReason != "" {
+ return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
}
srcInfos = updatedSrcInfos
srcInfosUpdated = true
@@ -859,11 +914,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// The manifest is used to determine whether a given
// layer is empty.
- manifestBlob, manifestType, err := ic.src.Manifest(ctx)
- if err != nil {
- return err
- }
- man, err := manifest.FromBlob(manifestBlob, manifestType)
+ man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
if err != nil {
return err
}
@@ -973,10 +1024,10 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool {
// stores the resulting config and manifest to the destination, and returns the stored manifest
// and its digest.
func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
- pendingImage := ic.src
+ var pendingImage types.Image = ic.src
if !ic.noPendingManifestUpdates() {
- if !ic.canModifyManifest {
- return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
+ if ic.cannotModifyManifestReason != "" {
+ return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
}
if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
@@ -985,20 +1036,20 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
// when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
- return nil, "", errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
+ return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
}
pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
if err != nil {
- return nil, "", errors.Wrap(err, "creating an updated image manifest")
+ return nil, "", fmt.Errorf("creating an updated image manifest: %w", err)
}
pendingImage = pi
}
man, _, err := pendingImage.Manifest(ctx)
if err != nil {
- return nil, "", errors.Wrap(err, "reading manifest")
+ return nil, "", fmt.Errorf("reading manifest: %w", err)
}
- if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
+ if err := ic.copyConfig(ctx, pendingImage); err != nil {
return nil, "", err
}
@@ -1011,129 +1062,47 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
instanceDigest = &manifestDigest
}
if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
- return nil, "", errors.Wrapf(err, "writing manifest %q", string(man))
+ logrus.Debugf("Error %v while writing manifest %q", err, string(man))
+ return nil, "", fmt.Errorf("writing manifest: %w", err)
}
return man, manifestDigest, nil
}
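The PutManifest failure path above trades a verbose wrapped error for a short one plus a debug log: the full manifest body can be kilobytes of JSON, valuable when debugging but unreadable in a user-facing error. A sketch of the same split (writeManifest is a hypothetical stand-in):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/sirupsen/logrus"
)

func writeManifest(man []byte) error { return errors.New("HTTP 400") } // hypothetical stand-in

func store(man []byte) error {
	if err := writeManifest(man); err != nil {
		// Keep the (possibly huge) manifest body at debug level only; the
		// returned error stays short but still wraps the cause.
		logrus.Debugf("Error %v while writing manifest %q", err, string(man))
		return fmt.Errorf("writing manifest: %w", err)
	}
	return nil
}

func main() {
	logrus.SetLevel(logrus.DebugLevel)
	fmt.Println(store([]byte(`{"schemaVersion": 2}`)))
}
```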
-// newProgressPool creates a *mpb.Progress.
-// The caller must eventually call pool.Wait() after the pool will no longer be updated.
-// NOTE: Every progress bar created within the progress pool must either successfully
-// complete or be aborted, or pool.Wait() will hang. That is typically done
-// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
-func (c *copier) newProgressPool() *mpb.Progress {
- return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
-}
-
-// customPartialBlobCounter provides a decorator function for the partial blobs retrieval progress bar
-func customPartialBlobCounter(filler interface{}, wcc ...decor.WC) decor.Decorator {
- producer := func(filler interface{}) decor.DecorFunc {
- return func(s decor.Statistics) string {
- if s.Total == 0 {
- pairFmt := "%.1f / %.1f (skipped: %.1f)"
- return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
- }
- pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
- percentage := 100.0 * float64(s.Refill) / float64(s.Total)
- return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
- }
- }
- return decor.Any(producer(filler), wcc...)
-}
-
-// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
-// is ioutil.Discard, the progress bar's output will be discarded
-// NOTE: Every progress bar created within a progress pool must either successfully
-// complete or be aborted, or pool.Wait() will hang. That is typically done
-// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
-func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
- // shortDigestLen is the length of the digest used for blobs.
- const shortDigestLen = 12
-
- prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
- // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
- maxPrefixLen := len("Copying blob ") + shortDigestLen
- if len(prefix) > maxPrefixLen {
- prefix = prefix[:maxPrefixLen]
- }
-
- // onComplete will replace prefix once the bar/spinner has completed
- onComplete = prefix + " " + onComplete
-
- // Use a normal progress bar when we know the size (i.e., size > 0).
- // Otherwise, use a spinner to indicate that something's happening.
- var bar *mpb.Bar
- sstyle := mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft()
- if info.Size > 0 {
- if partial {
- bar = pool.AddBar(info.Size,
- mpb.BarFillerClearOnComplete(),
- mpb.PrependDecorators(
- decor.OnComplete(decor.Name(prefix), onComplete),
- ),
- mpb.AppendDecorators(
- customPartialBlobCounter(sstyle.Build()),
- ),
- )
- } else {
- bar = pool.AddBar(info.Size,
- mpb.BarFillerClearOnComplete(),
- mpb.PrependDecorators(
- decor.OnComplete(decor.Name(prefix), onComplete),
- ),
- mpb.AppendDecorators(
- decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
- ),
- )
- }
- } else {
- bar = pool.Add(0,
- sstyle.Build(),
- mpb.BarFillerClearOnComplete(),
- mpb.PrependDecorators(
- decor.OnComplete(decor.Name(prefix), onComplete),
- ),
- )
- }
- if c.progressOutput == ioutil.Discard {
- c.Printf("Copying %s %s\n", kind, info.Digest)
- }
- return bar
-}
-
// copyConfig copies config.json, if any, from src to dest.
-func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
+func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo()
if srcInfo.Digest != "" {
- if err := c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+ if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
return fmt.Errorf("copying config: %w", err)
}
- defer c.concurrentBlobCopiesSemaphore.Release(1)
-
- configBlob, err := src.ConfigBlob(ctx)
- if err != nil {
- return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
- }
+ defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
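concurrentBlobCopiesSemaphore is a golang.org/x/sync/semaphore.Weighted (the package is imported at the top of this file), and its Acquire can only fail with ctx.Err(), which is why the error message above blames the copy rather than the semaphore. A minimal sketch of the same bounded-concurrency pattern:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(2) // at most two blob copies in flight
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("copying config:", err) // only a canceled/expired ctx lands here
			return
		}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			defer sem.Release(1)
			fmt.Println("copying blob", i)
		}(i)
	}
	wg.Wait()
}
```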
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
- progressPool := c.newProgressPool()
+ progressPool := ic.c.newProgressPool()
defer progressPool.Wait()
- bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+ bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
defer bar.Abort(false)
+ ic.c.printCopyInfo("config", srcInfo)
+
+ configBlob, err := src.ConfigBlob(ctx)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err)
+ }
- destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
+ destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
if err != nil {
return types.BlobInfo{}, err
}
- bar.SetTotal(int64(len(configBlob)), true)
+
+ bar.mark100PercentComplete()
return destInfo, nil
}()
if err != nil {
return err
}
if destInfo.Digest != srcInfo.Digest {
- return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+ return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
}
}
return nil
@@ -1170,49 +1139,44 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}
}
- cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
- // Diffs are needed if we are encrypting an image or trying to decrypt an image
- diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
+ ic.c.printCopyInfo("blob", srcInfo)
- // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
- if !diffIDIsNeeded {
+ cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+ diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+ // When encrypting or decrypting, only use the simple code path. We might be able to optimize more
+ // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
+ // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
+ encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
+ canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
+
+ // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
+ if canAvoidProcessingCompleteLayer {
+ canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
+ logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
+ srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
+ canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
// the ImageDestination interface lets us pass in.
- var (
- blobInfo types.BlobInfo
- reused bool
- err error
- )
- // Note: the storage destination optimizes the committing of
- // layers which requires passing the index of the layer.
- // Hence, we need to special case and cast.
- dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions)
- if ok {
- options := internalTypes.TryReusingBlobOptions{
- Cache: ic.c.blobInfoCache,
- CanSubstitute: ic.canSubstituteBlobs,
- SrcRef: srcRef,
- EmptyLayer: emptyLayer,
- }
- options.LayerIndex = &layerIndex
- reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options)
- } else {
- reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
- }
-
+ reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
+ Cache: ic.c.blobInfoCache,
+ CanSubstitute: canSubstitute,
+ EmptyLayer: emptyLayer,
+ LayerIndex: &layerIndex,
+ SrcRef: srcRef,
+ })
if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest)
+ return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
}
if reused {
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
func() { // A scope for defer
- bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists")
+ bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists")
defer bar.Abort(false)
- bar.SetTotal(0, true)
+ bar.mark100PercentComplete()
}()
// Throw an event that the layer has been skipped
@@ -1243,9 +1207,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// of the source file are not known yet and must be fetched.
// Attempt a partial retrieval only when the source allows retrieving a blob partially and
// the destination has support for it.
- imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable)
- imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial)
- if okSource && okDest && !diffIDIsNeeded {
+ if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
hideProgressBar := true
@@ -1253,33 +1215,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
bar.Abort(hideProgressBar)
}()
- progress := make(chan int64)
- terminate := make(chan interface{})
-
- defer close(terminate)
- defer close(progress)
-
- proxy := imageSourceSeekableProxy{
- source: imgSource,
- progress: progress,
+ proxy := blobChunkAccessorProxy{
+ wrapped: ic.c.rawSource,
+ bar: bar,
}
- go func() {
- for {
- select {
- case written := <-progress:
- bar.IncrInt64(written)
- case <-terminate:
- return
- }
- }
- }()
-
- bar.SetTotal(srcInfo.Size, false)
- info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache)
+ info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
if err == nil {
- bar.SetRefill(srcInfo.Size - bar.Current())
- bar.SetCurrent(srcInfo.Size)
- bar.SetTotal(srcInfo.Size, true)
+ if srcInfo.Size != -1 {
+ bar.SetRefill(srcInfo.Size - bar.Current())
+ }
+ bar.mark100PercentComplete()
hideProgressBar = false
logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
return true, info
@@ -1292,16 +1237,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}
// Fallback: copy the layer, computing the diffID if we need to do so
- srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
- if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
- }
- defer srcStream.Close()
-
return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
defer bar.Abort(false)
+ srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
+ if err != nil {
+ return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
+ }
+ defer srcStream.Close()
+
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
if err != nil {
return types.BlobInfo{}, "", err
@@ -1314,17 +1259,25 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
return types.BlobInfo{}, "", ctx.Err()
case diffIDResult := <-diffIDChan:
if diffIDResult.err != nil {
- return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID")
+ return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err)
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
- // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
- // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
- ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
+ // Don’t record any associations that involve encrypted data. This is a bit crude,
+ // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+ // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+ // This crude approach also means we don’t need to record whether a blob is encrypted
+ // in the blob info cache (which would probably be necessary for any more complex logic),
+ // and the simplicity is attractive.
+ if !encryptingOrDecrypting {
+ // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+ // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+ ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
+ }
diffID = diffIDResult.digest
}
}
- bar.SetTotal(srcInfo.Size, true)
+ bar.mark100PercentComplete()
return blobInfo, diffID, nil
}()
}
@@ -1334,7 +1287,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// perhaps (de/re/)compressing the stream,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
+ diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
@@ -1359,7 +1312,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
}
}
- blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
+ blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
@@ -1389,351 +1342,3 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF
return digest.Canonical.FromReader(stream)
}
-
-// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating the error happened during read.
-// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
-type errorAnnotationReader struct {
- reader io.Reader
-}
-
-// Read annotates the error happened during read
-func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
- n, err = r.reader.Read(b)
- if err != io.EOF {
- return n, errors.Wrapf(err, "happened during read")
- }
- return n, err
-}
-
-// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
-// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
-// perhaps (de/re/)compressing it if canModifyBlob,
-// and returns a complete blobInfo of the copied blob.
-func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
- canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
- if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
- canModifyBlob = false
- }
-
- // The copying happens through a pipeline of connected io.Readers.
- // === Input: srcStream
-
- // === Process input through digestingReader to validate against the expected digest.
- // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
- // use a separate validation failure indicator.
- // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
- // dest.PutBlob may detect that the layer already exists, in which case we don't
- // read stream to the end, and validation does not happen.
- digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
- }
- var destStream io.Reader = digestingReader
-
- // === Decrypt the stream, if required.
- var decrypted bool
- if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
- newDesc := imgspecv1.Descriptor{
- Annotations: srcInfo.Annotations,
- }
-
- var d digest.Digest
- destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
- }
-
- srcInfo.Digest = d
- srcInfo.Size = -1
- for k := range srcInfo.Annotations {
- if strings.HasPrefix(k, "org.opencontainers.image.enc") {
- delete(srcInfo.Annotations, k)
- }
- }
- decrypted = true
- }
-
- // === Detect compression of the input stream.
- // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
- compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
- }
- isCompressed := decompressor != nil
- if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() {
- logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name())
- }
-
- // === Update progress bars
- destStream = bar.ProxyReader(destStream)
-
- // === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
- var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
- if getOriginalLayerCopyWriter != nil {
- destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
- originalLayerReader = destStream
- }
-
- compressionMetadata := map[string]string{}
- // === Deal with layer compression/decompression if necessary
- // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
- // short-circuit conditions
- var inputInfo types.BlobInfo
- var compressionOperation types.LayerCompression
- var uploadCompressionFormat *compressiontypes.Algorithm
- srcCompressorName := internalblobinfocache.Uncompressed
- if isCompressed {
- srcCompressorName = compressionFormat.Name()
- }
- var uploadCompressorName string
- if canModifyBlob && isOciEncrypted(srcInfo.MediaType) {
- // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted
- logrus.Debugf("Using original blob without modification for encrypted blob")
- compressionOperation = types.PreserveOriginal
- inputInfo = srcInfo
- srcCompressorName = internalblobinfocache.UnknownCompression
- uploadCompressionFormat = nil
- uploadCompressorName = internalblobinfocache.UnknownCompression
- } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
- logrus.Debugf("Compressing blob on the fly")
- compressionOperation = types.Compress
- pipeReader, pipeWriter := io.Pipe()
- defer pipeReader.Close()
-
- if c.compressionFormat != nil {
- uploadCompressionFormat = c.compressionFormat
- } else {
- uploadCompressionFormat = defaultCompressionFormat
- }
- // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
- // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
- // we don’t care.
- go c.compressGoroutine(pipeWriter, destStream, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
- destStream = pipeReader
- inputInfo.Digest = ""
- inputInfo.Size = -1
- uploadCompressorName = uploadCompressionFormat.Name()
- } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed &&
- c.compressionFormat != nil && c.compressionFormat.Name() != compressionFormat.Name() {
- // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
- // re-compressed using the desired format.
- logrus.Debugf("Blob will be converted")
-
- compressionOperation = types.PreserveOriginal
- s, err := decompressor(destStream)
- if err != nil {
- return types.BlobInfo{}, err
- }
- defer s.Close()
-
- pipeReader, pipeWriter := io.Pipe()
- defer pipeReader.Close()
-
- uploadCompressionFormat = c.compressionFormat
- go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
-
- destStream = pipeReader
- inputInfo.Digest = ""
- inputInfo.Size = -1
- uploadCompressorName = uploadCompressionFormat.Name()
- } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
- logrus.Debugf("Blob will be decompressed")
- compressionOperation = types.Decompress
- s, err := decompressor(destStream)
- if err != nil {
- return types.BlobInfo{}, err
- }
- defer s.Close()
- destStream = s
- inputInfo.Digest = ""
- inputInfo.Size = -1
- uploadCompressionFormat = nil
- uploadCompressorName = internalblobinfocache.Uncompressed
- } else {
- // PreserveOriginal might also need to recompress the original blob if the desired compression format is different.
- logrus.Debugf("Using original blob without modification")
- compressionOperation = types.PreserveOriginal
- inputInfo = srcInfo
- // Remember if the original blob was compressed, and if so how, so that if
- // LayerInfosForCopy() returned something that differs from what was in the
- // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
- // it will be able to correctly derive the MediaType for the copied blob.
- if isCompressed {
- uploadCompressionFormat = &compressionFormat
- } else {
- uploadCompressionFormat = nil
- }
- uploadCompressorName = srcCompressorName
- }
-
- // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
- var (
- encrypted bool
- finalizer ocicrypt.EncryptLayerFinalizer
- )
- if toEncrypt {
- if decrypted {
- return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
- }
-
- if !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
- var annotations map[string]string
- if !decrypted {
- annotations = srcInfo.Annotations
- }
- desc := imgspecv1.Descriptor{
- MediaType: srcInfo.MediaType,
- Digest: srcInfo.Digest,
- Size: srcInfo.Size,
- Annotations: annotations,
- }
-
- s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
- }
-
- destStream = s
- finalizer = fin
- inputInfo.Digest = ""
- inputInfo.Size = -1
- encrypted = true
- }
- }
-
- // === Report progress using the c.progress channel, if required.
- if c.progress != nil && c.progressInterval > 0 {
- progressReader := newProgressReader(
- destStream,
- c.progress,
- c.progressInterval,
- srcInfo,
- )
- defer progressReader.reportDone()
- destStream = progressReader
- }
-
- // === Finally, send the layer stream to dest.
- var uploadedInfo types.BlobInfo
- // Note: the storage destination optimizes the committing of layers
- // which requires passing the index of the layer. Hence, we need to
- // special case and cast.
- dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions)
- if ok {
- options := internalTypes.PutBlobOptions{
- Cache: c.blobInfoCache,
- IsConfig: isConfig,
- EmptyLayer: emptyLayer,
- }
- if !isConfig {
- options.LayerIndex = &layerIndex
- }
- uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
- } else {
- uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig)
- }
- if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "writing blob")
- }
-
- uploadedInfo.Annotations = srcInfo.Annotations
-
- uploadedInfo.CompressionOperation = compressionOperation
- // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
- uploadedInfo.CompressionAlgorithm = uploadCompressionFormat
- if decrypted {
- uploadedInfo.CryptoOperation = types.Decrypt
- } else if encrypted {
- encryptAnnotations, err := finalizer()
- if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "Unable to finalize encryption")
- }
- uploadedInfo.CryptoOperation = types.Encrypt
- if uploadedInfo.Annotations == nil {
- uploadedInfo.Annotations = map[string]string{}
- }
- for k, v := range encryptAnnotations {
- uploadedInfo.Annotations[k] = v
- }
- }
-
- // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
- // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
- // So, read everything from originalLayerReader, which will cause the rest to be
- // sent there if we are not already at EOF.
- if getOriginalLayerCopyWriter != nil {
- logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
- _, err := io.Copy(ioutil.Discard, originalLayerReader)
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
- }
- }
-
- if digestingReader.validationFailed { // Coverage: This should never happen.
- return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
- }
- if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
- return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
- }
- if digestingReader.validationSucceeded {
- // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
- // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
- // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
- // (because inputInfo.Digest == "", this must have been computed afresh).
- switch compressionOperation {
- case types.PreserveOriginal:
- break // Do nothing, we have only one digest and we might not have even verified it.
- case types.Compress:
- c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
- case types.Decompress:
- c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
- default:
- return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
- }
- if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
- c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)
- }
- if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression {
- c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName)
- }
- }
-
- // Copy all the metadata generated by the compressor into the annotations.
- if uploadedInfo.Annotations == nil {
- uploadedInfo.Annotations = map[string]string{}
- }
- for k, v := range compressionMetadata {
- uploadedInfo.Annotations[k] = v
- }
-
- return uploadedInfo, nil
-}
-
-// doCompression reads all input from src and writes its compressed equivalent to dest.
-func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
- compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
- if err != nil {
- return err
- }
-
- buf := make([]byte, compressionBufferSize)
-
- _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
- if err != nil {
- compressor.Close()
- return err
- }
-
- return compressor.Close()
-}
-
-// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
-func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
- err := errors.New("Internal error: unexpected panic in compressGoroutine")
- defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
- _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
- }()
-
- err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
-}
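The deleted compressGoroutine depends on a subtle detail its own comment calls out: err must be evaluated lazily inside the deferred closure, so that a panic or an early failure still propagates a non-nil error through pipeWriter.CloseWithError to the reading side. A minimal, self-contained sketch of the same pattern, substituting stdlib compress/gzip for c/image's compression package (function and variable names here are illustrative, not the upstream code):

package main

import (
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"strings"
)

// compressGoroutine mirrors the pattern above: read everything from src,
// write the compressed stream to dest, and close dest with the final error.
// err is a named outer variable so the deferred closure sees its latest value.
func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
	err := errors.New("internal error: unexpected panic in compressGoroutine")
	defer func() {
		_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
	}()

	gz := gzip.NewWriter(dest)
	if _, err = io.Copy(gz, src); err != nil {
		gz.Close()
		return
	}
	err = gz.Close()
}

func main() {
	pr, pw := io.Pipe()
	go compressGoroutine(pw, strings.NewReader("hello, pipeline"))

	gz, err := gzip.NewReader(pr)
	if err != nil {
		panic(err)
	}
	defer gz.Close()
	out, err := io.ReadAll(gz)
	if err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped: %q\n", out)
}

If err were instead captured at defer time, CloseWithError(nil) would signal a clean EOF even after a panic, and the reader would silently receive a truncated stream.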
diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
index ccc9110ff90..901d10826f7 100644
--- a/vendor/github.com/containers/image/v5/copy/digesting_reader.go
+++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
@@ -1,11 +1,11 @@
package copy
import (
+ "fmt"
"hash"
"io"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
type digestingReader struct {
@@ -23,11 +23,11 @@ type digestingReader struct {
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
var digester digest.Digester
if err := expectedDigest.Validate(); err != nil {
- return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
+ return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
}
digestAlgorithm := expectedDigest.Algorithm()
if !digestAlgorithm.Available() {
- return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+ return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
}
digester = digestAlgorithm.Digester()
@@ -47,14 +47,14 @@ func (d *digestingReader) Read(p []byte) (int, error) {
// Coverage: This should not happen, the hash.Hash interface requires
// d.digest.Write to never return an error, and the io.Writer interface
// requires n2 == len(input) if no error is returned.
- return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n)
+ return 0, fmt.Errorf("updating digest during verification: %d vs. %d: %w", n2, n, err)
}
}
if err == io.EOF {
actualDigest := d.digester.Digest()
if actualDigest != d.expectedDigest {
d.validationFailed = true
- return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+ return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
}
d.validationSucceeded = true
}
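This hunk is representative of a mechanical change running through the whole vendor bump: github.com/pkg/errors is dropped in favor of the stdlib errors package plus fmt.Errorf with the %w verb. A small standalone example (readKey and the key path are made up for illustration) showing that the %w chain stays inspectable with errors.Is and errors.As, which is what errors.Wrap previously provided:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// readKey is a hypothetical helper; the point is the wrapping style.
func readKey(path string) ([]byte, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("reading signing key %q: %w", path, err)
	}
	return b, nil
}

func main() {
	_, err := readKey("/nonexistent/key.gpg")
	fmt.Println(err)

	// %w preserves the error chain for the standard inspection helpers.
	fmt.Println("not exist:", errors.Is(err, fs.ErrNotExist))

	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("failing op:", pathErr.Op, "on", pathErr.Path)
	}
}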
diff --git a/vendor/github.com/containers/image/v5/copy/encrypt.go b/vendor/github.com/containers/image/v5/copy/encrypt.go
deleted file mode 100644
index a18d6f1518f..00000000000
--- a/vendor/github.com/containers/image/v5/copy/encrypt.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package copy
-
-import (
- "strings"
-
- "github.com/containers/image/v5/types"
-)
-
-// isOciEncrypted returns a bool indicating if a mediatype is encrypted
-// This function will be moved to be part of OCI spec when adopted.
-func isOciEncrypted(mediatype string) bool {
- return strings.HasSuffix(mediatype, "+encrypted")
-}
-
-// isEncrypted checks if an image is encrypted
-func isEncrypted(i types.Image) bool {
- layers := i.LayerInfos()
- for _, l := range layers {
- if isOciEncrypted(l.MediaType) {
- return true
- }
- }
- return false
-}
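isOciEncrypted reappears unchanged in the new encryption.go below; it is nothing more than a suffix check on the media type, as this trivially runnable copy shows:

package main

import (
	"fmt"
	"strings"
)

// isOciEncrypted returns a bool indicating if a mediatype is encrypted
// (copied verbatim from the function in this diff).
func isOciEncrypted(mediatype string) bool {
	return strings.HasSuffix(mediatype, "+encrypted")
}

func main() {
	fmt.Println(isOciEncrypted("application/vnd.oci.image.layer.v1.tar+gzip+encrypted")) // true
	fmt.Println(isOciEncrypted("application/vnd.oci.image.layer.v1.tar+gzip"))           // false
}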
diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go
new file mode 100644
index 00000000000..5eae8bfcda2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/encryption.go
@@ -0,0 +1,129 @@
+package copy
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/types"
+ "github.com/containers/ocicrypt"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// isOciEncrypted returns a bool indicating if a mediatype is encrypted
+// This function will be moved to be part of OCI spec when adopted.
+func isOciEncrypted(mediatype string) bool {
+ return strings.HasSuffix(mediatype, "+encrypted")
+}
+
+// isEncrypted checks if an image is encrypted
+func isEncrypted(i types.Image) bool {
+ layers := i.LayerInfos()
+ for _, l := range layers {
+ if isOciEncrypted(l.MediaType) {
+ return true
+ }
+ }
+ return false
+}
+
+// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
+type bpDecryptionStepData struct {
+ decrypting bool // We are actually decrypting the stream
+}
+
+// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
+// srcInfo is only used for error messages.
+// Returns data for other steps; the caller should eventually use updateCryptoOperation.
+func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
+ if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil {
+ desc := imgspecv1.Descriptor{
+ Annotations: stream.info.Annotations,
+ }
+ reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false)
+ if err != nil {
+ return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = decryptedDigest
+ stream.info.Size = -1
+ for k := range stream.info.Annotations {
+ if strings.HasPrefix(k, "org.opencontainers.image.enc") {
+ delete(stream.info.Annotations, k)
+ }
+ }
+ return &bpDecryptionStepData{
+ decrypting: true,
+ }, nil
+ }
+ return &bpDecryptionStepData{
+ decrypting: false,
+ }, nil
+}
+
+// updateCryptoOperation sets *operation, if necessary.
+func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
+ if d.decrypting {
+ *operation = types.Decrypt
+ }
+}
+
+// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
+type bpEncryptionStepData struct {
+ encrypting bool // We are actually encrypting the stream
+ finalizer ocicrypt.EncryptLayerFinalizer
+}
+
+// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
+func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
+ decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
+ if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
+ var annotations map[string]string
+ if !decryptionStep.decrypting {
+ annotations = srcInfo.Annotations
+ }
+ desc := imgspecv1.Descriptor{
+ MediaType: srcInfo.MediaType,
+ Digest: srcInfo.Digest,
+ Size: srcInfo.Size,
+ Annotations: annotations,
+ }
+ reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc)
+ if err != nil {
+ return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = ""
+ stream.info.Size = -1
+ return &bpEncryptionStepData{
+ encrypting: true,
+ finalizer: finalizer,
+ }, nil
+ }
+ return &bpEncryptionStepData{
+ encrypting: false,
+ }, nil
+}
+
+// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
+func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error {
+ if !d.encrypting {
+ return nil
+ }
+
+ encryptAnnotations, err := d.finalizer()
+ if err != nil {
+ return fmt.Errorf("Unable to finalize encryption: %w", err)
+ }
+ *operation = types.Encrypt
+ if *annotations == nil {
+ *annotations = map[string]string{}
+ }
+ for k, v := range encryptAnnotations {
+ (*annotations)[k] = v
+ }
+ return nil
+}
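The new file is built around a "pipeline step" shape: a step may replace stream.reader and must then invalidate the stream's digest and size, and it returns a small data struct whose update method the caller applies to the final result once the copy is done. A toy model of that shape (sourceStream's real definition is not part of this hunk; the trim step and field names below are invented for illustration):

package main

import (
	"fmt"
	"io"
	"strings"
)

// sourceStream stands in for the struct the real pipeline threads through its
// steps: the current reader plus what we currently believe about the blob.
type sourceStream struct {
	reader io.Reader
	digest string // cleared once a step has invalidated it
	size   int64  // -1 once a step has invalidated it
}

// bpTrimStepData mimics bpDecryptionStepData: it records whether the step did
// anything, so the caller can apply the consequences to the final result later.
type bpTrimStepData struct {
	trimming bool
}

// trimStep conditionally replaces stream.reader; when it does, the digest and
// size known for the original bytes no longer apply, so it clears them.
func trimStep(stream *sourceStream, enable bool) *bpTrimStepData {
	if !enable {
		return &bpTrimStepData{trimming: false}
	}
	stream.reader = io.LimitReader(stream.reader, 5)
	stream.digest = ""
	stream.size = -1
	return &bpTrimStepData{trimming: true}
}

// updateNote is the analogue of updateCryptoOperation: only the step data
// knows whether the final result needs to be annotated.
func (d *bpTrimStepData) updateNote(note *string) {
	if d.trimming {
		*note = "trimmed"
	}
}

func main() {
	stream := &sourceStream{reader: strings.NewReader("hello, world"), digest: "sha256:original", size: 12}
	step := trimStep(stream, true)

	out, err := io.ReadAll(stream.reader)
	if err != nil {
		panic(err)
	}

	note := "unchanged"
	step.updateNote(&note)
	fmt.Printf("read %q; digest=%q size=%d note=%s\n", out, stream.digest, stream.size, note)
}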
diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
index b97edbf08b7..df12e50c0f6 100644
--- a/vendor/github.com/containers/image/v5/copy/manifest.go
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -2,11 +2,12 @@ package copy
import (
"context"
+ "errors"
+ "fmt"
"strings"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -38,31 +39,50 @@ func (os *orderedSet) append(s string) {
}
}
-// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest.
-// Note that the conversion will only happen later, through ic.src.UpdatedImage
-// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
-// and a list of other possible alternatives, in order.
-func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
- _, srcType, err := ic.src.Manifest(ctx)
- if err != nil { // This should have been cached?!
- return "", nil, errors.Wrap(err, "reading manifest")
- }
+// determineManifestConversionInputs contains the inputs for determineManifestConversion.
+type determineManifestConversionInputs struct {
+ srcMIMEType string // MIME type of the input manifest
+
+ destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()
+
+ forceManifestMIMEType string // User’s choice of forced manifest MIME type
+ requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption
+ cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+}
+
+// manifestConversionPlan contains the decisions made by determineManifestConversion.
+type manifestConversionPlan struct {
+ // The preferred manifest MIME type (whether we are converting to it or using it unmodified).
+ // We compute this only to show it in error messages; without having to add this context
+ // in an error message, we would be happy enough to know only that no conversion is needed.
+ preferredMIMEType string
+ preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step.
+ otherMIMETypeCandidates []string // Other possible alternatives, in order
+}
+
+// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in.
+func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) {
+ srcType := in.srcMIMEType
normalizedSrcType := manifest.NormalizedMIMEType(srcType)
if srcType != normalizedSrcType {
logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
srcType = normalizedSrcType
}
- if forceManifestMIMEType != "" {
- destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
+ destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes
+ if in.forceManifestMIMEType != "" {
+ destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
}
- if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
- return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
+ if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
+ return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
+ preferredMIMEType: srcType,
+ otherMIMETypeCandidates: []string{},
+ }, nil
}
supportedByDest := map[string]struct{}{}
for _, t := range destSupportedManifestMIMETypes {
- if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) {
+ if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
supportedByDest[t] = struct{}{}
}
}
@@ -79,13 +99,16 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
if _, ok := supportedByDest[srcType]; ok {
prioritizedTypes.append(srcType)
}
- if !ic.canModifyManifest {
- // We could also drop the !ic.canModifyManifest check and have the caller
+ if in.cannotModifyManifestReason != "" {
+ // We could also drop this check and have the caller
// make the choice; it is already doing that to an extent, to improve error
- // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion”
+ // messages. But it is nice to hide the “if we can't modify, do no conversion”
// special case in here; the caller can then worry (or not) only about a good UI.
logrus.Debugf("We can't modify the manifest, hoping for the best...")
- return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying?
+ return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying?
+ preferredMIMEType: srcType,
+ otherMIMETypeCandidates: []string{},
+ }, nil
}
// Then use our list of preferred types.
@@ -102,15 +125,17 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
- return "", nil, errors.New("Internal error: no candidate MIME types")
+ return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
}
- preferredType := prioritizedTypes.list[0]
- if preferredType != srcType {
- ic.manifestUpdates.ManifestMIMEType = preferredType
- } else {
+ res := manifestConversionPlan{
+ preferredMIMEType: prioritizedTypes.list[0],
+ otherMIMETypeCandidates: prioritizedTypes.list[1:],
+ }
+ res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType
+ if !res.preferredMIMETypeNeedsConversion {
logrus.Debugf("... will first try using the original manifest unmodified")
}
- return preferredType, prioritizedTypes.list[1:], nil
+ return res, nil
}
// isMultiImage returns true if img is a list of images
@@ -156,7 +181,7 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
if len(prioritizedTypes.list) == 0 {
- return "", nil, errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
+ return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
}
selectedType := prioritizedTypes.list[0]
otherSupportedTypes := prioritizedTypes.list[1:]
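determineManifestConversion is now a pure function over an inputs struct, returning a plan struct, with no *imageCopier receiver, so the decision logic can be table-tested in isolation. A heavily reduced sketch of that style (the real prioritization rules, encryption constraints, and MIME-type handling are richer than this):

package main

import "fmt"

// conversionInputs gathers everything the planner needs, like
// determineManifestConversionInputs above.
type conversionInputs struct {
	srcMIMEType        string
	destSupportedTypes []string
	forceMIMEType      string
}

// conversionPlan mirrors manifestConversionPlan: a preferred type, whether
// using it requires conversion, and ordered fallbacks.
type conversionPlan struct {
	preferredMIMEType string
	needsConversion   bool
	otherCandidates   []string
}

func planConversion(in conversionInputs) conversionPlan {
	supported := in.destSupportedTypes
	if in.forceMIMEType != "" {
		supported = []string{in.forceMIMEType}
	}
	if len(supported) == 0 {
		// Anything goes; use the original as is.
		return conversionPlan{preferredMIMEType: in.srcMIMEType}
	}
	for _, t := range supported {
		if t == in.srcMIMEType {
			// Prefer the source type unmodified; everything else is a fallback.
			others := []string{}
			for _, o := range supported {
				if o != t {
					others = append(others, o)
				}
			}
			return conversionPlan{preferredMIMEType: t, otherCandidates: others}
		}
	}
	return conversionPlan{
		preferredMIMEType: supported[0],
		needsConversion:   true,
		otherCandidates:   supported[1:],
	}
}

func main() {
	plan := planConversion(conversionInputs{
		srcMIMEType:        "application/vnd.docker.distribution.manifest.v2+json",
		destSupportedTypes: []string{"application/vnd.oci.image.manifest.v1+json"},
	})
	fmt.Printf("prefer %s (convert: %v), fallbacks: %v\n",
		plan.preferredMIMEType, plan.needsConversion, plan.otherCandidates)
}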
diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go
new file mode 100644
index 00000000000..85676f01c6b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go
@@ -0,0 +1,155 @@
+package copy
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+ "github.com/vbauerster/mpb/v7"
+ "github.com/vbauerster/mpb/v7/decor"
+)
+
+// newProgressPool creates a *mpb.Progress.
+// The caller must eventually call pool.Wait() after the pool will no longer be updated.
+// NOTE: Every progress bar created within the progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
+func (c *copier) newProgressPool() *mpb.Progress {
+ return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
+}
+
+// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
+func customPartialBlobDecorFunc(s decor.Statistics) string {
+ if s.Total == 0 {
+ pairFmt := "%.1f / %.1f (skipped: %.1f)"
+ return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
+ }
+ pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
+ percentage := 100.0 * float64(s.Refill) / float64(s.Total)
+ return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
+}
+
+// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
+type progressBar struct {
+ *mpb.Bar
+ originalSize int64 // or -1 if unknown
+}
+
+// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
+// is io.Discard, the progress bar's output will be discarded. Callers may call printCopyInfo()
+// to print a single line instead.
+//
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+//
+// As a convention, most users of progress bars should call mark100PercentComplete on full success;
+// we don't leave progress bars in a partial state when fully done
+// (even if we copied much less data than anticipated).
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
+ // shortDigestLen is the length of the digest used for blobs.
+ const shortDigestLen = 12
+
+ prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+ // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+ maxPrefixLen := len("Copying blob ") + shortDigestLen
+ if len(prefix) > maxPrefixLen {
+ prefix = prefix[:maxPrefixLen]
+ }
+
+ // onComplete will replace prefix once the bar/spinner has completed
+ onComplete = prefix + " " + onComplete
+
+ // Use a normal progress bar when we know the size (i.e., size > 0).
+ // Otherwise, use a spinner to indicate that something's happening.
+ var bar *mpb.Bar
+ if info.Size > 0 {
+ if partial {
+ bar = pool.AddBar(info.Size,
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.Any(customPartialBlobDecorFunc),
+ ),
+ )
+ } else {
+ bar = pool.AddBar(info.Size,
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+ ),
+ )
+ }
+ } else {
+ bar = pool.New(0,
+ mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ )
+ }
+ return &progressBar{
+ Bar: bar,
+ originalSize: info.Size,
+ }
+}
+
+// printCopyInfo prints a "Copying ..." message on the copier if the output is
+// set to `io.Discard`. In that case, the progress bars won't be rendered but
+// we still want to indicate when blobs and configs are copied.
+func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
+ if c.progressOutput == io.Discard {
+ c.Printf("Copying %s %s\n", kind, info.Digest)
+ }
+}
+
+// mark100PercentComplete marks the progress bar as 100% complete;
+// if necessary, it advances the current state up to the known total.
+func (bar *progressBar) mark100PercentComplete() {
+ if bar.originalSize > 0 {
+ // We can't call bar.SetTotal even if we wanted to; the total cannot be changed
+ // after a progress bar is created with a definite total.
+ bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
+ } else {
+ // -1 = unknown size
+ // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
+ // size (possible at least in theory), in mpb, zero-sized progress bars are treated
+ // as unknown size, in particular they are not configured to be marked as
+ // complete on bar.Current() reaching bar.total (because that would happen already
+ // when creating the progress bar).
+ // That means that we are both _allowed_ to call SetTotal, and we _have to_.
+ bar.SetTotal(-1, true) // total < 0: adopt bar.Current() as the total, and mark the bar as complete.
+ }
+}
+
+// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
+// with the number of received bytes.
+type blobChunkAccessorProxy struct {
+ wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
+ bar *progressBar // A progress bar updated with the number of bytes read so far
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap, and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
+ if err == nil {
+ total := int64(0)
+ for _, c := range chunks {
+ total += int64(c.Length)
+ }
+ s.bar.IncrInt64(total)
+ }
+ return rc, errs, err
+}
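The two completion paths that mark100PercentComplete distinguishes can be exercised against mpb/v7 directly. A small demo under the same conventions as the comments above (bar labels are arbitrary); note that pool.Wait() would hang if either bar were left incomplete:

package main

import (
	"os"
	"time"

	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(os.Stderr))

	// A bar created with a definite total completes when Current reaches it.
	sized := pool.AddBar(100, mpb.PrependDecorators(decor.Name("known size ")))

	// A zero-total bar behaves as "unknown size" and must be completed explicitly.
	spinner := pool.New(0,
		mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
		mpb.PrependDecorators(decor.Name("unknown size ")))

	sized.IncrBy(40) // pretend we copied less data than anticipated
	spinner.IncrInt64(12345)
	time.Sleep(100 * time.Millisecond)

	sized.SetCurrent(100)      // jump to the known total; triggers completion
	spinner.SetTotal(-1, true) // total < 0: adopt Current() and mark complete

	pool.Wait() // would hang if either bar were left incomplete
}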
diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go
similarity index 68%
rename from vendor/github.com/containers/image/v5/copy/progress_reader.go
rename to vendor/github.com/containers/image/v5/copy/progress_channel.go
index 42f490d326a..d5e9e09bda9 100644
--- a/vendor/github.com/containers/image/v5/copy/progress_reader.go
+++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go
@@ -1,15 +1,13 @@
package copy
import (
- "context"
"io"
"time"
- internalTypes "github.com/containers/image/v5/internal/types"
"github.com/containers/image/v5/types"
)
-// progressReader is a reader that reports its progress on an interval.
+// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval.
type progressReader struct {
source io.Reader
channel chan<- types.ProgressProperties
@@ -79,26 +77,3 @@ func (r *progressReader) Read(p []byte) (int, error) {
}
return n, err
}
-
-// imageSourceSeekableProxy wraps ImageSourceSeekable and keeps track of how many bytes
-// are received.
-type imageSourceSeekableProxy struct {
- // source is the seekable input to read from.
- source internalTypes.ImageSourceSeekable
- // progress is the chan where the total number of bytes read so far are reported.
- progress chan int64
-}
-
-// GetBlobAt reads from the ImageSourceSeekable and report how many bytes were received
-// to the progress chan.
-func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
- rc, errs, err := s.source.GetBlobAt(ctx, bInfo, chunks)
- if err == nil {
- total := int64(0)
- for _, c := range chunks {
- total += int64(c.Length)
- }
- s.progress <- total
- }
- return rc, errs, err
-}
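What survives in progress_channel.go is a reader that reports cumulative progress to a channel on an interval. A rough standalone sketch of the idea (the real progressReader reports richer types.ProgressProperties events rather than raw offsets):

package main

import (
	"fmt"
	"io"
	"strings"
	"time"
)

// reportingReader is a rough analogue of progressReader: it wraps an io.Reader
// and emits cumulative offsets on a channel, at most one report per interval.
type reportingReader struct {
	source     io.Reader
	reports    chan<- int64
	interval   time.Duration
	offset     int64
	lastReport time.Time
}

func (r *reportingReader) Read(p []byte) (int, error) {
	n, err := r.source.Read(p)
	r.offset += int64(n)
	if now := time.Now(); now.Sub(r.lastReport) >= r.interval {
		select {
		case r.reports <- r.offset:
			r.lastReport = now
		default: // never block the copy on a slow consumer
		}
	}
	return n, err
}

func main() {
	reports := make(chan int64, 64)
	r := &reportingReader{
		source:   strings.NewReader(strings.Repeat("x", 1<<20)),
		reports:  reports,
		interval: 0, // report on every Read, for the demo
	}
	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
	close(reports)
	var last int64
	for off := range reports {
		last = off
	}
	fmt.Println("last reported offset:", last)
}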
diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go
index 61612a4d3d7..6c3d9d62cad 100644
--- a/vendor/github.com/containers/image/v5/copy/sign.go
+++ b/vendor/github.com/containers/image/v5/copy/sign.go
@@ -1,31 +1,89 @@
package copy
import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
+ internalsig "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/signature/sigstore"
"github.com/containers/image/v5/transports"
- "github.com/pkg/errors"
)
+// sourceSignatures returns signatures from unparsedSource based on options,
+// and verifies that they can be used (to avoid copying a large image when we
+// can tell in advance that it would ultimately fail)
+func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, options *Options,
+ gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) {
+ var sigs []internalsig.Signature
+ if options.RemoveSignatures {
+ sigs = []internalsig.Signature{}
+ } else {
+ c.Printf("%s\n", gettingSignaturesMessage)
+ s, err := unparsed.UntrustedSignatures(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("reading signatures: %w", err)
+ }
+ sigs = s
+ }
+ if len(sigs) != 0 {
+ c.Printf("%s\n", checkingDestMessage)
+ if err := c.dest.SupportsSignatures(ctx); err != nil {
+ return nil, fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err)
+ }
+ }
+ return sigs, nil
+}
+
// createSignature creates a new signature of manifest using keyIdentity.
-func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) {
+func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) (internalsig.Signature, error) {
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
- return nil, errors.Wrap(err, "initializing GPG")
+ return nil, fmt.Errorf("initializing GPG: %w", err)
}
defer mech.Close()
if err := mech.SupportsSigning(); err != nil {
- return nil, errors.Wrap(err, "Signing not supported")
+ return nil, fmt.Errorf("Signing not supported: %w", err)
}
- dockerReference := c.dest.Reference().DockerReference()
- if dockerReference == nil {
- return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
+ if identity != nil {
+ if reference.IsNameOnly(identity) {
+ return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity)
+ }
+ } else {
+ identity = c.dest.Reference().DockerReference()
+ if identity == nil {
+ return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
+ }
+ }
+
+ c.Printf("Signing manifest using simple signing\n")
+ newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase})
+ if err != nil {
+ return nil, fmt.Errorf("creating signature: %w", err)
+ }
+ return internalsig.SimpleSigningFromBlob(newSig), nil
+}
+
+// createSigstoreSignature creates a new sigstore signature of manifest using privateKeyFile and identity.
+func (c *copier) createSigstoreSignature(manifest []byte, privateKeyFile string, passphrase []byte, identity reference.Named) (internalsig.Signature, error) {
+ if identity != nil {
+ if reference.IsNameOnly(identity) {
+ return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String())
+ }
+ } else {
+ identity = c.dest.Reference().DockerReference()
+ if identity == nil {
+ return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
+ }
}
- c.Printf("Signing manifest\n")
- newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
+ c.Printf("Signing manifest using a sigstore signature\n")
+ newSig, err := sigstore.SignDockerManifestWithPrivateKeyFileUnstable(manifest, identity, privateKeyFile, passphrase)
if err != nil {
- return nil, errors.Wrap(err, "creating signature")
+ return nil, fmt.Errorf("creating signature: %w", err)
}
return newSig, nil
}
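Both createSignature and createSigstoreSignature reject a caller-supplied identity when reference.IsNameOnly reports true, i.e. when it carries neither a tag nor a digest. A quick illustration against c/image's reference package:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	for _, s := range []string{"quay.io/example/app", "quay.io/example/app:v1"} {
		named, err := reference.ParseNormalizedNamed(s)
		if err != nil {
			panic(err)
		}
		// IsNameOnly is true when the reference carries neither tag nor digest.
		fmt.Printf("%-30s name-only: %v\n", named.String(), reference.IsNameOnly(named))
	}
}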
diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go
index ea20e7c5e41..47a5c17cd55 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -2,16 +2,20 @@ package directory
import (
"context"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -22,47 +26,50 @@ const version = "Directory Transport Version: 1.1\n"
var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
type dirImageDestination struct {
- ref dirReference
- desiredLayerCompression types.LayerCompression
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.AlwaysSupportsSignatures
+
+ ref dirReference
}
// newImageDestination returns an ImageDestination for writing to a directory.
-func newImageDestination(sys *types.SystemContext, ref dirReference) (types.ImageDestination, error) {
+func newImageDestination(sys *types.SystemContext, ref dirReference) (private.ImageDestination, error) {
desiredLayerCompression := types.PreserveOriginal
if sys != nil {
if sys.DirForceCompress {
desiredLayerCompression = types.Compress
if sys.DirForceDecompress {
- return nil, errors.Errorf("Cannot compress and decompress at the same time")
+ return nil, fmt.Errorf("Cannot compress and decompress at the same time")
}
}
if sys.DirForceDecompress {
desiredLayerCompression = types.Decompress
}
}
- d := &dirImageDestination{ref: ref, desiredLayerCompression: desiredLayerCompression}
// If directory exists check if it is empty
// if not empty, check whether the contents match that of a container image directory and overwrite the contents
// if the contents don't match throw an error
- dirExists, err := pathExists(d.ref.resolvedPath)
+ dirExists, err := pathExists(ref.resolvedPath)
if err != nil {
- return nil, errors.Wrapf(err, "checking for path %q", d.ref.resolvedPath)
+ return nil, fmt.Errorf("checking for path %q: %w", ref.resolvedPath, err)
}
if dirExists {
- isEmpty, err := isDirEmpty(d.ref.resolvedPath)
+ isEmpty, err := isDirEmpty(ref.resolvedPath)
if err != nil {
return nil, err
}
if !isEmpty {
- versionExists, err := pathExists(d.ref.versionPath())
+ versionExists, err := pathExists(ref.versionPath())
if err != nil {
- return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath())
+ return nil, fmt.Errorf("checking if path exists %q: %w", ref.versionPath(), err)
}
if versionExists {
- contents, err := ioutil.ReadFile(d.ref.versionPath())
+ contents, err := os.ReadFile(ref.versionPath())
if err != nil {
return nil, err
}
@@ -74,22 +81,37 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag
return nil, ErrNotContainerImageDir
}
// delete directory contents so that only one image is in the directory at a time
- if err = removeDirContents(d.ref.resolvedPath); err != nil {
- return nil, errors.Wrapf(err, "erasing contents in %q", d.ref.resolvedPath)
+ if err = removeDirContents(ref.resolvedPath); err != nil {
+ return nil, fmt.Errorf("erasing contents in %q: %w", ref.resolvedPath, err)
}
- logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
+ logrus.Debugf("overwriting existing container image directory %q", ref.resolvedPath)
}
} else {
// create directory if it doesn't exist
- if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
- return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
+ if err := os.MkdirAll(ref.resolvedPath, 0755); err != nil {
+ return nil, fmt.Errorf("unable to create directory %q: %w", ref.resolvedPath, err)
}
}
// create version file
- err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
+ err = os.WriteFile(ref.versionPath(), []byte(version), 0644)
if err != nil {
- return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath())
+ return nil, fmt.Errorf("creating version file %q: %w", ref.versionPath(), err)
+ }
+
+ d := &dirImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: nil,
+ DesiredLayerCompression: desiredLayerCompression,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: false,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
+ ref: ref,
}
+ d.Compat = impl.AddCompat(d)
return d, nil
}
@@ -104,52 +126,15 @@ func (d *dirImageDestination) Close() error {
return nil
}
-func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
- return nil
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
- return d.desiredLayerCompression
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *dirImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, DockerReference() returns nil.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *dirImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
+// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
+func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
if err != nil {
return types.BlobInfo{}, err
}
@@ -172,7 +157,7 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+ return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
@@ -199,18 +184,16 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with unknown digest")
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
@@ -232,15 +215,20 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
- return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+ return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
}
-// PutSignatures writes a set of signatures to the destination.
+// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
for i, sig := range signatures {
- if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil {
return err
}
}
@@ -272,7 +260,7 @@ func pathExists(path string) (bool, error) {
// returns true if directory is empty
func isDirEmpty(path string) (bool, error) {
- files, err := ioutil.ReadDir(path)
+ files, err := os.ReadDir(path)
if err != nil {
return false, err
}
@@ -281,7 +269,7 @@ func isDirEmpty(path string) (bool, error) {
// deletes the contents of a directory
func removeDirContents(path string) error {
- files, err := ioutil.ReadDir(path)
+ files, err := os.ReadDir(path)
if err != nil {
return err
}
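The destination now gets its boilerplate (property getters, the missing PutBlobPartial capability, signature support) from embedded helpers instead of hand-written methods. A toy model of that embedding style (the types below are invented stand-ins, not the real internal/imagedestination API):

package main

import "fmt"

// properties holds per-transport answers that never change at runtime.
type properties struct {
	hasThreadSafePutBlob bool
}

// propertyMethods provides the boilerplate getters once, for every transport.
type propertyMethods struct{ p properties }

func (m propertyMethods) HasThreadSafePutBlob() bool { return m.p.hasThreadSafePutBlob }

// noPutBlobPartial stubs out an optional capability with a uniform answer.
type noPutBlobPartial struct{}

func (noPutBlobPartial) SupportsPutBlobPartial() bool { return false }

// dirDestination composes the pieces; only transport-specific state remains.
type dirDestination struct {
	propertyMethods
	noPutBlobPartial
	path string
}

func main() {
	d := dirDestination{
		propertyMethods: propertyMethods{p: properties{hasThreadSafePutBlob: false}},
		path:            "/tmp/image",
	}
	fmt.Println("thread-safe PutBlob:", d.HasThreadSafePutBlob())
	fmt.Println("partial blobs:", d.SupportsPutBlobPartial())
}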
diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go
index ad9129d4012..98efdedd739 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_src.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_src.go
@@ -2,23 +2,41 @@ package directory
import (
"context"
+ "fmt"
"io"
- "io/ioutil"
"os"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
type dirImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
ref dirReference
}
// newImageSource returns an ImageSource reading from an existing directory.
// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ref dirReference) types.ImageSource {
- return &dirImageSource{ref}
+func newImageSource(ref dirReference) private.ImageSource {
+ s := &dirImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s
}
// Reference returns the reference used to set up this source, _as specified by the user_
@@ -37,18 +55,13 @@ func (s *dirImageSource) Close() error {
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest))
+ m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
if err != nil {
return nil, "", err
}
return m, manifest.GuessMIMEType(m), err
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *dirImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -64,33 +77,26 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
return r, fi.Size(), nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
-func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- signatures := [][]byte{}
+func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ signatures := []signature.Signature{}
for i := 0; ; i++ {
- signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest))
+ path := s.ref.signaturePath(i, instanceDigest)
+ sigBlob, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
break
}
return nil, err
}
+ signature, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature %q: %w", path, err)
+ }
signatures = append(signatures, signature)
}
return signatures, nil
}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *dirImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go
index e542d888c22..253ecb24758 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_transport.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go
@@ -2,17 +2,17 @@ package directory
import (
"context"
+ "errors"
"fmt"
"path/filepath"
"strings"
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
func init() {
@@ -39,7 +39,7 @@ func (t dirTransport) ParseReference(reference string) (types.ImageReference, er
// scope passed to this function will not be "", that value is always allowed.
func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
if !strings.HasPrefix(scope, "/") {
- return errors.Errorf("Invalid scope %s: Must be an absolute path", scope)
+ return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry.
@@ -48,7 +48,7 @@ func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
}
cleaned := filepath.Clean(scope)
if cleaned != scope {
- return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
}
return nil
}
@@ -140,8 +140,7 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src := newImageSource(ref)
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -158,7 +157,7 @@ func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.Syst
// DeleteImage deletes the named image from the registry, if supported.
func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for dir: images")
+ return errors.New("Deleting images not implemented for dir: images")
}
// manifestPath returns a path for the manifest within a directory using our conventions.
diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
index 71136b88089..b4ff4d08a59 100644
--- a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
+++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
@@ -1,10 +1,9 @@
package explicitfilepath
import (
+ "fmt"
"os"
"path/filepath"
-
- "github.com/pkg/errors"
)
// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
@@ -26,14 +25,14 @@ func ResolvePathToFullyExplicit(path string) (string, error) {
// This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
// We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
// in the resulting path, and especially not at the end.
- return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
+ return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path)
}
resolvedPath := filepath.Join(resolvedParent, file)
// As a sanity check, ensure that there are no "." or ".." components.
cleanedResolvedPath := filepath.Clean(resolvedPath)
if cleanedResolvedPath != resolvedPath {
// Coverage: This should never happen.
- return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
+ return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
}
return resolvedPath, nil
default: // err != nil, unrecognized
diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go
index d4248db21f0..632ee7c4936 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go
@@ -2,56 +2,50 @@ package archive
import (
"context"
- "io"
+ "fmt"
"github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
type archiveImageDestination struct {
*tarfile.Destination // Implements most of types.ImageDestination
ref archiveReference
- archive *tarfile.Writer // Should only be closed if writer != nil
- writer io.Closer // May be nil if the archive is shared
+ writer *Writer // Should be closed if closeWriter
+ closeWriter bool
}
-func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
+func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) {
if ref.sourceIndex != -1 {
- return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
+ return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
}
- var archive *tarfile.Writer
- var writer io.Closer
- if ref.archiveWriter != nil {
- archive = ref.archiveWriter
- writer = nil
+ var writer *Writer
+ var closeWriter bool
+ if ref.writer != nil {
+ writer = ref.writer
+ closeWriter = false
} else {
- fh, err := openArchiveForWriting(ref.path)
+ w, err := NewWriter(sys, ref.path)
if err != nil {
return nil, err
}
-
- archive = tarfile.NewWriter(fh)
- writer = fh
+ writer = w
+ closeWriter = true
}
- tarDest := tarfile.NewDestination(sys, archive, ref.ref)
+ tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref)
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
}
return &archiveImageDestination{
Destination: tarDest,
ref: ref,
- archive: archive,
writer: writer,
+ closeWriter: closeWriter,
}, nil
}
-// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
-func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Decompress
-}
-
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *archiveImageDestination) Reference() types.ImageReference {
@@ -60,7 +54,7 @@ func (d *archiveImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *archiveImageDestination) Close() error {
- if d.writer != nil {
+ if d.closeWriter {
return d.writer.Close()
}
return nil
@@ -74,8 +68,15 @@ func (d *archiveImageDestination) Close() error {
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- if d.writer != nil {
- return d.archive.Close()
+ d.writer.imageCommitted()
+ if d.closeWriter {
+ // We could do this only in .Close(), but failures in .Close() are much more likely to be
+ // ignored by callers that use defer. So, in single-image destinations, try to complete
+ // the archive here.
+ // But if Commit() is never called, let .Close() clean up.
+ err := d.writer.Close()
+ d.closeWriter = false
+ return err
}
return nil
}
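The Commit()/Close() changes above implement a small ownership idiom: finish the archive eagerly in Commit(), where callers actually check the error, and flip closeWriter so the deferred Close() becomes a no-op. A stripped-down sketch of the idiom, independent of the tarfile types:

```go
package main

import (
	"io"
	"os"
)

// committer finalizes an io.Closer exactly once, preferring to surface the
// error in Commit(), where callers check it, over a defer'd Close(), whose
// error is commonly ignored.
type committer struct {
	writer      io.Closer
	closeWriter bool // true while we still own writer and must close it
}

func (c *committer) Commit() error {
	if !c.closeWriter {
		return nil
	}
	err := c.writer.Close()
	c.closeWriter = false // make the later deferred Close() a no-op
	return err
}

func (c *committer) Close() error {
	if c.closeWriter { // Commit() never ran; clean up here instead
		return c.writer.Close()
	}
	return nil
}

func main() {
	f, err := os.CreateTemp("", "sketch")
	if err != nil {
		panic(err)
	}
	c := &committer{writer: f, closeWriter: true}
	defer c.Close()                    // safety net if Commit is skipped
	if err := c.Commit(); err != nil { // normal path: error observed here
		panic(err)
	}
}
```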
diff --git a/vendor/github.com/containers/image/v5/docker/archive/reader.go b/vendor/github.com/containers/image/v5/docker/archive/reader.go
index 4bb519a2622..875a1525753 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/reader.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/reader.go
@@ -1,11 +1,12 @@
package archive
import (
+ "fmt"
+
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// Reader manages a single Docker archive, allows listing its contents and accessing
@@ -40,10 +41,10 @@ func (r *Reader) Close() error {
func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
standalone, ok := ref.(archiveReference)
if !ok {
- return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+ return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
}
if standalone.archiveReader != nil {
- return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
+ return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
}
reader, err := NewReader(sys, standalone.path)
if err != nil {
@@ -73,22 +74,22 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
for _, tag := range image.RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
- return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex)
+ return nil, fmt.Errorf("Invalid tag %#v in manifest item @%d: %w", tag, imageIndex, err)
}
nt, ok := parsedTag.(reference.NamedTagged)
if !ok {
- return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
+ return nil, fmt.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
}
ref, err := newReference(r.path, nt, -1, r.archive, nil)
if err != nil {
- return nil, errors.Wrapf(err, "creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
+ return nil, fmt.Errorf("creating a reference for tag %#v in manifest item @%d: %w", tag, imageIndex, err)
}
refs = append(refs, ref)
}
if len(refs) == 0 {
ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
if err != nil {
- return nil, errors.Wrapf(err, "creating a reference for manifest item @%d", imageIndex)
+ return nil, fmt.Errorf("creating a reference for manifest item @%d: %w", imageIndex, err)
}
refs = append(refs, ref)
}
@@ -107,7 +108,7 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) {
archiveRef, ok := ref.(archiveReference)
if !ok {
- return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+ return nil, fmt.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
}
manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex)
if err != nil {
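A hedged usage sketch for the Reader API touched above — NewReader and List appear in this file; the archive path is hypothetical and error handling is abbreviated:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/transports"
)

func main() {
	// Open an existing docker-archive tarball and enumerate its images.
	reader, err := archive.NewReader(nil, "/tmp/images.tar") // nil *types.SystemContext uses defaults
	if err != nil {
		panic(err)
	}
	defer reader.Close()

	refsPerImage, err := reader.List() // one []types.ImageReference per manifest item
	if err != nil {
		panic(err)
	}
	for i, refs := range refsPerImage {
		for _, ref := range refs {
			fmt.Printf("image %d: %s\n", i, transports.ImageName(ref))
		}
	}
}
```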
diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go
index 7acca210ef1..5604a512184 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/src.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/src.go
@@ -4,6 +4,7 @@ import (
"context"
"github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
)
@@ -14,7 +15,7 @@ type archiveImageSource struct {
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
var archive *tarfile.Reader
var closeArchive bool
if ref.archiveReader != nil {
@@ -28,7 +29,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveRe
archive = a
closeArchive = true
}
- src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex)
+ src := tarfile.NewSource(archive, closeArchive, ref.Transport().Name(), ref.ref, ref.sourceIndex)
return &archiveImageSource{
Source: src,
ref: ref,
diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
index 9a48cb46cc4..304a8c618ff 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/transport.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -2,16 +2,16 @@ package archive
import (
"context"
+ "errors"
"fmt"
"strconv"
"strings"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
- ctrImage "github.com/containers/image/v5/image"
+ ctrImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func init() {
@@ -53,13 +53,13 @@ type archiveReference struct {
// file, not necessarily path precisely).
archiveReader *tarfile.Reader
// If not nil, must have been created for path
- archiveWriter *tarfile.Writer
+ writer *Writer
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
if refString == "" {
- return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+ return nil, fmt.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
}
parts := strings.SplitN(refString, ":", 2)
@@ -72,21 +72,21 @@ func ParseReference(refString string) (types.ImageReference, error) {
if len(parts[1]) > 0 && parts[1][0] == '@' {
i, err := strconv.Atoi(parts[1][1:])
if err != nil {
- return nil, errors.Wrapf(err, "Invalid source index %s", parts[1])
+ return nil, fmt.Errorf("Invalid source index %s: %w", parts[1], err)
}
if i < 0 {
- return nil, errors.Errorf("Invalid source index @%d: must not be negative", i)
+ return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)
}
sourceIndex = i
} else {
ref, err := reference.ParseNormalizedNamed(parts[1])
if err != nil {
- return nil, errors.Wrapf(err, "docker-archive parsing reference")
+ return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
}
ref = reference.TagNameOnly(ref)
refTagged, isTagged := ref.(reference.NamedTagged)
if !isTagged { // If ref contains a digest, TagNameOnly does not change it
- return nil, errors.Errorf("reference does not include a tag: %s", ref.String())
+ return nil, fmt.Errorf("reference does not include a tag: %s", ref.String())
}
nt = refTagged
}
@@ -108,25 +108,25 @@ func NewIndexReference(path string, sourceIndex int) (types.ImageReference, erro
// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
func newReference(path string, ref reference.NamedTagged, sourceIndex int,
- archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
+ archiveReader *tarfile.Reader, writer *Writer) (types.ImageReference, error) {
if strings.Contains(path, ":") {
- return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
+ return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
}
if ref != nil && sourceIndex != -1 {
- return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
+ return nil, fmt.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
}
if _, isDigest := ref.(reference.Canonical); isDigest {
- return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String())
+ return nil, fmt.Errorf("docker-archive doesn't support digest references: %s", ref.String())
}
if sourceIndex != -1 && sourceIndex < 0 {
- return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
+ return nil, fmt.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
}
return archiveReference{
path: path,
ref: ref,
sourceIndex: sourceIndex,
archiveReader: archiveReader,
- archiveWriter: archiveWriter,
+ writer: writer,
}, nil
}
@@ -185,11 +185,7 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, ref)
- if err != nil {
- return nil, err
- }
- return ctrImage.FromSource(ctx, sys, src)
+ return ctrImage.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
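For orientation, the grammar implemented by ParseReference and newReference above accepts a bare path, a path plus a tagged name, or a path plus a numeric source index. A sketch (paths hypothetical):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/transports"
)

func main() {
	for _, s := range []string{
		"/tmp/img.tar",                // whole archive: no tag, no index
		"/tmp/img.tar:busybox:latest", // a tagged reference within the archive
		"/tmp/img.tar@0",              // a manifest item selected by source index
	} {
		ref, err := archive.ParseReference(s)
		if err != nil {
			fmt.Printf("%q rejected: %v\n", s, err)
			continue
		}
		fmt.Println(transports.ImageName(ref))
	}
}
```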
diff --git a/vendor/github.com/containers/image/v5/docker/archive/writer.go b/vendor/github.com/containers/image/v5/docker/archive/writer.go
index 6a4b8c645a1..aa8cdd60052 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/writer.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/writer.go
@@ -1,38 +1,76 @@
package archive
import (
+ "errors"
+ "fmt"
"io"
"os"
+ "sync"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// Writer manages a single in-progress Docker archive and allows adding images to it.
type Writer struct {
- path string // The original, user-specified path; not the maintained temporary file, if any
- archive *tarfile.Writer
- writer io.Closer
+ path string // The original, user-specified path; not the maintained temporary file, if any
+ regularFile bool // path refers to a regular file (e.g. not a pipe)
+ archive *tarfile.Writer
+ writer io.Closer
+
+ // The following state can only be accessed with the mutex held.
+ mutex sync.Mutex
+ hadCommit bool // At least one successful commit has happened
}
// NewWriter returns a Writer for path.
// The caller should call .Close() on the returned object.
func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
- fh, err := openArchiveForWriting(path)
+ // path can be either a pipe or a regular file
+ // in the case of a pipe, we require that we can open it for write
+ // in the case of a regular file, we don't want to overwrite any pre-existing file
+ // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
+ // only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
+ fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("opening file %q: %w", path, err)
}
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ fh.Close()
+ }
+ }()
+
+ fhStat, err := fh.Stat()
+ if err != nil {
+ return nil, fmt.Errorf("statting file %q: %w", path, err)
+ }
+ regularFile := fhStat.Mode().IsRegular()
+ if regularFile && fhStat.Size() != 0 {
+ return nil, errors.New("docker-archive doesn't support modifying existing images")
+ }
+
archive := tarfile.NewWriter(fh)
+ succeeded = true
return &Writer{
- path: path,
- archive: archive,
- writer: fh,
+ path: path,
+ regularFile: regularFile,
+ archive: archive,
+ writer: fh,
+ hadCommit: false,
}, nil
}
+// imageCommitted notifies the Writer that at least one image was successfully committed to the stream.
+func (w *Writer) imageCommitted() {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+ w.hadCommit = true
+}
+
// Close writes all outstanding data about images to the archive, and
// releases state associated with the Writer, if any.
// No more images can be added after this is called.
@@ -41,42 +79,25 @@ func (w *Writer) Close() error {
if err2 := w.writer.Close(); err2 != nil && err == nil {
err = err2
}
+ if err == nil && w.regularFile && !w.hadCommit {
+ // Writing to the destination never had a success; delete the destination if we created it.
+ // This is done primarily because we don’t implement adding another image to a pre-existing image, so if we
+ // left a partial archive around (notably because reading from the _source_ has failed), we couldn’t retry without
+ // the caller manually deleting the partial archive. So, delete it instead.
+ //
+ // Archives with at least one successfully created image are left around; they might still be valuable.
+ //
+ // Note a corner case: If there _originally_ was an empty file (which is not a valid archive anyway), this deletes it.
+ // Ideally, if w.regularFile, we should write the full contents to a temporary file and use os.Rename here, only on success.
+ if err2 := os.Remove(w.path); err2 != nil {
+ err = err2
+ }
+ }
return err
}
// NewReference returns an ImageReference that allows adding an image to Writer,
// with an optional reference.
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
- return newReference(w.path, destinationRef, -1, nil, w.archive)
-}
-
-// openArchiveForWriting opens path for writing a tar archive,
-// making a few sanity checks.
-func openArchiveForWriting(path string) (*os.File, error) {
- // path can be either a pipe or a regular file
- // in the case of a pipe, we require that we can open it for write
- // in the case of a regular file, we don't want to overwrite any pre-existing file
- // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
- // only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
- fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
- if err != nil {
- return nil, errors.Wrapf(err, "opening file %q", path)
- }
- succeeded := false
- defer func() {
- if !succeeded {
- fh.Close()
- }
- }()
- fhStat, err := fh.Stat()
- if err != nil {
- return nil, errors.Wrapf(err, "statting file %q", path)
- }
-
- if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
- return nil, errors.New("docker-archive doesn't support modifying existing images")
- }
-
- succeeded = true
- return fh, nil
+ return newReference(w.path, destinationRef, -1, nil, w)
}
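A sketch of how the Writer above is typically driven to assemble an archive: create it once, mint one reference per image with NewReference, copy into each, then Close. copy.Image and the insecure-accept-anything policy are from the public containers/image API; the image name and output path are assumptions:

```go
package main

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/signature"
)

func main() {
	ctx := context.Background()

	// Accept-anything policy purely for the sketch; real callers should load
	// the configured system policy instead.
	policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer pc.Destroy()

	w, err := archive.NewWriter(nil, "/tmp/out.tar") // hypothetical output path
	if err != nil {
		panic(err)
	}
	defer w.Close() // per Close() above, removes the file if nothing was committed

	named, err := reference.ParseNormalizedNamed("busybox:latest") // assumed image
	if err != nil {
		panic(err)
	}
	destRef, err := w.NewReference(named.(reference.NamedTagged))
	if err != nil {
		panic(err)
	}
	srcRef, err := docker.ParseReference("//busybox:latest")
	if err != nil {
		panic(err)
	}
	if _, err := copy.Image(ctx, pc, destRef, srcRef, &copy.Options{}); err != nil {
		panic(err)
	}
}
```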
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
index f68981472f3..dc4aa70d3a5 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
@@ -2,13 +2,15 @@ package daemon
import (
"context"
+ "errors"
+ "fmt"
"io"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
"github.com/docker/docker/client"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -26,13 +28,13 @@ type daemonImageDestination struct {
}
// newImageDestination returns a types.ImageDestination for the specified image reference.
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageDestination, error) {
if ref.ref == nil {
- return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+ return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
if !ok {
- return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+ return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
var mustMatchRuntimeOS = true
@@ -42,7 +44,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
c, err := newDockerClient(sys)
if err != nil {
- return nil, errors.Wrap(err, "initializing docker engine client")
+ return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
reader, writer := io.Pipe()
@@ -56,7 +58,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
return &daemonImageDestination{
ref: ref,
mustMatchRuntimeOS: mustMatchRuntimeOS,
- Destination: tarfile.NewDestination(sys, archive, namedTaggedRef),
+ Destination: tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef),
archive: archive,
goroutineCancel: goroutineCancel,
statusChannel: statusChannel,
@@ -84,7 +86,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
resp, err := c.ImageLoad(ctx, reader, true)
if err != nil {
- err = errors.Wrap(err, "saving image to docker engine")
+ err = fmt.Errorf("saving image to docker engine: %w", err)
return
}
defer resp.Body.Close()
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
index a6d8a6cf587..b57936654b6 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
@@ -2,10 +2,11 @@ package daemon
import (
"context"
+ "fmt"
"github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
type daemonImageSource struct {
@@ -22,16 +23,16 @@ type daemonImageSource struct {
// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
// is the config, and that the following len(RootFS) files are the layers, but that feels
// way too brittle.)
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageSource, error) {
c, err := newDockerClient(sys)
if err != nil {
- return nil, errors.Wrap(err, "initializing docker engine client")
+ return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
// Either way ImageSave should create a tarball with exactly one image.
inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
if err != nil {
- return nil, errors.Wrap(err, "loading image from docker engine")
+ return nil, fmt.Errorf("loading image from docker engine: %w", err)
}
defer inputStream.Close()
@@ -39,7 +40,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
if err != nil {
return nil, err
}
- src := tarfile.NewSource(archive, true, nil, -1)
+ src := tarfile.NewSource(archive, true, ref.Transport().Name(), nil, -1)
return &daemonImageSource{
ref: ref,
Source: src,
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
index 4e4ed688148..ad218908439 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
@@ -2,15 +2,15 @@ package daemon
import (
"context"
+ "errors"
"fmt"
"github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
func init() {
@@ -39,7 +39,7 @@ func (t daemonTransport) ParseReference(reference string) (types.ImageReference,
func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
// ID values cannot be effectively namespaced, and are clearly invalid host:port values.
if _, err := digest.Parse(scope); err == nil {
- return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
+ return fmt.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
}
// FIXME? We could be verifying the various character set and length restrictions
@@ -53,7 +53,7 @@ func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
// For daemonImageSource, both id and ref are acceptable, but ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
// For daemonImageDestination, it must be a ref, which is NamedTagged.
// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest.
-// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
+// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
type daemonReference struct {
id digest.Digest
ref reference.Named // !reference.IsNameOnly
@@ -70,7 +70,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
// The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name.
// Other digest references are ambiguous, so refuse them.
if dgst.Algorithm() != digest.Canonical {
- return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
+ return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
}
return NewReference(dgst, nil)
}
@@ -80,7 +80,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
return nil, err
}
if reference.FamiliarName(ref) == digest.Canonical.String() {
- return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
+ return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
}
return NewReference("", ref)
}
@@ -92,7 +92,7 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference,
}
if ref != nil {
if reference.IsNameOnly(ref) {
- return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// Most versions of docker/reference do not handle that (ignoring the tag), so reject such input.
@@ -102,7 +102,7 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference,
_, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical)
if isTagged && isDigested {
- return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
+ return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
}
}
return daemonReference{
@@ -195,11 +195,7 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -219,5 +215,5 @@ func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemCon
// Should this just untag the image? Should this stop running containers?
// The semantics is not quite as clear as for remote repositories.
// The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
- return errors.Errorf("Deleting images not implemented for docker-daemon: images")
+ return errors.New("Deleting images not implemented for docker-daemon: images")
}
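The reference rules enforced above (either an image ID, i.e. a config digest, or a named reference that carries a tag xor a digest) in a hedged sketch; the digest value is made up:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/daemon"
)

func main() {
	for _, s := range []string{
		"sha256:6d47a9873783f7bf23773f0cf60c67cef295d451f56b8b79fe3a1ea217a4bf98", // image ID; hypothetical value
		"busybox:latest", // NamedTagged: accepted
		"busybox",        // name only: rejected, has neither tag nor digest
	} {
		if _, err := daemon.ParseReference(s); err != nil {
			fmt.Printf("%q rejected: %v\n", s, err)
		} else {
			fmt.Printf("%q accepted\n", s)
		}
	}
}
```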
diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go
similarity index 62%
rename from vendor/github.com/docker/distribution/registry/client/errors.go
rename to vendor/github.com/containers/image/v5/docker/distribution_error.go
index 52d49d5d295..0fe915249b7 100644
--- a/vendor/github.com/docker/distribution/registry/client/errors.go
+++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go
@@ -1,46 +1,60 @@
-package client
+// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go
+// Copyright 2022 github.com/distribution/distribution authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package docker
import (
"encoding/json"
"errors"
"fmt"
"io"
- "io/ioutil"
"net/http"
"github.com/docker/distribution/registry/api/errcode"
- "github.com/docker/distribution/registry/client/auth/challenge"
+ dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
)
-// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errNoErrorsInBody is returned when an HTTP response body parses to an empty
// errcode.Errors slice.
-var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
+var errNoErrorsInBody = errors.New("no error details found in HTTP response body")
-// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// unexpectedHTTPStatusError is returned when an unexpected HTTP status is
// returned when making a registry api call.
-type UnexpectedHTTPStatusError struct {
+type unexpectedHTTPStatusError struct {
Status string
}
-func (e *UnexpectedHTTPStatusError) Error() string {
+func (e *unexpectedHTTPStatusError) Error() string {
return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
}
-// UnexpectedHTTPResponseError is returned when an expected HTTP status code
+// unexpectedHTTPResponseError is returned when an expected HTTP status code
// is returned, but the content was unexpected and failed to be parsed.
-type UnexpectedHTTPResponseError struct {
+type unexpectedHTTPResponseError struct {
ParseErr error
StatusCode int
Response []byte
}
-func (e *UnexpectedHTTPResponseError) Error() string {
+func (e *unexpectedHTTPResponseError) Error() string {
return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
}
func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
var errors errcode.Errors
- body, err := ioutil.ReadAll(r)
+ body, err := io.ReadAll(r)
if err != nil {
return err
}
@@ -63,7 +77,7 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
}
if err := json.Unmarshal(body, &errors); err != nil {
- return &UnexpectedHTTPResponseError{
+ return &unexpectedHTTPResponseError{
ParseErr: err,
StatusCode: statusCode,
Response: body,
@@ -73,8 +87,8 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
if len(errors) == 0 {
// If there was no error specified in the body, return
// UnexpectedHTTPResponseError.
- return &UnexpectedHTTPResponseError{
- ParseErr: ErrNoErrorsInBody,
+ return &unexpectedHTTPResponseError{
+ ParseErr: errNoErrorsInBody,
StatusCode: statusCode,
Response: body,
}
@@ -94,15 +108,15 @@ func mergeErrors(err1, err2 error) error {
return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
}
-// HandleErrorResponse returns error parsed from HTTP response for an
+// handleErrorResponse returns error parsed from HTTP response for an
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
// UnexpectedHTTPStatusError is returned for a response code outside of the
// expected range.
-func HandleErrorResponse(resp *http.Response) error {
+func handleErrorResponse(resp *http.Response) error {
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
// Check for OAuth errors within the `WWW-Authenticate` header first
// See https://tools.ietf.org/html/rfc6750#section-3
- for _, c := range challenge.ResponseChallenges(resp) {
+ for _, c := range dockerChallenge.ResponseChallenges(resp) {
if c.Scheme == "bearer" {
var err errcode.Error
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
@@ -124,16 +138,10 @@ func HandleErrorResponse(resp *http.Response) error {
}
}
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
- if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
+ if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
}
return err
}
- return &UnexpectedHTTPStatusError{Status: resp.Status}
-}
-
-// SuccessStatus returns true if the argument is a successful HTTP response
-// code (in the range 200 - 399 inclusive).
-func SuccessStatus(status int) bool {
- return status >= 200 && status <= 399
+ return &unexpectedHTTPStatusError{Status: resp.Status}
}
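parseHTTPErrorResponse above expects the standard distribution-spec error envelope. A sketch of that body shape and how errcode.Errors decodes it (values illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	// The canonical error envelope returned by registries implementing the
	// distribution API; the code/message/detail values here are made up.
	body := []byte(`{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown","detail":{"Tag":"latest"}}]}`)

	var errs errcode.Errors
	if err := json.Unmarshal(body, &errs); err != nil {
		panic(err)
	}
	fmt.Println(errs.Error()) // human-readable rendering of the decoded errors
	fmt.Println(len(errs))    // 1; an empty slice is what triggers errNoErrorsInBody above
}
```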
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index 3fe9a11d003..73e26a1ad4a 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -5,9 +5,9 @@ import (
"context"
"crypto/tls"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"os"
@@ -19,16 +19,18 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/containers/image/v5/version"
"github.com/containers/storage/pkg/homedir"
- clientLib "github.com/docker/distribution/registry/client"
+ "github.com/docker/distribution/registry/api/errcode"
+ v2 "github.com/docker/distribution/registry/api/v2"
"github.com/docker/go-connections/tlsconfig"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
@@ -63,8 +65,8 @@ type certPath struct {
var (
homeCertDir = filepath.FromSlash(".config/containers/certs.d")
perHostCertDirs = []certPath{
- {path: "/etc/containers/certs.d", absolute: true},
- {path: "/etc/docker/certs.d", absolute: true},
+ {path: etcDir + "/containers/certs.d", absolute: true},
+ {path: etcDir + "/docker/certs.d", absolute: true},
}
defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
@@ -103,10 +105,11 @@ type dockerClient struct {
// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
tlsClientConfig *tls.Config
// The following members are not set by newDockerClient and must be set by callers if needed.
- auth types.DockerAuthConfig
- registryToken string
- signatureBase signatureStorageBase
- scope authScope
+ auth types.DockerAuthConfig
+ registryToken string
+ signatureBase lookasideStorageBase
+ useSigstoreAttachments bool
+ scope authScope
// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
@@ -123,8 +126,9 @@ type dockerClient struct {
}
type authScope struct {
- remoteName string
- actions string
+ resourceType string
+ remoteName string
+ actions string
}
// sendAuth determines whether we need authentication for v2 or v1 endpoint.
@@ -165,9 +169,8 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
func serverDefault() *tls.Config {
return &tls.Config{
// Avoid fallback to SSL protocols < TLS1.0
- MinVersion: tls.VersionTLS10,
- PreferServerCipherSuites: true,
- CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
+ MinVersion: tls.VersionTLS10,
+ CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
}
}
@@ -212,13 +215,13 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
// signatureBase is always set in the return value
-func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
+func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) {
auth, err := config.GetCredentialsForRef(sys, ref.ref)
if err != nil {
- return nil, errors.Wrapf(err, "getting username and password")
+ return nil, fmt.Errorf("getting username and password: %w", err)
}
- sigBase, err := SignatureStorageBaseURL(sys, ref, write)
+ sigBase, err := registryConfig.lookasideStorageBaseURL(ref, write)
if err != nil {
return nil, err
}
@@ -233,6 +236,8 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
client.registryToken = sys.DockerBearerRegistryToken
}
client.signatureBase = sigBase
+ client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref)
+ client.scope.resourceType = "repository"
client.scope.actions = actions
client.scope.remoteName = reference.Path(ref.ref)
return client, nil
@@ -269,7 +274,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
skipVerify := false
reg, err := sysregistriesv2.FindRegistry(sys, reference)
if err != nil {
- return nil, errors.Wrapf(err, "loading registries")
+ return nil, fmt.Errorf("loading registries: %w", err)
}
if reg != nil {
if reg.Blocked {
@@ -297,7 +302,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
client, err := newDockerClient(sys, registry, registry)
if err != nil {
- return errors.Wrapf(err, "creating new docker client")
+ return fmt.Errorf("creating new docker client: %w", err)
}
client.auth = types.DockerAuthConfig{
Username: username,
@@ -346,7 +351,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
// We can't use GetCredentialsForRef here because we want to search the whole registry.
auth, err := config.GetCredentials(sys, registry)
if err != nil {
- return nil, errors.Wrapf(err, "getting username and password")
+ return nil, fmt.Errorf("getting username and password: %w", err)
}
// The /v2/_catalog endpoint has been disabled for docker.io therefore
@@ -360,7 +365,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
client, err := newDockerClient(sys, hostname, registry)
if err != nil {
- return nil, errors.Wrapf(err, "creating new docker client")
+ return nil, fmt.Errorf("creating new docker client: %w", err)
}
client.auth = auth
if sys != nil {
@@ -403,13 +408,13 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
- return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
+ return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err := httpResponseToError(resp, "")
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
- return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
+ return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
}
v2Res := &V2Results{}
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
@@ -463,10 +468,41 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea
return nil, err
}
- url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
+ urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
+ url, err := url.Parse(urlString)
+ if err != nil {
+ return nil, err
+ }
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
}
+// Checks if the auth headers in the response contain an indication of a failed
+// authorization because of an "insufficient_scope" error. If that's the case,
+// returns the required scope to be used for fetching a new token.
+func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) {
+ if err == nil && res.StatusCode == http.StatusUnauthorized {
+ challenges := parseAuthHeader(res.Header)
+ for _, challenge := range challenges {
+ if challenge.Scheme == "bearer" {
+ if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
+ if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
+ if newScope, err := parseAuthScope(scope); err == nil {
+ return true, newScope
+ } else {
+ logrus.WithFields(logrus.Fields{
+ "error": err,
+ "scope": scope,
+ "challenge": challenge,
+ }).Error("Failed to parse the authentication scope from the given challenge")
+ }
+ }
+ }
+ }
+ }
+ }
+ return false, nil
+}
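For reference, a sketch of the WWW-Authenticate challenge needsRetryWithUpdatedScope looks for, plus a minimal equivalent of the colon-splitting that parseAuthScope (private to this package) is described as doing; all values are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative challenge a registry may send back with a 401:
	//   WWW-Authenticate: Bearer realm="https://auth.example.com/token",
	//       service="registry.example.com",
	//       scope="repository:library/busybox:pull,push",
	//       error="insufficient_scope"
	// A minimal equivalent of splitting the scope value into the three
	// authScope fields (resource type, remote name, actions):
	scope := "repository:library/busybox:pull,push"
	parts := strings.SplitN(scope, ":", 3)
	if len(parts) != 3 {
		panic("invalid scope")
	}
	fmt.Printf("resourceType=%s remoteName=%s actions=%s\n", parts[0], parts[1], parts[2])
}
```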
+
// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it,
// silently falling back to fallbackDelay if the header is missing or invalid.
func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration {
@@ -500,12 +536,35 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat
// makeRequest should generally be preferred.
// In case of an HTTP 429 status code in the response, it may automatically retry a few times.
// TODO(runcom): too many arguments here, use a struct
-func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
delay := backoffInitialDelay
attempts := 0
for {
res, err := c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
attempts++
+
+ // By default we use pre-defined scopes per operation. In
+ // certain cases, this can fail when our authentication is
+ // insufficient, then we might be getting an error back with a
+ // Www-Authenticate Header indicating an insufficient scope.
+ //
+ // Check for that and update the client challenges to retry after
+ // requesting a new token
+ //
+ // We only try this on the first attempt, to not overload an
+ // already struggling server.
+ // We also cannot retry with a body (stream != nil) as stream
+ // was already read
+ if attempts == 1 && stream == nil && auth != noAuth {
+ if retry, newScope := needsRetryWithUpdatedScope(err, res); retry {
+ logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
+ // Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently
+ // expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
+ // for more than one extra scope.
+ res, err = c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, newScope)
+ extraScope = newScope
+ }
+ }
if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
stream != nil || // We can't retry with a body (which is not restartable in the general case)
attempts == backoffNumIterations {
@@ -518,7 +577,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
if delay > backoffMaxDelay {
delay = backoffMaxDelay
}
- logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url, delay.Seconds())
+ logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url.Redacted(), delay.Seconds())
select {
case <-ctx.Done():
return nil, ctx.Err()
@@ -533,12 +592,12 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// Note that no exponential back off is performed when receiving an http 429 status code.
-func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
- req, err := http.NewRequestWithContext(ctx, method, url, stream)
+func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+ req, err := http.NewRequestWithContext(ctx, method, url.String(), stream)
if err != nil {
return nil, err
}
- if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
+ if streamLen != -1 { // Do not blindly overwrite if streamLen == -1; http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it.
req.ContentLength = streamLen
}
req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
@@ -553,7 +612,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method,
return nil, err
}
}
- logrus.Debugf("%s %s", method, url)
+ logrus.Debugf("%s %s", method, url.Redacted())
res, err := c.client.Do(req)
if err != nil {
return nil, err
@@ -585,8 +644,18 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
cacheKey := ""
scopes := []authScope{c.scope}
if extraScope != nil {
- // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
- cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
+ // Using ':' as a separator here is unambiguous because getBearerToken below
+ // uses the same separator when formatting a remote request (and because
+ // repository names that we create can't contain colons, and extraScope values
+ // coming from a server come from `parseAuthScope`, which also splits on colons).
+ cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions)
+ if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 {
+ return fmt.Errorf(
+ "Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d",
+ cacheKey,
+ colonCount,
+ )
+ }
scopes = append(scopes, *extraScope)
}
var token bearerToken
@@ -627,7 +696,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
scopes []authScope) (*bearerToken, error) {
realm, ok := challenge.Parameters["realm"]
if !ok {
- return nil, errors.Errorf("missing realm in bearer auth challenge")
+ return nil, errors.New("missing realm in bearer auth challenge")
}
authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil)
@@ -641,19 +710,20 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
if service, ok := challenge.Parameters["service"]; ok && service != "" {
params.Add("service", service)
}
+
for _, scope := range scopes {
- if scope.remoteName != "" && scope.actions != "" {
- params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+ if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
+ params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
}
}
params.Add("grant_type", "refresh_token")
params.Add("refresh_token", c.auth.IdentityToken)
params.Add("client_id", "containers/image")
- authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
+ authReq.Body = io.NopCloser(strings.NewReader(params.Encode()))
authReq.Header.Add("User-Agent", c.userAgent)
authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
- logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
+ logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
res, err := c.client.Do(authReq)
if err != nil {
return nil, err
@@ -675,7 +745,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
scopes []authScope) (*bearerToken, error) {
realm, ok := challenge.Parameters["realm"]
if !ok {
- return nil, errors.Errorf("missing realm in bearer auth challenge")
+ return nil, errors.New("missing realm in bearer auth challenge")
}
authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
@@ -693,8 +763,8 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
}
for _, scope := range scopes {
- if scope.remoteName != "" && scope.actions != "" {
- params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+ if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
+ params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
}
}
@@ -705,13 +775,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
}
authReq.Header.Add("User-Agent", c.userAgent)
- logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
+ logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
res, err := c.client.Do(authReq)
if err != nil {
return nil, err
}
defer res.Body.Close()
- if err := httpResponseToError(res, "Requesting bear token"); err != nil {
+ if err := httpResponseToError(res, "Requesting bearer token"); err != nil {
return nil, err
}
tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
@@ -735,14 +805,17 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
c.client = &http.Client{Transport: tr}
ping := func(scheme string) error {
- url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
+ url, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry))
+ if err != nil {
+ return err
+ }
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err != nil {
- logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
+ logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err)
return err
}
defer resp.Body.Close()
- logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+ logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
return httpResponseToError(resp, "")
}
@@ -756,20 +829,23 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
err = ping("http")
}
if err != nil {
- err = errors.Wrapf(err, "pinging container registry %s", c.registry)
+ err = fmt.Errorf("pinging container registry %s: %w", c.registry, err)
if c.sys != nil && c.sys.DockerDisableV1Ping {
return err
}
// best effort to understand if we're talking to a V1 registry
pingV1 := func(scheme string) bool {
- url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
+ url, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry))
+ if err != nil {
+ return false
+ }
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err != nil {
- logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
+ logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err)
return false
}
defer resp.Body.Close()
- logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+ logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
return false
}
@@ -793,6 +869,176 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return c.detectPropertiesError
}
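+// fetchManifest fetches a manifest for tagOrDigest and returns the raw manifest bytes
+// together with the (simplified) Content-Type reported by the registry.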
+func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) {
+ path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest)
+ headers := map[string][]string{
+ "Accept": manifest.DefaultRequestedManifestMIMETypes,
+ }
+ res, err := c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, "", fmt.Errorf("reading manifest %s in %s: %w", tagOrDigest, ref.ref.Name(), registryHTTPResponseToError(res))
+ }
+
+ manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
+ if err != nil {
+ return nil, "", err
+ }
+ return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
+}
+
+// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
+// This function can return a nil reader if none of the URLs is supported by this function. In this case,
+// the caller should fall back to fetching the non-external blob (i.e. pull from the registry).
+func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
+ var (
+ resp *http.Response
+ err error
+ )
+ if len(urls) == 0 {
+ return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
+ }
+ for _, u := range urls {
+ blobURL, parseErr := url.Parse(u)
+ if parseErr != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") {
+ continue // unsupported URL; skip it
+ }
+ // NOTE: we must not authenticate on additional URLs as those
+ // can be abused to leak credentials or tokens. Please
+ // refer to CVE-2020-15157 for more information.
+ resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
+ if err == nil {
+ if resp.StatusCode != http.StatusOK {
+ err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
+ logrus.Debug(err)
+ resp.Body.Close()
+ continue
+ }
+ break
+ }
+ }
+ if resp == nil && err == nil {
+ return nil, 0, nil // fallback to non-external blob
+ }
+ if err != nil {
+ return nil, 0, err
+ }
+ return resp.Body, getBlobSize(resp), nil
+}
+
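+// getBlobSize returns the blob size from the Content-Length header of resp,
+// or -1 if the header is missing or malformed.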
+func getBlobSize(resp *http.Response) int64 {
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ size = -1
+ }
+ return size
+}
+
+// getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ r, s, err := c.getExternalBlob(ctx, info.URLs)
+ if err != nil {
+ return nil, 0, err
+ } else if r != nil {
+ return r, s, nil
+ }
+ }
+
+ path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String())
+ logrus.Debugf("Downloading %s", path)
+ res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ if err := httpResponseToError(res, "Error fetching blob"); err != nil {
+ res.Body.Close()
+ return nil, 0, err
+ }
+ cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref))
+ return res.Body, getBlobSize(res), nil
+}
+
+// getOCIDescriptorContents returns the contents of the blob specified by descriptor in ref, which must fit within maxSize.
+func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
+ // Note that this copies all kinds of attachments: attestations, and whatever else is there,
+ // not just signatures. We leave the signature consumers to decide based on the MIME type.
+ reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
+ if err != nil {
+ return nil, err
+ }
+ defer reader.Close()
+ payload, err := iolimits.ReadAtMost(reader, maxSize)
+ if err != nil {
+ return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
+ }
+ return payload, nil
+}
+
+// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error.
+func isManifestUnknownError(err error) bool {
+ var errs errcode.Errors
+ if errors.As(err, &errs) && len(errs) != 0 {
+ firstErr := errs[0]
+ // docker/distribution, and as defined in the spec
+ var ec errcode.ErrorCoder
+ if errors.As(firstErr, &ec) && ec.ErrorCode() == v2.ErrorCodeManifestUnknown {
+ return true
+ }
+ // registry.redhat.io as of October 2022
+ var e errcode.Error
+ if errors.As(firstErr, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
+ return true
+ }
+ }
+ // ALSO registry.redhat.io as of October 2022
+ var unexpected *unexpectedHTTPResponseError
+ if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound && bytes.Contains(unexpected.Response, []byte("Not found")) {
+ return true
+ }
+ return false
+}
+
+// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for
+// digest in ref.
+// It returns (nil, nil) if the manifest does not exist.
+func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) {
+ tag := sigstoreAttachmentTag(digest)
+ sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("Looking for sigstore attachments in %s", sigstoreRef.String())
+ manifestBlob, mimeType, err := c.fetchManifest(ctx, ref, tag)
+ if err != nil {
+ // FIXME: Are we going to need better heuristics??
+ // This alone is probably a good enough reason for sigstore to be opt-in only,
+ // otherwise we would just break ordinary copies.
+ if isManifestUnknownError(err) {
+ logrus.Debugf("Fetching sigstore attachment manifest failed, assuming it does not exist: %v", err)
+ return nil, nil
+ }
+ logrus.Debugf("Fetching sigstore attachment manifest failed: %v", err)
+ return nil, err
+ }
+ if mimeType != imgspecv1.MediaTypeImageManifest {
+ // FIXME: Try anyway??
+ return nil, fmt.Errorf("unexpected MIME type for sigstore attachment manifest %s: %q",
+ sigstoreRef.String(), mimeType)
+ }
+ res, err := manifest.OCI1FromManifest(manifestBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing manifest %s: %w", sigstoreRef.String(), err)
+ }
+ return res, nil
+}
+
// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
@@ -804,7 +1050,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
- return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
+ return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), handleErrorResponse(res))
}
body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
@@ -814,7 +1060,12 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
var parsedBody extensionSignatureList
if err := json.Unmarshal(body, &parsedBody); err != nil {
- return nil, errors.Wrapf(err, "decoding signature list")
+ return nil, fmt.Errorf("decoding signature list: %w", err)
}
return &parsedBody, nil
}
+
+// sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest.
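+// For example, the digest sha256:<hex> maps to the tag "sha256-<hex>.sig", matching the tag convention used by cosign.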
+func sigstoreAttachmentTag(d digest.Digest) string {
+ return strings.Replace(d.String(), ":", "-", 1) + ".sig"
+}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index c84bb37d2ab..3e8dbbee134 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -3,17 +3,17 @@ package docker
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
@@ -56,13 +56,17 @@ func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) {
dr, ok := ref.(dockerReference)
if !ok {
- return nil, errors.Errorf("ref must be a dockerReference")
+ return nil, errors.New("ref must be a dockerReference")
}
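+ // Load the registries.d configuration up front; the client needs it, e.g. for lookaside and sigstore attachment settings.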
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
path := fmt.Sprintf(tagsPath, reference.Path(dr.ref))
- client, err := newDockerClientFromRef(sys, dr, false, "pull")
+ client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull")
if err != nil {
- return nil, errors.Wrap(err, "failed to create client")
+ return nil, fmt.Errorf("failed to create client: %w", err)
}
tags := make([]string, 0)
@@ -116,7 +120,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) {
dr, ok := ref.(dockerReference)
if !ok {
- return "", errors.Errorf("ref must be a dockerReference")
+ return "", errors.New("ref must be a dockerReference")
}
tagOrDigest, err := dr.tagOrDigest()
@@ -124,9 +128,13 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
return "", err
}
- client, err := newDockerClientFromRef(sys, dr, false, "pull")
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return "", err
+ }
+ client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull")
if err != nil {
- return "", errors.Wrap(err, "failed to create client")
+ return "", fmt.Errorf("failed to create client: %w", err)
}
path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest)
@@ -141,7 +149,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
- return "", errors.Wrapf(registryHTTPResponseToError(res), "reading digest %s in %s", tagOrDigest, dr.ref.Name())
+ return "", fmt.Errorf("reading digest %s in %s: %w", tagOrDigest, dr.ref.Name(), registryHTTPResponseToError(res))
}
dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest"))
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index 80701a76162..bcbae7db144 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -5,9 +5,9 @@ import (
"context"
"crypto/rand"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"os"
@@ -16,8 +16,13 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/internal/uploadreader"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
@@ -26,11 +31,14 @@ import (
v2 "github.com/docker/distribution/registry/api/v2"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type dockerImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+
ref dockerReference
c *dockerClient
// State
@@ -38,15 +46,40 @@ type dockerImageDestination struct {
}
// newImageDestination creates a new ImageDestination for the specified image reference.
-func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
- c, err := newDockerClientFromRef(sys, ref, true, "pull,push")
+func newImageDestination(sys *types.SystemContext, ref dockerReference) (private.ImageDestination, error) {
+ registryConfig, err := loadRegistryConfiguration(sys)
if err != nil {
return nil, err
}
- return &dockerImageDestination{
+ c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "pull,push")
+ if err != nil {
+ return nil, err
+ }
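+ // Manifest MIME types this destination is willing to accept; schema1 types are appended below unless explicitly disabled.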
+ mimeTypes := []string{
+ imgspecv1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ imgspecv1.MediaTypeImageIndex,
+ manifest.DockerV2ListMediaType,
+ }
+ if c.sys == nil || !c.sys.DockerDisableDestSchema1MIMETypes {
+ mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType)
+ }
+
+ dest := &dockerImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: mimeTypes,
+ DesiredLayerCompression: types.Compress,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
+ HasThreadSafePutBlob: true,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
ref: ref,
c: c,
- }, nil
+ }
+ dest.Compat = impl.AddCompat(dest)
+ return dest, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -60,19 +93,6 @@ func (d *dockerImageDestination) Close() error {
return nil
}
-func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
- mimeTypes := []string{
- imgspecv1.MediaTypeImageManifest,
- manifest.DockerV2Schema2MediaType,
- imgspecv1.MediaTypeImageIndex,
- manifest.DockerV2ListMediaType,
- }
- if d.c.sys == nil || !d.c.sys.DockerDisableDestSchema1MIMETypes {
- mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType)
- }
- return mimeTypes
-}
-
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
@@ -85,32 +105,16 @@ func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
case d.c.signatureBase != nil:
return nil
default:
- return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
}
}
-func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Compress
-}
-
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
-}
-
// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }
@@ -119,24 +123,31 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
return len(p), nil
}
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
- return true
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
+// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
+ // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
+ // the source blob is uncompressed, and the destination blob is being compressed "on the fly".
+ if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests {
+ logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
+ streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer cleanup()
+ stream = streamCopy
+ }
+
if inputInfo.Digest != "" {
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
- // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
- haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false)
+ haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache)
if err != nil {
return types.BlobInfo{}, err
}
@@ -155,11 +166,11 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
- return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "initiating layer upload to %s in %s", uploadPath, d.c.registry)
+ return types.BlobInfo{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
}
uploadLocation, err := res.Location()
if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "determining upload URL")
+ return types.BlobInfo{}, fmt.Errorf("determining upload URL: %w", err)
}
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
@@ -171,18 +182,18 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// This error text should never be user-visible, we terminate only after makeRequestToResolvedURL
// returns, so there isn’t a way for the error text to be provided to any of our callers.
defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
- res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
+ res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
if err != nil {
logrus.Debugf("Error uploading layer chunked %v", err)
return nil, err
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
- return nil, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer chunked")
+ return nil, fmt.Errorf("uploading layer chunked: %w", registryHTTPResponseToError(res))
}
uploadLocation, err := res.Location()
if err != nil {
- return nil, errors.Wrap(err, "determining upload URL")
+ return nil, fmt.Errorf("determining upload URL: %w", err)
}
return uploadLocation, nil
}()
@@ -196,18 +207,18 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
locationQuery := uploadLocation.Query()
locationQuery.Set("digest", blobDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
- res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
+ res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
- return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer to %s", uploadLocation)
+ return types.BlobInfo{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
}
logrus.Debugf("Upload of layer %s complete", blobDigest)
- cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
+ options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil
}
@@ -228,12 +239,12 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
- return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "checking whether a blob %s exists in %s", digest, repo.Name())
+ return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res))
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
default:
- return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
+ return false, -1, fmt.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
}
}
@@ -246,9 +257,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
"from": {reference.Path(srcRepo)},
}.Encode(),
}
- mountPath := u.String()
- logrus.Debugf("Trying to mount %s", mountPath)
- res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope)
+ logrus.Debugf("Trying to mount %s", u.Redacted())
+ res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope)
if err != nil {
return err
}
@@ -263,10 +273,10 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
uploadLocation, err := res.Location()
if err != nil {
- return errors.Wrap(err, "determining upload URL after a mount attempt")
+ return fmt.Errorf("determining upload URL after a mount attempt: %w", err)
}
- logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
- res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+ logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted())
+ res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope)
if err != nil {
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
} else {
@@ -279,37 +289,48 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
default:
logrus.Debugf("Error mounting, response %#v", *res)
- return errors.Wrapf(registryHTTPResponseToError(res), "mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ return fmt.Errorf("mounting %s from %s to %s: %w", srcDigest, srcRepo.Name(), d.ref.ref.Name(), registryHTTPResponseToError(res))
}
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
+// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
+// The caller must ensure info.Digest is set.
+func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, types.BlobInfo, error) {
+ exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ if exists {
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
+ }
+ return false, types.BlobInfo{}, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
}
// First, check whether the blob happens to already exist at the destination.
- exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+ haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
if err != nil {
return false, types.BlobInfo{}, err
}
- if exists {
- cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
- return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
+ if haveBlob {
+ return true, reusedInfo, nil
}
// Then try reusing blobs from other locations.
- bic := blobinfocache.FromBlobInfoCache(cache)
- candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute)
+ candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
for _, candidate := range candidates {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
@@ -337,8 +358,9 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
// Checking candidateRepo, and mounting from it, requires an
// expanded token scope.
extraScope := &authScope{
- remoteName: reference.Path(candidateRepo),
- actions: "pull",
+ resourceType: "repository",
+ remoteName: reference.Path(candidateRepo),
+ actions: "pull",
}
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
@@ -363,7 +385,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
}
}
- bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+ options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
@@ -394,14 +416,14 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
// Double-check that the manifest we've been given matches the digest we've been given.
matches, err := manifest.MatchesDigest(m, *instanceDigest)
if err != nil {
- return errors.Wrapf(err, "digesting manifest in PutManifest")
+ return fmt.Errorf("digesting manifest in PutManifest: %w", err)
}
if !matches {
manifestDigest, merr := manifest.Digest(m)
if merr != nil {
- return errors.Wrapf(err, "Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%v attempting to compute it)", instanceDigest.String(), merr)
+ return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest: %w", instanceDigest.String(), merr)
}
- return errors.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
+ return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
}
} else {
// Compute the digest of the main manifest, or the list if it's a list, so that we
@@ -419,7 +441,12 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
}
}
- path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
+ return d.uploadManifest(ctx, m, refTail)
+}
+
+// uploadManifest writes manifest to tagOrDigest.
+func (d *dockerImageDestination) uploadManifest(ctx context.Context, m []byte, tagOrDigest string) error {
+ path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), tagOrDigest)
headers := map[string][]string{}
mimeType := manifest.GuessMIMEType(m)
@@ -432,12 +459,25 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
- err = errors.Wrapf(registryHTTPResponseToError(res), "uploading manifest %s to %s", refTail, d.ref.ref.Name())
- if isManifestInvalidError(errors.Cause(err)) {
+ rawErr := registryHTTPResponseToError(res)
+ err := fmt.Errorf("uploading manifest %s to %s: %w", tagOrDigest, d.ref.ref.Name(), rawErr)
+ if isManifestInvalidError(rawErr) {
err = types.ManifestTypeRejectedError{Err: err}
}
return err
}
+ // An HTTP server may not be a registry at all, and just return 200 OK to everything
+ // (in particular that can fairly easily happen after tearing down a website and
+ // replacing it with a global 302 redirect to a new website, completely ignoring the
+ // path in the request); in that case we could “succeed” uploading a whole image.
+ // With docker/distribution we could rely on a Docker-Content-Digest header being present
+ // (because docker/distribution/registry/client has been failing uploads if it was missing),
+ // but that has been defined as explicitly optional by
+ // https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers
+ // So, just note the missing header in a debug log.
+ if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 {
+ logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry")
+ }
return nil
}
@@ -478,38 +518,63 @@ func isManifestInvalidError(err error) bool {
}
}
-// PutSignatures uploads a set of signatures to the relevant lookaside or API extension point.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to upload the signatures for (when
-// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- // Do not fail if we don’t really need to support signatures.
- if len(signatures) == 0 {
- return nil
- }
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
if instanceDigest == nil {
if d.manifestDigest == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
- return errors.Errorf("Unknown manifest digest, can't add signatures")
+ return errors.New("Unknown manifest digest, can't add signatures")
}
instanceDigest = &d.manifestDigest
}
- if err := d.c.detectProperties(ctx); err != nil {
- return err
+ sigstoreSignatures := []signature.Sigstore{}
+ otherSignatures := []signature.Signature{}
+ for _, sig := range signatures {
+ if sigstoreSig, ok := sig.(signature.Sigstore); ok {
+ sigstoreSignatures = append(sigstoreSignatures, sigstoreSig)
+ } else {
+ otherSignatures = append(otherSignatures, sig)
+ }
}
- switch {
- case d.c.supportsSignatures:
- return d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest)
- case d.c.signatureBase != nil:
- return d.putSignaturesToLookaside(signatures, *instanceDigest)
- default:
- return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+
+ // Only write sigstore signatures to sigstore attachments. We _could_ store them to lookaside
+ // instead, but that would probably be rather surprising.
+ // FIXME: So should we enable sigstore attachments in all cases? Or write in all cases, but opt-in to read?
+
+ if len(sigstoreSignatures) != 0 {
+ if err := d.putSignaturesToSigstoreAttachments(ctx, sigstoreSignatures, *instanceDigest); err != nil {
+ return err
+ }
+ }
+
+ if len(otherSignatures) != 0 {
+ if err := d.c.detectProperties(ctx); err != nil {
+ return err
+ }
+ switch {
+ case d.c.supportsSignatures:
+ if err := d.putSignaturesToAPIExtension(ctx, otherSignatures, *instanceDigest); err != nil {
+ return err
+ }
+ case d.c.signatureBase != nil:
+ if err := d.putSignaturesToLookaside(otherSignatures, *instanceDigest); err != nil {
+ return err
+ }
+ default:
+ return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ }
}
+
+ return nil
}
-// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
+// putSignaturesToLookaside implements PutSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
// which is not nil, for a manifest with manifestDigest.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, manifestDigest digest.Digest) error {
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature.Signature, manifestDigest digest.Digest) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
@@ -520,7 +585,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
// NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures {
- url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
+ url := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
err := d.putOneSignature(url, signature)
if err != nil {
return err
@@ -532,7 +597,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
- url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
+ url := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
missing, err := d.c.deleteOneSignature(url)
if err != nil {
return err
@@ -545,9 +610,9 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
return nil
}
-// putOneSignature stores one signature to url.
+// putOneSignature stores sig to url.
// NOTE: Keep this in sync with docs/signature-protocols.md!
-func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
+func (d *dockerImageDestination) putOneSignature(url *url.URL, sig signature.Signature) error {
switch url.Scheme {
case "file":
logrus.Debugf("Writing to %s", url.Path)
@@ -555,19 +620,158 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
if err != nil {
return err
}
- err = ioutil.WriteFile(url.Path, signature, 0644)
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile(url.Path, blob, 0644)
if err != nil {
return err
}
return nil
case "http", "https":
- return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+ return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", url.Scheme, url.Redacted())
default:
- return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
+ return fmt.Errorf("Unsupported scheme when writing signature to %s", url.Redacted())
}
}
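+// putSignaturesToSigstoreAttachments implements PutSignaturesWithFormat() for sigstore signatures,
+// storing them as layers of an OCI image manifest tagged sigstoreAttachmentTag(manifestDigest).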
+func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.Context, signatures []signature.Sigstore, manifestDigest digest.Digest) error {
+ if !d.c.useSigstoreAttachments {
+ return errors.New("writing sigstore attachments is disabled by configuration")
+ }
+
+ ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest)
+ if err != nil {
+ return err
+ }
+ var ociConfig imgspecv1.Image // Most fields empty by default
+ if ociManifest == nil {
+ ociManifest = manifest.OCI1FromComponents(imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Digest: "", // We will fill this in later.
+ Size: 0,
+ }, nil)
+ ociConfig.RootFS.Type = "layers"
+ } else {
+ logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String())
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
+ configBlob, err := d.c.getOCIDescriptorContents(ctx, d.ref, ociManifest.Config, iolimits.MaxConfigBodySize,
+ none.NoCache)
+ if err != nil {
+ return err
+ }
+ if err := json.Unmarshal(configBlob, &ociConfig); err != nil {
+ return fmt.Errorf("parsing sigstore attachment config %s in %s: %w", ociManifest.Config.Digest.String(),
+ d.ref.ref.Name(), err)
+ }
+ }
+
+ for _, sig := range signatures {
+ mimeType := sig.UntrustedMIMEType()
+ payloadBlob := sig.UntrustedPayload()
+ annotations := sig.UntrustedAnnotations()
+
+ alreadyOnRegistry := false
+ for _, layer := range ociManifest.Layers {
+ if layerMatchesSigstoreSignature(layer, mimeType, payloadBlob, annotations) {
+ logrus.Debugf("Signature with digest %s already exists on the registry", layer.Digest.String())
+ alreadyOnRegistry = true
+ break
+ }
+ }
+ if alreadyOnRegistry {
+ continue
+ }
+
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads.
+ // That might eventually need to change if payloads grow to be not just signatures, but something
+ // significantly large.
+ sigDesc, err := d.putBlobBytesAsOCI(ctx, payloadBlob, mimeType, private.PutBlobOptions{
+ Cache: none.NoCache,
+ IsConfig: false,
+ EmptyLayer: false,
+ LayerIndex: nil,
+ })
+ if err != nil {
+ return err
+ }
+ sigDesc.Annotations = annotations
+ ociManifest.Layers = append(ociManifest.Layers, sigDesc)
+ ociConfig.RootFS.DiffIDs = append(ociConfig.RootFS.DiffIDs, sigDesc.Digest)
+ logrus.Debugf("Adding new signature, digest %s", sigDesc.Digest.String())
+ }
+
+ configBlob, err := json.Marshal(ociConfig)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Uploading updated sigstore attachment config")
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
+ configDesc, err := d.putBlobBytesAsOCI(ctx, configBlob, imgspecv1.MediaTypeImageConfig, private.PutBlobOptions{
+ Cache: none.NoCache,
+ IsConfig: true,
+ EmptyLayer: false,
+ LayerIndex: nil,
+ })
+ if err != nil {
+ return err
+ }
+ ociManifest.Config = configDesc
+
+ manifestBlob, err := ociManifest.Serialize()
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Uploading sigstore attachment manifest")
+ return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest))
+}
+
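+// layerMatchesSigstoreSignature returns true if layer already records a sigstore signature with the
+// given MIME type, payload and annotations, so the signature does not need to be uploaded again.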
+func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
+ payloadBlob []byte, annotations map[string]string) bool {
+ if layer.MediaType != mimeType ||
+ layer.Size != int64(len(payloadBlob)) ||
+ // This is not quite correct, we should use the layer’s digest algorithm.
+ // But right now we don’t want to deal with corner cases like bad digest formats
+ // or unavailable algorithms; in the worst case we end up with duplicate signature
+ // entries.
+ layer.Digest.String() != digest.FromBytes(payloadBlob).String() {
+ return false
+ }
+ if len(layer.Annotations) != len(annotations) {
+ return false
+ }
+ for k, v1 := range layer.Annotations {
+ if v2, ok := annotations[k]; !ok || v1 != v2 {
+ return false
+ }
+ }
+ // All annotations in layer exist in sig, and the number of annotations is the same, so all annotations
+ // in sig also exist in layer.
+ return true
+}
+
+// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate
+// OCI descriptor.
+func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) {
+ blobDigest := digest.FromBytes(contents)
+ info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents),
+ types.BlobInfo{
+ Digest: blobDigest,
+ Size: int64(len(contents)),
+ MediaType: mimeType,
+ }, options)
+ if err != nil {
+ return imgspecv1.Descriptor{}, fmt.Errorf("writing blob %s: %w", blobDigest.String(), err)
+ }
+ return imgspecv1.Descriptor{
+ MediaType: mimeType,
+ Digest: info.Digest,
+ Size: info.Size,
+ }, nil
+}
+
// deleteOneSignature deletes a signature from url, if it exists.
// If it successfully determines that the signature does not exist, returns (true, nil)
// NOTE: Keep this in sync with docs/signature-protocols.md!
@@ -582,15 +786,15 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
return false, err
case "http", "https":
- return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+ return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", url.Scheme, url.Redacted())
default:
- return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
+ return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted())
}
}
-// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension,
+// putSignaturesToAPIExtension implements PutSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension,
// for a manifest with manifestDigest.
-func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, manifestDigest digest.Digest) error {
+func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures []signature.Signature, manifestDigest digest.Digest) error {
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
if len(signatures) == 0 {
return nil
@@ -610,7 +814,13 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
}
sigExists:
- for _, newSig := range signatures {
+ for _, newSigWithFormat := range signatures {
+ newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(newSigWithFormat)
+ }
+ newSig := newSigSimple.UntrustedSignature()
+
for _, existingSig := range existingSignatures.Signatures {
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
@@ -623,7 +833,7 @@ sigExists:
randBytes := make([]byte, 16)
n, err := rand.Read(randBytes)
if err != nil || n != 16 {
- return errors.Wrapf(err, "generating random signature len %d", n)
+ return fmt.Errorf("generating random signature len %d: %w", n, err)
}
signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
@@ -648,12 +858,8 @@ sigExists:
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
- body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxErrorBodySize)
- if err == nil {
- logrus.Debugf("Error body %s", string(body))
- }
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
- return errors.Wrapf(registryHTTPResponseToError(res), "uploading signature to %s in %s", path, d.c.registry)
+ return fmt.Errorf("uploading signature to %s in %s: %w", path, d.c.registry, registryHTTPResponseToError(res))
}
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 1333cf9e2ef..b0e87797102 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -2,30 +2,38 @@ package docker
import (
"context"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"mime"
"mime/multipart"
"net/http"
"net/url"
"os"
- "strconv"
+ "regexp"
"strings"
"sync"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/iolimits"
- internalTypes "github.com/containers/image/v5/internal/types"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type dockerImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.ImplementsGetBlobAt
+
logicalRef dockerReference // The reference the user requested.
physicalRef dockerReference // The actual reference we are accessing (possibly a mirror)
c *dockerClient
@@ -37,9 +45,13 @@ type dockerImageSource struct {
// newImageSource creates a new ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
if err != nil {
- return nil, errors.Wrapf(err, "loading registries configuration")
+ return nil, fmt.Errorf("loading registries configuration: %w", err)
}
if registry == nil {
// No configuration was found for the provided reference, so use the
@@ -72,7 +84,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
} else {
logrus.Debugf("Trying to access %q", pullSource.Reference)
}
- s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
+ s, err := newImageSourceAttempt(ctx, sys, ref, pullSource, registryConfig)
if err == nil {
return s, nil
}
@@ -96,14 +108,15 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
// The paired [] at least have some chance of being unambiguous.
extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
}
- return nil, errors.Wrapf(primary.err, "(Mirrors also failed: %s): %s", strings.Join(extras, "\n"), primary.ref.String())
+ return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err)
}
}
// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource.
// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable.
// The caller must call .Close() on the returned ImageSource.
-func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource) (*dockerImageSource, error) {
+func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource,
+ registryConfig *registryConfiguration) (*dockerImageSource, error) {
physicalRef, err := newReference(pullSource.Reference)
if err != nil {
return nil, err
@@ -118,17 +131,22 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica
endpointSys = &copy
}
- client, err := newDockerClientFromRef(endpointSys, physicalRef, false, "pull")
+ client, err := newDockerClientFromRef(endpointSys, physicalRef, registryConfig, false, "pull")
if err != nil {
return nil, err
}
client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
s := &dockerImageSource{
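+ // GetBlob is safe to call concurrently; advertise that through the shared property helpers.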
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+
logicalRef: logicalRef,
physicalRef: physicalRef,
c: client,
}
+ s.Compat = impl.AddCompat(s)
if err := s.ensureManifestIsLoaded(ctx); err != nil {
return nil, err
@@ -147,18 +165,6 @@ func (s *dockerImageSource) Close() error {
return nil
}
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *dockerImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
-
// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
func simplifyContentType(contentType string) string {
@@ -188,25 +194,7 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig
}
func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
- path := fmt.Sprintf(manifestPath, reference.Path(s.physicalRef.ref), tagOrDigest)
- headers := map[string][]string{
- "Accept": manifest.DefaultRequestedManifestMIMETypes,
- }
- res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
- if err != nil {
- return nil, "", err
- }
- logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
- defer res.Body.Close()
- if res.StatusCode != http.StatusOK {
- return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
- }
-
- manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
- if err != nil {
- return nil, "", err
- }
- return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
+ return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest)
}
// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
@@ -236,50 +224,8 @@ func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
return nil
}
-func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
- var (
- resp *http.Response
- err error
- )
- if len(urls) == 0 {
- return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
- }
- for _, url := range urls {
- // NOTE: we must not authenticate on additional URLs as those
- // can be abused to leak credentials or tokens. Please
- // refer to CVE-2020-15157 for more information.
- resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
- if err == nil {
- if resp.StatusCode != http.StatusOK {
- err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
- logrus.Debug(err)
- resp.Body.Close()
- continue
- }
- break
- }
- }
- if err != nil {
- return nil, 0, err
- }
- return resp.Body, getBlobSize(resp), nil
-}
-
-func getBlobSize(resp *http.Response) int64 {
- size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
- if err != nil {
- size = -1
- }
- return size
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
- return true
-}
-
// splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks
-func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk) {
+func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) {
defer close(streams)
defer close(errs)
currentOffset := uint64(0)
@@ -293,7 +239,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
break
}
toSkip := c.Offset - currentOffset
- if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
+ if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
errs <- err
break
}
@@ -301,7 +247,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
}
s := signalCloseReader{
closed: make(chan interface{}),
- stream: ioutil.NopCloser(io.LimitReader(body, int64(c.Length))),
+ stream: io.NopCloser(io.LimitReader(body, int64(c.Length))),
consumeStream: true,
}
streams <- s
@@ -313,7 +259,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
}
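The io.Discard and io.NopCloser changes in this function track Go 1.16's deprecation of io/ioutil; both are drop-in renames. A tiny sketch of the two identities, using throwaway sample strings:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// io.Discard and io.NopCloser (Go 1.16+) are the canonical spellings of
	// the old ioutil.Discard and ioutil.NopCloser used by the removed lines.
	n, _ := io.Copy(io.Discard, strings.NewReader("skipped bytes"))
	rc := io.NopCloser(strings.NewReader("payload"))
	defer rc.Close()
	fmt.Println(n) // 13
}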
// handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan.
-func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk, mediaType string, params map[string]string) {
+func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) {
defer close(streams)
defer close(errs)
if !strings.HasPrefix(mediaType, "multipart/") {
@@ -322,19 +268,23 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
}
boundary, found := params["boundary"]
if !found {
- errs <- errors.Errorf("could not find boundary")
+ errs <- errors.New("could not find boundary")
body.Close()
return
}
buffered := makeBufferedNetworkReader(body, 64, 16384)
defer buffered.Close()
mr := multipart.NewReader(buffered, boundary)
+ parts := 0
for {
p, err := mr.NextPart()
if err != nil {
if err != io.EOF {
errs <- err
}
+ if parts != len(chunks) {
+ errs <- errors.New("invalid number of chunks returned by the server")
+ }
return
}
s := signalCloseReader{
@@ -345,12 +295,40 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
// NextPart() cannot be called while the current part
// is being read, so wait until it is closed
<-s.closed
+ parts++
+ }
+}
+
+var multipartByteRangesRe = regexp.MustCompile("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")
+
+func parseMediaType(contentType string) (string, map[string]string, error) {
+ mediaType, params, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ if err == mime.ErrInvalidMediaParameter {
+ // CloudFront returns an invalid MIME type that contains an unquoted ":" in the
+ // boundary param; handle it here.
+ matches := multipartByteRangesRe.FindStringSubmatch(contentType)
+ if len(matches) == 2 {
+ mediaType = "multipart/byteranges"
+ params = map[string]string{
+ "boundary": matches[1],
+ }
+ err = nil
+ }
+ }
+ if err != nil {
+ return "", nil, err
+ }
}
+ return mediaType, params, err
}
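A quick check of parseMediaType's fallback path above; the Content-Type value is a made-up example of the unquoted-":" form the comment attributes to CloudFront:

package main

import (
	"fmt"
	"mime"
	"regexp"
)

// Same pattern as the new multipartByteRangesRe above.
var re = regexp.MustCompile("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")

func main() {
	ct := "multipart/byteranges; boundary=CloudFront:ABCDEF"
	if _, _, err := mime.ParseMediaType(ct); err != nil {
		// mime.ParseMediaType rejects the unquoted ":", so the regexp
		// recovers the boundary the way the new code does.
		if matches := re.FindStringSubmatch(ct); len(matches) == 2 {
			fmt.Println("recovered boundary:", matches[1]) // CloudFront:ABCDEF
		}
	}
}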
-// GetBlobAt returns a stream for the specified blob.
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
// The specified chunks must be not overlapping and sorted by their offset.
-func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
headers := make(map[string][]string)
var rangeVals []string
@@ -370,12 +348,6 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
if err != nil {
return nil, nil, err
}
- if err := httpResponseToError(res, "Error fetching partial blob"); err != nil {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, nil, err
- }
switch res.StatusCode {
case http.StatusOK:
@@ -386,7 +358,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks)
return streams, errs, nil
case http.StatusPartialContent:
- mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
+ mediaType, params, err := parseMediaType(res.Header.Get("Content-Type"))
if err != nil {
return nil, nil, err
}
@@ -396,9 +368,16 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
go handle206Response(streams, errs, res.Body, chunks, mediaType, params)
return streams, errs, nil
+ case http.StatusBadRequest:
+ res.Body.Close()
+ return nil, nil, private.BadPartialRequestError{Status: res.Status}
default:
+ err := httpResponseToError(res, "Error fetching partial blob")
+ if err == nil {
+ err = fmt.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ }
res.Body.Close()
- return nil, nil, errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ return nil, nil, err
}
}
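The new GetBlobAt contract above (drain each reader fully, in order, before blocking on the next) suggests a consumer shaped roughly like this sketch; consumeChunks and the sample channel contents are illustrative:

package main

import (
	"fmt"
	"io"
	"strings"
)

// consumeChunks drains each chunk reader fully, in the order received, before
// blocking on the next one: the ordering contract GetBlobAt documents above.
func consumeChunks(streams chan io.ReadCloser, errs chan error) error {
	for rc := range streams {
		if _, err := io.Copy(io.Discard, rc); err != nil { // replace with real chunk handling
			rc.Close()
			return err
		}
		rc.Close()
	}
	// Only after streams is exhausted should the single error value be read.
	if err, ok := <-errs; ok && err != nil {
		return err
	}
	return nil
}

func main() {
	streams := make(chan io.ReadCloser, 1)
	errs := make(chan error)
	streams <- io.NopCloser(strings.NewReader("chunk-0"))
	close(streams)
	close(errs)
	fmt.Println(consumeChunks(streams, errs)) // <nil>
}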
@@ -406,40 +385,41 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- if len(info.URLs) != 0 {
- return s.getExternalBlob(ctx, info.URLs)
- }
-
- path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
- logrus.Debugf("Downloading %s", path)
- res, err := s.c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
- if err != nil {
- return nil, 0, err
- }
- if err := httpResponseToError(res, "Error fetching blob"); err != nil {
- res.Body.Close()
- return nil, 0, err
- }
- cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef))
- return res.Body, getBlobSize(res), nil
+ return s.c.getBlob(ctx, s.physicalRef, info, cache)
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
-func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
if err := s.c.detectProperties(ctx); err != nil {
return nil, err
}
+ var res []signature.Signature
switch {
case s.c.supportsSignatures:
- return s.getSignaturesFromAPIExtension(ctx, instanceDigest)
+ sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, sigs...)
case s.c.signatureBase != nil:
- return s.getSignaturesFromLookaside(ctx, instanceDigest)
+ sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, sigs...)
default:
- return nil, errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ }
+
+ sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
}
+ res = append(res, sigstoreSigs...)
+ return res, nil
}
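GetSignaturesWithFormat now returns typed signature values instead of raw [][]byte, so callers can branch on the kind. The types below are self-contained stand-ins for the internal signature package, which this diff only shows indirectly:

package main

import "fmt"

// Signature is a stand-in for the internal interface; the real
// containers/image types are internal and richer than this sketch.
type Signature interface {
	Format() string
}

type simpleSigning struct {
	blob []byte
}

type sigstore struct {
	mimeType string
	payload  []byte
}

func (simpleSigning) Format() string { return "simple-signing" }
func (sigstore) Format() string      { return "sigstore-json" }

func main() {
	// GetSignaturesWithFormat above aggregates lookaside/API-extension
	// signatures and sigstore attachments into one typed slice.
	sigs := []Signature{
		simpleSigning{blob: []byte("openpgp blob")},
		sigstore{mimeType: "application/vnd.dev.cosign.simplesigning.v1+json"},
	}
	for _, s := range sigs {
		fmt.Println(s.Format())
	}
}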
// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference,
@@ -460,18 +440,18 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *
return manifest.Digest(s.cachedManifest)
}
-// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
+// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
-func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
return nil, err
}
// NOTE: Keep this in sync with docs/signature-protocols.md!
- signatures := [][]byte{}
+ signatures := []signature.Signature{}
for i := 0; ; i++ {
- url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
+ url := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
signature, missing, err := s.getOneSignature(ctx, url)
if err != nil {
return nil, err
@@ -484,24 +464,28 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst
return signatures, nil
}
-// getOneSignature downloads one signature from url.
-// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
+// getOneSignature downloads one signature from url, and returns (signature, false, nil).
+// If it successfully determines that the signature does not exist, returns (nil, true, nil).
// NOTE: Keep this in sync with docs/signature-protocols.md!
-func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) {
+func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature.Signature, bool, error) {
switch url.Scheme {
case "file":
logrus.Debugf("Reading %s", url.Path)
- sig, err := ioutil.ReadFile(url.Path)
+ sigBlob, err := os.ReadFile(url.Path)
if err != nil {
if os.IsNotExist(err) {
return nil, true, nil
}
return nil, false, err
}
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, false, fmt.Errorf("parsing signature %q: %w", url.Path, err)
+ }
return sig, false, nil
case "http", "https":
- logrus.Debugf("GET %s", url)
+ logrus.Debugf("GET %s", url.Redacted())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
if err != nil {
return nil, false, err
@@ -514,21 +498,25 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
if res.StatusCode == http.StatusNotFound {
return nil, true, nil
} else if res.StatusCode != http.StatusOK {
- return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode))
+ return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode))
}
- sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
+ sigBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
if err != nil {
return nil, false, err
}
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, false, fmt.Errorf("parsing signature %s: %w", url.Redacted(), err)
+ }
return sig, false, nil
default:
- return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String())
+ return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", url.Redacted())
}
}
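The logging above switches from url.String() to url.Redacted(), which masks a password embedded in the URL's userinfo. A two-line illustration with an invented lookaside URL:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// url.Redacted (Go 1.15+) prints "xxxxx" in place of an embedded password,
	// so lookaside URLs carrying credentials no longer leak into debug logs.
	u, _ := url.Parse("https://user:s3cret@lookaside.example.com/signatures/sig-1")
	fmt.Println(u.Redacted()) // https://user:xxxxx@lookaside.example.com/signatures/sig-1
}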
-// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension.
+func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
if err != nil {
return nil, err
@@ -539,17 +527,59 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, i
return nil, err
}
- var sigs [][]byte
+ var sigs []signature.Signature
for _, sig := range parsedBody.Signatures {
if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
- sigs = append(sigs, sig.Content)
+ sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
}
}
return sigs, nil
}
+func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ if !s.c.useSigstoreAttachments {
+ logrus.Debugf("Not looking for sigstore attachments: disabled by configuration")
+ return nil, nil
+ }
+
+ manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest)
+ if err != nil {
+ return nil, err
+ }
+ if ociManifest == nil {
+ return nil, nil
+ }
+
+ logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers))
+ res := []signature.Signature{}
+ for layerIndex, layer := range ociManifest.Layers {
+ // Note that this copies all kinds of attachments: attestations, and whatever else is there,
+ // not just signatures. We leave the signature consumers to decide based on the MIME type.
+ logrus.Debugf("Fetching sigstore attachment %d/%d: %s", layerIndex+1, len(ociManifest.Layers), layer.Digest.String())
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads.
+ // That might eventually need to change if payloads grow to be not just signatures, but something
+ // significantly large.
+ payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize,
+ none.NoCache)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
+ }
+ return res, nil
+}
+
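getSigstoreAttachmentManifest (defined elsewhere in this package, not shown in this hunk) presumably resolves the attachment by the cosign tag convention, where the tag is derived from the manifest digest. A sketch of that derivation; the helper name and the convention itself are assumptions here, not quotes from the diff:

package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

// sigstoreAttachmentTag derives the cosign-style attachment tag for a manifest
// digest, e.g. sha256:abc... becomes sha256-abc....sig. Assumed convention.
func sigstoreAttachmentTag(d digest.Digest) string {
	return strings.Replace(d.String(), ":", "-", 1) + ".sig"
}

func main() {
	d := digest.Digest("sha256:0123456789abcdef")
	fmt.Println(sigstoreAttachmentTag(d)) // sha256-0123456789abcdef.sig
}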
// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return err
+ }
// docker/distribution does not document what action should be used for deleting images.
//
// Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it.
@@ -557,7 +587,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
// OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user).
//
// We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything".
- c, err := newDockerClientFromRef(sys, ref, true, "*")
+ c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "*")
if err != nil {
return err
}
@@ -582,13 +612,16 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
switch get.StatusCode {
case http.StatusOK:
case http.StatusNotFound:
- return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
+ return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
default:
- return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
+ return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
}
- digest := get.Header.Get("Docker-Content-Digest")
- deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
+ manifestDigest, err := manifest.Digest(manifestBody)
+ if err != nil {
+ return fmt.Errorf("computing manifest digest: %w", err)
+ }
+ deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest)
// When retrieving the digest from a registry >= 2.3 use the following header:
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
@@ -603,16 +636,11 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return err
}
if delete.StatusCode != http.StatusAccepted {
- return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
- }
-
- manifestDigest, err := manifest.Digest(manifestBody)
- if err != nil {
- return err
+ return fmt.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
}
for i := 0; ; i++ {
- url := signatureStorageURL(c.signatureBase, manifestDigest, i)
+ url := lookasideStorageURL(c.signatureBase, manifestDigest, i)
missing, err := c.deleteOneSignature(url)
if err != nil {
return err
@@ -741,7 +769,7 @@ func (s signalCloseReader) Read(p []byte) (int, error) {
func (s signalCloseReader) Close() error {
defer close(s.closed)
if s.consumeStream {
- if _, err := io.Copy(ioutil.Discard, s.stream); err != nil {
+ if _, err := io.Copy(io.Discard, s.stream); err != nil {
s.stream.Close()
return err
}
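One behavioral note from the deleteImage hunks above: the DELETE path's digest is now computed locally from the fetched manifest body via manifest.Digest, rather than echoed back from the Docker-Content-Digest header. A sketch of the idea using plain canonical hashing; the manifest bytes are a placeholder:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Hashing the body we just read means a misbehaving registry cannot steer
	// the DELETE at a different manifest via a forged response header.
	// (manifest.Digest in the diff does this plus schema1-signature handling.)
	manifestBody := []byte(`{"schemaVersion":2}`) // placeholder bytes
	d := digest.FromBytes(manifestBody)
	fmt.Printf("DELETE .../manifests/%s\n", d)
}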
diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go
index 541e053f3c8..0544bb3c931 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_transport.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go
@@ -2,6 +2,7 @@ package docker
import (
"context"
+ "errors"
"fmt"
"strings"
@@ -9,7 +10,6 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func init() {
@@ -49,7 +49,7 @@ type dockerReference struct {
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
if !strings.HasPrefix(refString, "//") {
- return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
+ return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
}
ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
if err != nil {
@@ -67,7 +67,7 @@ func NewReference(ref reference.Named) (types.ImageReference, error) {
// newReference returns a dockerReference for a named reference.
func newReference(ref reference.Named) (dockerReference, error) {
if reference.IsNameOnly(ref) {
- return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ return dockerReference{}, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// The docker/distribution API does not really support that (we can’t ask for an image with a specific
@@ -77,7 +77,7 @@ func newReference(ref reference.Named) (dockerReference, error) {
_, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical)
if isTagged && isDigested {
- return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported")
+ return dockerReference{}, errors.New("Docker references with both a tag and digest are currently not supported")
}
return dockerReference{
@@ -164,5 +164,5 @@ func (ref dockerReference) tagOrDigest() (string, error) {
return ref.Tag(), nil
}
// This should not happen, NewReference above refuses reference.IsNameOnly values.
- return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
+ return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
}
diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go
index 6f2c5fde5e7..e1986554c55 100644
--- a/vendor/github.com/containers/image/v5/docker/errors.go
+++ b/vendor/github.com/containers/image/v5/docker/errors.go
@@ -4,10 +4,6 @@ import (
"errors"
"fmt"
"net/http"
-
- internalTypes "github.com/containers/image/v5/internal/types"
- "github.com/docker/distribution/registry/client"
- perrors "github.com/pkg/errors"
)
var (
@@ -29,37 +25,35 @@ func (e ErrUnauthorizedForCredentials) Error() string {
// httpResponseToError translates the http.Response into an error, possibly prefixing it with the supplied context. It returns
// nil if the response is not considered an error.
+// NOTE: Almost all callers in this package should use registryHTTPResponseToError instead.
func httpResponseToError(res *http.Response, context string) error {
switch res.StatusCode {
case http.StatusOK:
return nil
- case http.StatusPartialContent:
- return nil
case http.StatusTooManyRequests:
return ErrTooManyRequests
case http.StatusUnauthorized:
- err := client.HandleErrorResponse(res)
+ err := handleErrorResponse(res)
return ErrUnauthorizedForCredentials{Err: err}
- case http.StatusBadRequest:
- return internalTypes.BadPartialRequestError{Status: res.Status}
default:
if context != "" {
context = context + ": "
}
- return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
+ return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
}
}
// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
// registry
func registryHTTPResponseToError(res *http.Response) error {
- errResponse := client.HandleErrorResponse(res)
- if e, ok := perrors.Cause(errResponse).(*client.UnexpectedHTTPResponseError); ok {
+ err := handleErrorResponse(res)
+ if e, ok := err.(*unexpectedHTTPResponseError); ok {
response := string(e.Response)
if len(response) > 50 {
response = response[:50] + "..."
}
- errResponse = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
+ // %.0w makes e visible to error.Unwrap() without including any text
+ err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e)
}
- return errResponse
+ return err
}
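The %.0w in the hunk above is worth a standalone look: a zero-precision %w adds its operand to the unwrap chain while printing none of its text. A minimal sketch; the error type here is a local stand-in for this package's unexpectedHTTPResponseError:

package main

import (
	"errors"
	"fmt"
)

// Stand-in for the package's unexpectedHTTPResponseError.
type unexpectedHTTPResponseError struct {
	StatusCode int
	Response   []byte
}

func (e *unexpectedHTTPResponseError) Error() string { return "unexpected HTTP response" }

func main() {
	inner := &unexpectedHTTPResponseError{StatusCode: 500, Response: []byte("boom")}
	// %.0w: wrapped for errors.As/Unwrap, invisible in the formatted message.
	err := fmt.Errorf("StatusCode: %d, %s%.0w", inner.StatusCode, inner.Response, inner)
	fmt.Println(err) // StatusCode: 500, boom

	var target *unexpectedHTTPResponseError
	fmt.Println(errors.As(err, &target)) // true
}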
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
index 44b0af110ab..9a0ea683e6a 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
@@ -4,23 +4,29 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
- "os"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/internal/putblobdigest"
- "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
-// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
+// Destination is a partial implementation of private.ImageDestination for writing to an io.Writer.
type Destination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.NoSignaturesInitialize
+
archive *Writer
repoTags []reference.NamedTagged
// Other state.
@@ -29,16 +35,34 @@ type Destination struct {
}
// NewDestination returns a tarfile.Destination adding images to the specified Writer.
-func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination {
+func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged) *Destination {
repoTags := []reference.NamedTagged{}
if ref != nil {
repoTags = append(repoTags, ref)
}
- return &Destination{
+ dest := &Destination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+ },
+ DesiredLayerCompression: types.Decompress,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
+ // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
+ // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
+ // much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
+ HasThreadSafePutBlob: false,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartialRaw(transportName),
+ NoSignaturesInitialize: stubs.NoSignatures("Storing signatures for docker tar files is not supported"),
+
archive: archive,
repoTags: repoTags,
sysCtx: sys,
}
+ dest.Compat = impl.AddCompat(dest)
+ return dest
}
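The constructor above declares capabilities once through impl.PropertyMethods and delegates unsupported operations to stubs, replacing the hand-written getters deleted below. A generic, self-contained sketch of the embedding technique; these are not the library's actual types:

package main

import "fmt"

// properties bundles per-destination answers; embedding it supplies the
// getter methods once instead of hand-writing them on every destination.
type properties struct {
	hasThreadSafePutBlob bool
	supportedMIMETypes   []string
}

func (p properties) HasThreadSafePutBlob() bool           { return p.hasThreadSafePutBlob }
func (p properties) SupportedManifestMIMETypes() []string { return p.supportedMIMETypes }

// tarDestination gets both methods for free via embedding, the same way the
// diff's Destination embeds impl.PropertyMethodsInitialize and the stubs.
type tarDestination struct {
	properties
}

func main() {
	d := tarDestination{properties{
		hasThreadSafePutBlob: false,
		supportedMIMETypes:   []string{"application/vnd.docker.distribution.manifest.v2+json"},
	}}
	fmt.Println(d.HasThreadSafePutBlob(), d.SupportedManifestMIMETypes())
}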
// AddRepoTags adds the specified tags to the destination's repoTags.
@@ -46,77 +70,23 @@ func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
d.repoTags = append(d.repoTags, tags...)
}
-// SupportedManifestMIMETypes tells which manifest mime types the destination supports
-// If an empty slice or nil it's returned, then any mime type can be tried to upload
-func (d *Destination) SupportedManifestMIMETypes() []string {
- return []string{
- manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *Destination) SupportsSignatures(ctx context.Context) error {
- return errors.Errorf("Storing signatures for docker tar files is not supported")
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *Destination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *Destination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *Destination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *Destination) HasThreadSafePutBlob() bool {
- // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
- // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
- // much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
+// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
- streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
- if err != nil {
- return types.BlobInfo{}, err
- }
- defer os.Remove(streamCopy.Name())
- defer streamCopy.Close()
-
- digester, stream2 := putblobdigest.DigestIfUnknown(stream, inputInfo)
- // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(streamCopy, stream2)
- if err != nil {
- return types.BlobInfo{}, err
- }
- _, err = streamCopy.Seek(0, io.SeekStart)
+ streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
- inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
- inputInfo.Digest = digester.Digest()
+ defer cleanup()
stream = streamCopy
logrus.Debugf("... streaming done")
}
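The temp-file buffering deleted above now lives behind streamdigest.ComputeBlobInfo. A plausible shape for such a helper, reconstructed from the deleted lines rather than quoted from the streamdigest package:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

// computeBlobInfo spools a stream to a temp file while hashing it, filling in
// the unknown size and digest: roughly what the deleted lines did inline. The
// signature and cleanup-callback shape are assumptions based on this hunk.
func computeBlobInfo(stream io.Reader, size *int64, dgst *digest.Digest) (io.Reader, func(), error) {
	f, err := os.CreateTemp("", "blob-")
	if err != nil {
		return nil, nil, err
	}
	cleanup := func() { f.Close(); os.Remove(f.Name()) }
	digester := digest.Canonical.Digester()
	n, err := io.Copy(f, io.TeeReader(stream, digester.Hash()))
	if err != nil {
		cleanup()
		return nil, nil, err
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		cleanup()
		return nil, nil, err
	}
	*size, *dgst = n, digester.Digest()
	return f, cleanup, nil
}

func main() {
	size, dgst := int64(-1), digest.Digest("")
	r, cleanup, err := computeBlobInfo(strings.NewReader("layer bytes"), &size, &dgst)
	if err != nil {
		panic(err)
	}
	defer cleanup()
	io.Copy(io.Discard, r)
	fmt.Println(size, dgst)
}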
@@ -135,14 +105,14 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return reusedInfo, nil
}
- if isConfig {
+ if options.IsConfig {
buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "reading Config file stream")
+ return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err)
}
d.config = buf
if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
- return types.BlobInfo{}, errors.Wrap(err, "writing Config file")
+ return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err)
}
} else {
if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
@@ -153,16 +123,14 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if err := d.archive.lock(); err != nil {
return false, types.BlobInfo{}, err
}
@@ -185,10 +153,10 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest
// so the caller trying a different manifest kind would be pointless.
var man manifest.Schema2
if err := json.Unmarshal(m, &man); err != nil {
- return errors.Wrap(err, "parsing manifest")
+ return fmt.Errorf("parsing manifest: %w", err)
}
if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
- return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+ return errors.New("Unsupported manifest type, need a Docker schema 2 manifest")
}
if err := d.archive.lock(); err != nil {
@@ -202,16 +170,3 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest
return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
}
-
-// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
-// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
-// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
-func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- if instanceDigest != nil {
- return errors.Errorf(`Manifest lists are not supported for docker tar files`)
- }
- if len(signatures) != 0 {
- return errors.Errorf("Storing signatures for docker tar files is not supported")
- }
- return nil
-}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
index 6164ceb66ed..eec7b84e526 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
@@ -3,8 +3,9 @@ package tarfile
import (
"archive/tar"
"encoding/json"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
"os"
"path"
@@ -13,7 +14,6 @@ import (
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
@@ -30,7 +30,7 @@ type Reader struct {
func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
file, err := os.Open(path)
if err != nil {
- return nil, errors.Wrapf(err, "opening file %q", path)
+ return nil, fmt.Errorf("opening file %q: %w", path, err)
}
defer file.Close()
@@ -38,7 +38,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
// as a source. Otherwise we pass the stream to NewReaderFromStream.
stream, isCompressed, err := compression.AutoDecompress(file)
if err != nil {
- return nil, errors.Wrapf(err, "detecting compression for file %q", path)
+ return nil, fmt.Errorf("detecting compression for file %q: %w", path, err)
}
defer stream.Close()
if !isCompressed {
@@ -53,9 +53,9 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
// The caller should call .Close() on the returned archive when done.
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
// Save inputStream to a temporary file
- tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
+ tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
if err != nil {
- return nil, errors.Wrap(err, "creating temporary file")
+ return nil, fmt.Errorf("creating temporary file: %w", err)
}
defer tarCopyFile.Close()
@@ -71,7 +71,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
// giving users really confusing "invalid tar header" errors).
uncompressedStream, _, err := compression.AutoDecompress(inputStream)
if err != nil {
- return nil, errors.Wrap(err, "auto-decompressing input")
+ return nil, fmt.Errorf("auto-decompressing input: %w", err)
}
defer uncompressedStream.Close()
@@ -80,7 +80,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
// TODO: This can take quite some time, and should ideally be cancellable
// using a context.Context.
if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
- return nil, errors.Wrapf(err, "copying contents to temporary file %q", tarCopyFile.Name())
+ return nil, fmt.Errorf("copying contents to temporary file %q: %w", tarCopyFile.Name(), err)
}
succeeded = true
@@ -113,7 +113,7 @@ func newReader(path string, removeOnClose bool) (*Reader, error) {
return nil, err
}
if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
- return nil, errors.Wrap(err, "decoding tar manifest.json")
+ return nil, fmt.Errorf("decoding tar manifest.json: %w", err)
}
succeeded = true
@@ -137,7 +137,7 @@ func (r *Reader) Close() error {
func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
switch {
case ref != nil && sourceIndex != -1:
- return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+ return nil, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d",
ref.String(), sourceIndex)
case ref != nil:
@@ -146,25 +146,25 @@ func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int)
for tagIndex, tag := range r.Manifest[i].RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
- return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
+ return nil, -1, fmt.Errorf("Invalid tag %#v in manifest.json item @%d: %w", tag, i, err)
}
if parsedTag.String() == refString {
return &r.Manifest[i], tagIndex, nil
}
}
}
- return nil, -1, errors.Errorf("Tag %#v not found", refString)
+ return nil, -1, fmt.Errorf("Tag %#v not found", refString)
case sourceIndex != -1:
if sourceIndex >= len(r.Manifest) {
- return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
+ return nil, -1, fmt.Errorf("Invalid source index @%d, only %d manifest items available",
sourceIndex, len(r.Manifest))
}
return &r.Manifest[sourceIndex], -1, nil
default:
if len(r.Manifest) != 1 {
- return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
+ return nil, -1, fmt.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
}
return &r.Manifest[0], -1, nil
}
@@ -227,7 +227,7 @@ func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
}
if !header.FileInfo().Mode().IsRegular() {
- return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+ return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
@@ -258,7 +258,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *
func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
file, err := r.openTarComponent(path)
if err != nil {
- return nil, errors.Wrapf(err, "loading tar component %s", path)
+ return nil, fmt.Errorf("loading tar component %s: %w", path, err)
}
defer file.Close()
bytes, err := iolimits.ReadAtMost(file, limit)
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
index b8d84d2452a..b63b5316ef9 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
@@ -5,23 +5,31 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
"os"
"path"
"sync"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
archive *Reader
closeArchive bool // .Close() the archive when the source is closed.
// If ref is nil and sourceIndex is -1, indicates the only image in the archive.
@@ -47,13 +55,20 @@ type layerInfo struct {
// NewSource returns a tarfile.Source for an image in the specified archive matching ref
// and sourceIndex (or the only image if they are (nil, -1)).
// The archive will be closed if closeArchive is true.
-func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source {
- return &Source{
+func NewSource(archive *Reader, closeArchive bool, transportName string, ref reference.NamedTagged, sourceIndex int) *Source {
+ s := &Source{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAtRaw(transportName),
+
archive: archive,
closeArchive: closeArchive,
ref: ref,
sourceIndex: sourceIndex,
}
+ s.Compat = impl.AddCompat(s)
+ return s
}
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
@@ -80,10 +95,10 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
}
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
- return errors.Wrapf(err, "decoding tar config %s", tarManifest.Config)
+ return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err)
}
if parsedConfig.RootFS == nil {
- return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
+ return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
}
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
@@ -116,7 +131,7 @@ func (s *Source) TarManifest() []ManifestItem {
func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
// Collect layer data available in manifest and config.
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
- return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
+ return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
}
knownLayers := map[digest.Digest]*layerInfo{}
unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
@@ -129,7 +144,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
}
layerPath := path.Clean(tarManifest.Layers[i])
if _, ok := unknownLayerSizes[layerPath]; ok {
- return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
+ return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
}
li := &layerInfo{ // A new element in each iteration
path: layerPath,
@@ -164,15 +179,15 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// the slower method of checking if it's compressed.
uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
if err != nil {
- return nil, errors.Wrapf(err, "auto-decompressing %s to determine its size", layerPath)
+ return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err)
}
defer uncompressedStream.Close()
uncompressedSize := h.Size
if isCompressed {
- uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
+ uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
if err != nil {
- return nil, errors.Wrapf(err, "reading %s to find its size", layerPath)
+ return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err)
}
}
li.size = uncompressedSize
@@ -180,7 +195,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
}
}
if len(unknownLayerSizes) != 0 {
- return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
+ return nil, errors.New("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
}
return knownLayers, nil
@@ -214,7 +229,7 @@ func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest)
for _, diffID := range s.orderedDiffIDList {
li, ok := s.knownLayers[diffID]
if !ok {
- return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
+ return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
}
m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
Digest: diffID, // diffID is a digest of the uncompressed tarball
@@ -249,11 +264,6 @@ func (r uncompressedReadCloser) Close() error {
return res
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *Source) HasThreadSafeGetBlob() bool {
- return true
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -263,7 +273,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
}
if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
- return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+ return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
}
if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
@@ -292,7 +302,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
if err != nil {
- return nil, 0, errors.Wrapf(err, "auto-decompressing blob %s", info.Digest)
+ return nil, 0, fmt.Errorf("auto-decompressing blob %s: %w", info.Digest, err)
}
newStream := uncompressedReadCloser{
@@ -305,27 +315,5 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
return newStream, li.size, nil
}
- return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as there can be no secondary manifests.
-func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
- return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
- }
- return [][]byte{}, nil
-}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as the primary manifest can not be a list, so there can be no secondary manifests.
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
+ return nil, 0, fmt.Errorf("Unknown blob %s", info.Digest)
}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
index 255f0d354ed..f6ee041c496 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
@@ -4,6 +4,7 @@ import (
"archive/tar"
"bytes"
"encoding/json"
+ "errors"
"fmt"
"io"
"os"
@@ -15,7 +16,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -72,7 +72,7 @@ func (w *Writer) unlock() {
// The caller must have locked the Writer.
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+ return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
}
if blob, ok := w.blobs[info.Digest]; ok {
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
@@ -94,16 +94,16 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
// See also the comment in physicalLayerPath.
physicalLayerPath := w.physicalLayerPath(layerDigest)
if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
- return errors.Wrap(err, "creating layer symbolic link")
+ return fmt.Errorf("creating layer symbolic link: %w", err)
}
b := []byte("1.0")
if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
- return errors.Wrap(err, "writing VERSION file")
+ return fmt.Errorf("writing VERSION file: %w", err)
}
if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
- return errors.Wrap(err, "writing config json file")
+ return fmt.Errorf("writing config json file: %w", err)
}
w.legacyLayers[layerID] = struct{}{}
@@ -128,7 +128,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
var config map[string]*json.RawMessage
err := json.Unmarshal(configBytes, &config)
if err != nil {
- return errors.Wrap(err, "unmarshaling config")
+ return fmt.Errorf("unmarshaling config: %w", err)
}
for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
layerConfig[attr] = config[attr]
@@ -152,7 +152,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
layerConfig["layer_id"] = chainID
b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
if err != nil {
- return errors.Wrap(err, "marshaling layer config")
+ return fmt.Errorf("marshaling layer config: %w", err)
}
delete(layerConfig, "layer_id")
layerID := digest.Canonical.FromBytes(b).Hex()
@@ -160,7 +160,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
configBytes, err := json.Marshal(layerConfig)
if err != nil {
- return errors.Wrap(err, "marshaling layer config")
+ return fmt.Errorf("marshaling layer config: %w", err)
}
if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
@@ -280,10 +280,10 @@ func (w *Writer) Close() error {
b, err = json.Marshal(w.repositories)
if err != nil {
- return errors.Wrap(err, "marshaling repositories")
+ return fmt.Errorf("marshaling repositories: %w", err)
}
if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
- return errors.Wrap(err, "writing config json file")
+ return fmt.Errorf("writing config json file: %w", err)
}
if err := w.tar.Close(); err != nil {
@@ -375,7 +375,7 @@ func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reade
return err
}
if size != expectedSize {
- return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+ return fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
}
return nil
}
diff --git a/vendor/github.com/containers/image/v5/docker/paths_common.go b/vendor/github.com/containers/image/v5/docker/paths_common.go
new file mode 100644
index 00000000000..862e8803978
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/paths_common.go
@@ -0,0 +1,6 @@
+//go:build !freebsd
+// +build !freebsd
+
+package docker
+
+const etcDir = "/etc"
diff --git a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go
new file mode 100644
index 00000000000..2bf27ac06cb
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go
@@ -0,0 +1,6 @@
+//go:build freebsd
+// +build freebsd
+
+package docker
+
+const etcDir = "/usr/local/etc"
diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
index 94e9e5f234a..5d42c38706f 100644
--- a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
+++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
@@ -1,10 +1,11 @@
package policyconfiguration
import (
+ "errors"
+ "fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
- "github.com/pkg/errors"
)
// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
@@ -16,9 +17,9 @@ func DockerReferenceIdentity(ref reference.Named) (string, error) {
digested, isDigested := ref.(reference.Canonical)
switch {
case isTagged && isDigested: // Note that this CAN actually happen.
- return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
+ return "", fmt.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
- return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+ return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
case isTagged:
res = res + ":" + tagged.Tag()
case isDigested:
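The case analysis in DockerReferenceIdentity relies on interface type assertions: a parsed reference satisfies reference.NamedTagged and/or reference.Canonical depending on what the string contained. A short sketch of those assertions from the caller side, using the public c/image reference API:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	_, isTagged := ref.(reference.NamedTagged)
	_, isDigested := ref.(reference.Canonical)
	// A tag-only reference satisfies NamedTagged but not Canonical;
	// a name@sha256:... reference would flip the two results.
	fmt.Println(isTagged, isDigested) // true false
}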
diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go
index 8c0c23b2fe1..b7cd00b0d68 100644
--- a/vendor/github.com/containers/image/v5/docker/reference/reference.go
+++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go
@@ -3,13 +3,13 @@
//
// Grammar
//
-// reference := name [ ":" tag ] [ "@" digest ]
+// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
-// alpha-numeric := /[a-z0-9]+/
+// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go
new file mode 100644
index 00000000000..7b15871f7b6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go
@@ -0,0 +1,6 @@
+package reference
+
+// IsFullIdentifier returns true if the specified string fully matches `IdentifierRegexp`.
+func IsFullIdentifier(s string) bool {
+ return anchoredIdentifierRegexp.MatchString(s)
+}
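IsFullIdentifier is a small new exported helper over anchoredIdentifierRegexp; in this package IdentifierRegexp matches a full 64-character lowercase-hex content identifier (a digest value without the algorithm prefix). A usage sketch under that assumption:

package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	fullID := strings.Repeat("ab", 32) // 64 lowercase hex characters
	fmt.Println(reference.IsFullIdentifier(fullID))           // true
	fmt.Println(reference.IsFullIdentifier(fullID[:12]))      // false: truncated IDs do not match
	fmt.Println(reference.IsFullIdentifier("busybox:latest")) // false: names are not identifiers
}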
diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/registries_d.go
similarity index 60%
rename from vendor/github.com/containers/image/v5/docker/lookaside.go
rename to vendor/github.com/containers/image/v5/docker/registries_d.go
index 515e59327d3..37087dd857d 100644
--- a/vendor/github.com/containers/image/v5/docker/lookaside.go
+++ b/vendor/github.com/containers/image/v5/docker/registries_d.go
@@ -1,8 +1,8 @@
package docker
import (
+ "errors"
"fmt"
- "io/ioutil"
"net/url"
"os"
"path"
@@ -15,7 +15,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -26,15 +25,15 @@ var systemRegistriesDirPath = builtinRegistriesDirPath
// builtinRegistriesDirPath is the path to registries.d.
// DO NOT change this, instead see systemRegistriesDirPath above.
-const builtinRegistriesDirPath = "/etc/containers/registries.d"
+const builtinRegistriesDirPath = etcDir + "/containers/registries.d"
// userRegistriesDirPath is the path to the per user registries.d.
var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
-// defaultUserDockerDir is the default sigstore directory for unprivileged user
+// defaultUserDockerDir is the default lookaside directory for an unprivileged user
var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore")
-// defaultDockerDir is the default sigstore directory for root
+// defaultDockerDir is the default lookaside directory for root
var defaultDockerDir = "/var/lib/containers/sigstore"
// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
@@ -47,51 +46,39 @@ type registryConfiguration struct {
// registryNamespace defines lookaside locations for a single namespace.
type registryNamespace struct {
- SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing.
- SigStoreStaging string `json:"sigstore-staging"` // For writing only.
+ Lookaside string `json:"lookaside"` // For reading, and if LookasideStaging is not present, for writing.
+ LookasideStaging string `json:"lookaside-staging"` // For writing only.
+ SigStore string `json:"sigstore"` // For compatibility, deprecated in favor of Lookaside.
+ SigStoreStaging string `json:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging.
+ UseSigstoreAttachments *bool `json:"use-sigstore-attachments,omitempty"`
}
-// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
-// Users outside of this file should use SignatureStorageBaseURL and signatureStorageURL below.
-type signatureStorageBase *url.URL
+// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage.
+// Users outside of this file should use SignatureStorageBaseURL and lookasideStorageURL below.
+type lookasideStorageBase *url.URL
-// SignatureStorageBaseURL reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
+// SignatureStorageBaseURL reads configuration to find an appropriate lookaside storage URL for ref, for write access if “write”.
// The usage of the BaseURL is defined in docs/signature-protocols.md: signatures are stored separately from the docker/distribution registry itself.
// Warning: This function only exposes configuration in registries.d;
// just because this function returns an URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S).
func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) {
dr, ok := ref.(dockerReference)
if !ok {
- return nil, errors.Errorf("ref must be a dockerReference")
+ return nil, errors.New("ref must be a dockerReference")
}
- // FIXME? Loading and parsing the config could be cached across calls.
- dirPath := registriesDirPath(sys)
- logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath)
- config, err := loadAndMergeConfig(dirPath)
+ config, err := loadRegistryConfiguration(sys)
if err != nil {
return nil, err
}
- topLevel := config.signatureTopLevel(dr, write)
- var url *url.URL
- if topLevel != "" {
- url, err = url.Parse(topLevel)
- if err != nil {
- return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
- }
- } else {
- // returns default directory if no sigstore specified in configuration file
- url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID())
- logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.String())
- }
- // NOTE: Keep this in sync with docs/signature-protocols.md!
- // FIXME? Restrict to explicitly supported schemes?
- repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
- if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
- return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
- }
- url.Path = url.Path + "/" + repo
- return url, nil
+ return config.lookasideStorageBaseURL(dr, write)
+}
+
+// loadRegistryConfiguration returns a registryConfiguration appropriate for sys.
+func loadRegistryConfiguration(sys *types.SystemContext) (*registryConfiguration, error) {
+ dirPath := registriesDirPath(sys)
+ logrus.Debugf(`Using registries.d directory %s`, dirPath)
+ return loadAndMergeConfig(dirPath)
}
// registriesDirPath returns a path to registries.d
@@ -116,15 +103,8 @@ func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
return systemRegistriesDirPath
}
-// builtinDefaultSignatureStorageDir returns default signature storage URL as per euid
-func builtinDefaultSignatureStorageDir(euid int) *url.URL {
- if euid != 0 {
- return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)}
- }
- return &url.URL{Scheme: "file", Path: defaultDockerDir}
-}
-
// loadAndMergeConfig loads configuration files in dirPath
+// FIXME: Probably rename to loadRegistryConfigurationForPath
func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
dockerDefaultMergedFrom := ""
@@ -146,7 +126,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
continue
}
configPath := filepath.Join(dirPath, configName)
- configBytes, err := ioutil.ReadFile(configPath)
+ configBytes, err := os.ReadFile(configPath)
if err != nil {
return nil, err
}
@@ -154,12 +134,12 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
var config registryConfiguration
err = yaml.Unmarshal(configBytes, &config)
if err != nil {
- return nil, errors.Wrapf(err, "parsing %s", configPath)
+ return nil, fmt.Errorf("parsing %s: %w", configPath, err)
}
if config.DefaultDocker != nil {
if mergedConfig.DefaultDocker != nil {
- return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
+ return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
dockerDefaultMergedFrom, configPath)
}
mergedConfig.DefaultDocker = config.DefaultDocker
@@ -168,7 +148,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
if _, ok := mergedConfig.Docker[nsName]; ok {
- return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
+ return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
nsName, nsMergedFrom[nsName], configPath)
}
mergedConfig.Docker[nsName] = nsConfig
@@ -179,6 +159,40 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
return &mergedConfig, nil
}
+// lookasideStorageBaseURL returns an appropriate lookaside storage URL for ref, for write access if “write”.
+// The usage of the BaseURL is defined in docs/signature-protocols.md: signatures are stored separately from the docker/distribution registry itself.
+func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, write bool) (*url.URL, error) {
+ topLevel := config.signatureTopLevel(dr, write)
+ var url *url.URL
+ if topLevel != "" {
+ u, err := url.Parse(topLevel)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err)
+ }
+ url = u
+ } else {
+ // use the built-in default directory if no lookaside location is specified in the configuration file
+ url = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID())
+ logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted())
+ }
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ // FIXME? Restrict to explicitly supported schemes?
+ repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
+ return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
+ }
+ url.Path = url.Path + "/" + repo
+ return url, nil
+}
+
+// builtinDefaultLookasideStorageDir returns the default lookaside storage URL as per euid
+func builtinDefaultLookasideStorageDir(euid int) *url.URL {
+ if euid != 0 {
+ return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)}
+ }
+ return &url.URL{Scheme: "file", Path: defaultDockerDir}
+}
+
// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”.
// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured.
func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
@@ -186,7 +200,7 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
// Look for a full match.
identity := ref.PolicyConfigurationIdentity()
if ns, ok := config.Docker[identity]; ok {
- logrus.Debugf(` Using "docker" namespace %s`, identity)
+ logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, identity)
if url := ns.signatureTopLevel(write); url != "" {
return url
}
@@ -195,7 +209,7 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
// Look for a match of the possible parent namespaces.
for _, name := range ref.PolicyConfigurationNamespaces() {
if ns, ok := config.Docker[name]; ok {
- logrus.Debugf(` Using "docker" namespace %s`, name)
+ logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, name)
if url := ns.signatureTopLevel(write); url != "" {
return url
}
@@ -204,7 +218,7 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
}
// Look for a default location
if config.DefaultDocker != nil {
- logrus.Debugf(` Using "default-docker" configuration`)
+ logrus.Debugf(` Lookaside configuration: using "default-docker" configuration`)
if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
return url
}
@@ -212,24 +226,67 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
return ""
}
+// config.useSigstoreAttachments returns whether we should look for and write sigstore attachments for ref.
+func (config *registryConfiguration) useSigstoreAttachments(ref dockerReference) bool {
+ if config.Docker != nil {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if ns, ok := config.Docker[identity]; ok {
+ logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, identity)
+ if ns.UseSigstoreAttachments != nil {
+ return *ns.UseSigstoreAttachments
+ }
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if ns, ok := config.Docker[name]; ok {
+ logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, name)
+ if ns.UseSigstoreAttachments != nil {
+ return *ns.UseSigstoreAttachments
+ }
+ }
+ }
+ }
+ // Look for a default location
+ if config.DefaultDocker != nil {
+ logrus.Debugf(` Sigstore attachments: using "default-docker" configuration`)
+ if config.DefaultDocker.UseSigstoreAttachments != nil {
+ return *config.DefaultDocker.UseSigstoreAttachments
+ }
+ }
+ return false
+}
+
// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”.
// or "" if nothing has been configured.
func (ns registryNamespace) signatureTopLevel(write bool) string {
- if write && ns.SigStoreStaging != "" {
- logrus.Debugf(` Using %s`, ns.SigStoreStaging)
- return ns.SigStoreStaging
+ if write {
+ if ns.LookasideStaging != "" {
+ logrus.Debugf(` Using "lookaside-staging" %s`, ns.LookasideStaging)
+ return ns.LookasideStaging
+ }
+ if ns.SigStoreStaging != "" {
+ logrus.Debugf(` Using "sigstore-staging" %s`, ns.SigStoreStaging)
+ return ns.SigStoreStaging
+ }
+ }
+ if ns.Lookaside != "" {
+ logrus.Debugf(` Using "lookaside" %s`, ns.Lookaside)
+ return ns.Lookaside
}
if ns.SigStore != "" {
- logrus.Debugf(` Using %s`, ns.SigStore)
+ logrus.Debugf(` Using "sigstore" %s`, ns.SigStore)
return ns.SigStore
}
return ""
}
-// signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
+// lookasideStorageURL returns a URL usable for accessing the signature index in base for the given manifestDigest.
// base is not nil from the caller
// NOTE: Keep this in sync with docs/signature-protocols.md!
-func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL {
url := *base
url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
return &url
diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
index d0bbbba8a54..37ca098a810 100644
--- a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
+++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
@@ -3,6 +3,7 @@ package docker
// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
import (
+ "fmt"
"net/http"
"strings"
)
@@ -70,6 +71,18 @@ func parseAuthHeader(header http.Header) []challenge {
return challenges
}
+// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions`
+func parseAuthScope(scopeStr string) (*authScope, error) {
+ if parts := strings.Split(scopeStr, ":"); len(parts) == 3 {
+ return &authScope{
+ resourceType: parts[0],
+ remoteName: parts[1],
+ actions: parts[2],
+ }, nil
+ }
+ return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr)
+}
+
// NOTE: This is not a fully compliant parser per RFC 7235:
// Most notably it does not support more than one challenge within a single header
// Some of the whitespace parsing also seems noncompliant.
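parseAuthScope accepts only the three-part token-auth scope grammar used in registry WWW-Authenticate challenges. Since the helper is unexported, here is a standalone re-implementation of the accepted shape, including the known limitation that a remote name containing a colon is rejected:

package main

import (
	"fmt"
	"strings"
)

// splitScope mirrors parseAuthScope's behavior for illustration.
func splitScope(s string) (resource, name, actions string, ok bool) {
	parts := strings.Split(s, ":")
	if len(parts) != 3 {
		return "", "", "", false
	}
	return parts[0], parts[1], parts[2], true
}

func main() {
	r, n, a, ok := splitScope("repository:library/busybox:pull,push")
	fmt.Println(r, n, a, ok) // repository library/busybox pull,push true

	// A host:port remote splits into four parts and is rejected.
	_, _, _, ok = splitScope("repository:registry.example.com:5000/app:pull")
	fmt.Println(ok) // false
}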
diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go
index b250a6b1d2e..e5a3b899122 100644
--- a/vendor/github.com/containers/image/v5/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go
@@ -1,400 +1,14 @@
package image
import (
- "bytes"
- "context"
- "crypto/sha256"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/blobinfocache/none"
- "github.com/containers/image/v5/types"
- "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
+ "github.com/containers/image/v5/internal/image"
)
// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let’s use the same values.
-var GzippedEmptyLayer = []byte{
- 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
- 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
-}
+var GzippedEmptyLayer = image.GzippedEmptyLayer
// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
-const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
-
-type manifestSchema2 struct {
- src types.ImageSource // May be nil if configBlob is not nil
- configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
- m *manifest.Schema2
-}
-
-func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
- m, err := manifest.Schema2FromManifest(manifestBlob)
- if err != nil {
- return nil, err
- }
- return &manifestSchema2{
- src: src,
- m: m,
- }, nil
-}
-
-// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
-func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
- return &manifestSchema2{
- src: src,
- configBlob: configBlob,
- m: manifest.Schema2FromComponents(config, layers),
- }
-}
-
-func (m *manifestSchema2) serialize() ([]byte, error) {
- return m.m.Serialize()
-}
-
-func (m *manifestSchema2) manifestMIMEType() string {
- return m.m.MediaType
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
- return m.m.ConfigInfo()
-}
-
-// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-// layers in the resulting configuration isn't guaranteed to be returned to due how
-// old image manifests work (docker v2s1 especially).
-func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
- configBlob, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
- // than OCI v1. This unmarshal makes sure we drop docker v2s2
- // fields that aren't needed in OCI v1.
- configOCI := &imgspecv1.Image{}
- if err := json.Unmarshal(configBlob, configOCI); err != nil {
- return nil, err
- }
- return configOCI, nil
-}
-
-// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
-// The result is cached; it is OK to call this however often you need.
-func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
- if m.configBlob == nil {
- if m.src == nil {
- return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
- }
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
- if err != nil {
- return nil, err
- }
- defer stream.Close()
- blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
- if err != nil {
- return nil, err
- }
- computedDigest := digest.FromBytes(blob)
- if computedDigest != m.m.ConfigDescriptor.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
- }
- m.configBlob = blob
- }
- return m.configBlob, nil
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
- return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
-}
-
-// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
-// It returns false if the manifest does not embed a Docker reference.
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
-func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
- return false
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
- getter := func(info types.BlobInfo) ([]byte, error) {
- if info.Digest != m.ConfigInfo().Digest {
- // Shouldn't ever happen
- return nil, errors.New("asked for a different config blob")
- }
- config, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- return config, nil
- }
- return m.m.Inspect(getter)
-}
-
-// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
-// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
-// (most importantly it forces us to download the full layers even if they are already present at the destination).
-func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
- return false
-}
-
-// UpdatedImage returns a types.Image modified according to options.
-// This does not change the state of the original Image object.
-// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
-// if the CompressionOperation and CompressionAlgorithm specified in one or more
-// options.LayerInfos items is anything other than gzip.
-func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
- copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
- src: m.src,
- configBlob: m.configBlob,
- m: manifest.Schema2Clone(m.m),
- }
-
- converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
- manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
- manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
- imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
- })
- if err != nil {
- return nil, err
- }
-
- if converted != nil {
- return converted, nil
- }
-
- // No conversion required, update manifest
- if options.LayerInfos != nil {
- if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
- return nil, err
- }
- }
- // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
-
- return memoryImageFromManifest(©), nil
-}
-
-func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
- return imgspecv1.Descriptor{
- MediaType: d.MediaType,
- Size: d.Size,
- Digest: d.Digest,
- URLs: d.URLs,
- }
-}
-
-// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestSchema2 object.
-func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
- configOCI, err := m.OCIConfig(ctx)
- if err != nil {
- return nil, err
- }
- configOCIBytes, err := json.Marshal(configOCI)
- if err != nil {
- return nil, err
- }
-
- config := imgspecv1.Descriptor{
- MediaType: imgspecv1.MediaTypeImageConfig,
- Size: int64(len(configOCIBytes)),
- Digest: digest.FromBytes(configOCIBytes),
- }
-
- layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
- for idx := range layers {
- layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
- switch m.m.LayersDescriptors[idx].MediaType {
- case manifest.DockerV2Schema2ForeignLayerMediaType:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
- case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
- case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
- case manifest.DockerV2Schema2LayerMediaType:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
- default:
- return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
- }
- }
-
- return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
-}
-
-// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestSchema2 object.
-//
-// Based on docker/distribution/manifest/schema1/config_builder.go
-func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
- dest := options.InformationOnly.Destination
-
- var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
- if options.LayerInfos != nil {
- if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
- return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
- len(options.LayerInfos), len(m.m.LayersDescriptors))
- }
- convertedLayerUpdates = []types.BlobInfo{}
- }
-
- configBytes, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- imageConfig := &manifest.Schema2Image{}
- if err := json.Unmarshal(configBytes, imageConfig); err != nil {
- return nil, err
- }
-
- // Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
- fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
- history := make([]manifest.Schema1History, len(imageConfig.History))
- nonemptyLayerIndex := 0
- var parentV1ID string // Set in the loop
- v1ID := ""
- haveGzippedEmptyLayer := false
- if len(imageConfig.History) == 0 {
- // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
- }
- for v2Index, historyEntry := range imageConfig.History {
- parentV1ID = v1ID
- v1Index := len(imageConfig.History) - 1 - v2Index
-
- var blobDigest digest.Digest
- if historyEntry.EmptyLayer {
- emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
-
- if !haveGzippedEmptyLayer {
- logrus.Debugf("Uploading empty layer during conversion to schema 1")
- // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
- // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
- if err != nil {
- return nil, errors.Wrap(err, "uploading empty layer")
- }
- if info.Digest != emptyLayerBlobInfo.Digest {
- return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
- }
- haveGzippedEmptyLayer = true
- }
- if options.LayerInfos != nil {
- convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
- }
- blobDigest = emptyLayerBlobInfo.Digest
- } else {
- if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
- return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
- }
- if options.LayerInfos != nil {
- convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
- }
- blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
- nonemptyLayerIndex++
- }
-
- // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
- v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
- if err != nil {
- return nil, err
- }
- v1ID = v
-
- fakeImage := manifest.Schema1V1Compatibility{
- ID: v1ID,
- Parent: parentV1ID,
- Comment: historyEntry.Comment,
- Created: historyEntry.Created,
- Author: historyEntry.Author,
- ThrowAway: historyEntry.EmptyLayer,
- }
- fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
- v1CompatibilityBytes, err := json.Marshal(&fakeImage)
- if err != nil {
- return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
- }
-
- fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
- history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
- // Note that parentV1ID of the top layer is preserved when exiting this loop
- }
-
- // Now patch in real configuration for the top layer (v1Index == 0)
- v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
- if err != nil {
- return nil, err
- }
- v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
- if err != nil {
- return nil, err
- }
- history[0].V1Compatibility = string(v1Config)
-
- if options.LayerInfos != nil {
- options.LayerInfos = convertedLayerUpdates
- }
- m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
- if err != nil {
- return nil, err // This should never happen, we should have created all the components correctly.
- }
- return m1, nil
-}
-
-func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
- if err := blobDigest.Validate(); err != nil {
- return "", err
- }
- parts := append([]string{blobDigest.Hex()}, others...)
- v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
- return hex.EncodeToString(v1IDHash[:]), nil
-}
-
-func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
- // Preserve everything we don't specifically know about.
- // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
- rawContents := map[string]*json.RawMessage{}
- if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
- return nil, err
- }
- delete(rawContents, "rootfs")
- delete(rawContents, "history")
-
- updates := map[string]interface{}{"id": v1ID}
- if parentV1ID != "" {
- updates["parent"] = parentV1ID
- }
- if throwaway {
- updates["throwaway"] = throwaway
- }
- for field, value := range updates {
- encoded, err := json.Marshal(value)
- if err != nil {
- return nil, err
- }
- rawContents[field] = (*json.RawMessage)(&encoded)
- }
- return json.Marshal(rawContents)
-}
-
-// SupportsEncryption returns if encryption is supported for the manifest type
-func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
- return false
-}
+const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest
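The gutting above shows the organizing idea of this release: implementations move under internal/image, while the public image package keeps existing importers compiling by re-exporting the same names. The forwarding pattern in miniature (hypothetical module path; the internal package is assumed to export the same identifiers):

// Package pub stands in for the public c/image image package.
package pub

import "example.com/m/internal/impl" // hypothetical internal package

// A variable forwards its value at package initialization...
var GzippedEmptyLayer = impl.GzippedEmptyLayer

// ...and a constant forwards as a constant expression, so callers observe
// no API change even though the definition now lives elsewhere.
const GzippedEmptyLayerDigest = impl.GzippedEmptyLayerDigest

Because internal/ paths cannot be imported from outside the module, the move also prevents new external code from depending on the implementation directly.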
diff --git a/vendor/github.com/containers/image/v5/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go
index 3a016e1d09a..2b7f6b144b9 100644
--- a/vendor/github.com/containers/image/v5/image/sourced.go
+++ b/vendor/github.com/containers/image/v5/image/sourced.go
@@ -6,17 +6,10 @@ package image
import (
"context"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
)
-// imageCloser implements types.ImageCloser, perhaps allowing simple users
-// to use a single object without having keep a reference to a types.ImageSource
-// only to call types.ImageSource.Close().
-type imageCloser struct {
- types.Image
- src types.ImageSource
-}
-
// FromSource returns a types.ImageCloser implementation for the default instance of source.
// If source is a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
@@ -31,33 +24,7 @@ type imageCloser struct {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
- img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
- if err != nil {
- return nil, err
- }
- return &imageCloser{
- Image: img,
- src: src,
- }, nil
-}
-
-func (ic *imageCloser) Close() error {
- return ic.src.Close()
-}
-
-// sourcedImage is a general set of utilities for working with container images,
-// whatever is their underlying location (i.e. dockerImageSource-independent).
-// Note the existence of skopeo/docker.Image: some instances of a `types.Image`
-// may not be a `sourcedImage` directly. However, most users of `types.Image`
-// do not care, and those who care about `skopeo/docker.Image` know they do.
-type sourcedImage struct {
- *UnparsedImage
- manifestBlob []byte
- manifestMIMEType string
- // genericManifest contains data corresponding to manifestBlob.
- // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
- // if you want to preserve the original manifest; use manifestBlob directly.
- genericManifest
+ return image.FromSource(ctx, sys, src)
}
// FromUnparsedImage returns a types.Image implementation for unparsed.
@@ -66,39 +33,5 @@ type sourcedImage struct {
//
// The Image must not be used after the underlying ImageSource is Close()d.
func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) {
- // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
- // we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
- // this is the only UnparsedImage implementation around, anyway.
-
- // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
- manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
- if err != nil {
- return nil, err
- }
-
- parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
- if err != nil {
- return nil, err
- }
-
- return &sourcedImage{
- UnparsedImage: unparsed,
- manifestBlob: manifestBlob,
- manifestMIMEType: manifestMIMEType,
- genericManifest: parsedManifest,
- }, nil
-}
-
-// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
-func (i *sourcedImage) Size() (int64, error) {
- return -1, nil
-}
-
-// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
-func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
- return i.manifestBlob, i.manifestMIMEType, nil
-}
-
-func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
- return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
+ return image.FromUnparsedImage(ctx, sys, unparsed)
}
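FromSource still returns a types.ImageCloser whose Close also closes the underlying ImageSource; only the construction moved to internal/image. The typical call shape, sketched with the public c/image API:

package example

import (
	"context"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/types"
)

func fetchManifest(ctx context.Context, sys *types.SystemContext) ([]byte, error) {
	ref, err := docker.ParseReference("//busybox:latest")
	if err != nil {
		return nil, err
	}
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		return nil, err
	}
	img, err := image.FromSource(ctx, sys, src)
	if err != nil {
		src.Close() // on failure the source is still ours to close
		return nil, err
	}
	defer img.Close() // also closes src, per the imageCloser contract

	manifest, _, err := img.Manifest(ctx)
	return manifest, err
}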
diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go
index c64852f722b..123f6ce6f1c 100644
--- a/vendor/github.com/containers/image/v5/image/unparsed.go
+++ b/vendor/github.com/containers/image/v5/image/unparsed.go
@@ -1,95 +1,19 @@
package image
import (
- "context"
-
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// UnparsedImage implements types.UnparsedImage .
// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
-type UnparsedImage struct {
- src types.ImageSource
- instanceDigest *digest.Digest
- cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
- // A private cache for Manifest(), may be the empty string if guessing failed.
- // Valid iff cachedManifest is not nil.
- cachedManifestMIMEType string
- cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
-}
+type UnparsedImage = image.UnparsedImage
// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
//
// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
- return &UnparsedImage{
- src: src,
- instanceDigest: instanceDigest,
- }
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (i *UnparsedImage) Reference() types.ImageReference {
- // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
- return i.src.Reference()
-}
-
-// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
-func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
- if i.cachedManifest == nil {
- m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
- if err != nil {
- return nil, "", err
- }
-
- // ImageSource.GetManifest does not do digest verification, but we do;
- // this immediately protects also any user of types.Image.
- if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
- matches, err := manifest.MatchesDigest(m, digest)
- if err != nil {
- return nil, "", errors.Wrap(err, "computing manifest digest")
- }
- if !matches {
- return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
- }
- }
-
- i.cachedManifest = m
- i.cachedManifestMIMEType = mt
- }
- return i.cachedManifest, i.cachedManifestMIMEType, nil
-}
-
-// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known.
-// The bool return value seems redundant with digest != ""; it is used explicitly
-// to refuse (unexpected) situations when the digest exists but is "".
-func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
- if i.instanceDigest != nil {
- return *i.instanceDigest, true
- }
- ref := i.Reference().DockerReference()
- if ref != nil {
- if canonical, ok := ref.(reference.Canonical); ok {
- return canonical.Digest(), true
- }
- }
- return "", false
-}
-
-// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
- if i.cachedSignatures == nil {
- sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
- if err != nil {
- return nil, err
- }
- i.cachedSignatures = sigs
- }
- return i.cachedSignatures, nil
+ return image.UnparsedInstance(src, instanceDigest)
}
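UnparsedImage is re-exported with a type alias, not a new named type: `type UnparsedImage = image.UnparsedImage` makes the two identical, so values cross the public/internal boundary with no conversion (a distinct named type would have broken every existing caller). The distinction in miniature:

package main

import "fmt"

type base struct{ n int }

type alias = base // alias: the very same type
type named base   // named type: distinct, needs conversion

func takesBase(b base) { fmt.Println(b.n) }

func main() {
	takesBase(alias{n: 1})       // compiles directly: alias is base
	takesBase(base(named{n: 2})) // a named type requires explicit conversion
}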
diff --git a/vendor/github.com/containers/image/v5/image/docker_list.go b/vendor/github.com/containers/image/v5/internal/image/docker_list.go
similarity index 62%
rename from vendor/github.com/containers/image/v5/image/docker_list.go
rename to vendor/github.com/containers/image/v5/internal/image/docker_list.go
index 4fe84413c12..8afc406282e 100644
--- a/vendor/github.com/containers/image/v5/image/docker_list.go
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_list.go
@@ -2,32 +2,32 @@ package image
import (
"context"
+ "fmt"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
list, err := manifest.Schema2ListFromManifest(manblob)
if err != nil {
- return nil, errors.Wrapf(err, "parsing schema2 manifest list")
+ return nil, fmt.Errorf("parsing schema2 manifest list: %w", err)
}
targetManifestDigest, err := list.ChooseInstance(sys)
if err != nil {
- return nil, errors.Wrapf(err, "choosing image instance")
+ return nil, fmt.Errorf("choosing image instance: %w", err)
}
manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
if err != nil {
- return nil, errors.Wrapf(err, "loading manifest for target platform")
+ return nil, fmt.Errorf("fetching target platform image selected from manifest list: %w", err)
}
matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
if err != nil {
- return nil, errors.Wrap(err, "computing manifest digest")
+ return nil, fmt.Errorf("computing manifest digest: %w", err)
}
if !matches {
- return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+ return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
}
return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
diff --git a/vendor/github.com/containers/image/v5/image/docker_schema1.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
similarity index 89%
rename from vendor/github.com/containers/image/v5/image/docker_schema1.go
rename to vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
index 5f24970c371..3ef8e144d7b 100644
--- a/vendor/github.com/containers/image/v5/image/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
@@ -2,13 +2,13 @@ package image
import (
"context"
+ "fmt"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
type manifestSchema1 struct {
@@ -165,22 +165,22 @@ func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *t
if len(m.m.ExtractedV1Compatibility) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
+ return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
}
if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
+ return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
}
if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
+ return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
}
if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
+ return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
}
var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
if options.LayerInfos != nil {
if len(options.LayerInfos) != len(m.m.FSLayers) {
- return nil, errors.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
+ return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
len(options.LayerInfos), len(m.m.FSLayers))
}
convertedLayerUpdates = []types.BlobInfo{}
@@ -246,3 +246,12 @@ func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *ty
func (m *manifestSchema1) SupportsEncryption(context.Context) bool {
return false
}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool {
+ return true // There are no MIME types in the manifest, so we must assume a valid image.
+}
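CanChangeLayerCompression is a new hook on the internal genericManifest interface: copy logic can ask whether swapping a layer's compression is representable in the current manifest format before attempting it, and schema1, carrying no layer MIME types, always answers true. A self-contained sketch of the idea (the interface here is an illustrative subset, not the real internal one):

package main

import "fmt"

// compressionCapable captures just the hook added above.
type compressionCapable interface {
	CanChangeLayerCompression(mimeType string) bool
}

type schema1 struct{}

// Schema1 manifests record no layer MIME types, so any change is representable.
func (schema1) CanChangeLayerCompression(string) bool { return true }

func main() {
	var m compressionCapable = schema1{}
	if m.CanChangeLayerCompression("application/vnd.docker.image.rootfs.diff.tar.gzip") {
		fmt.Println("safe to recompress this layer")
	}
}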
diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
new file mode 100644
index 00000000000..23a21999aa8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
@@ -0,0 +1,413 @@
+package image
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
+// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
+// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
+// in registries, so let’s use the same values.
+//
+// This is publicly visible as c/image/image.GzippedEmptyLayer.
+var GzippedEmptyLayer = []byte{
+ 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
+ 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
+}
+
+// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
+//
+// This is publicly visible as c/image/image.GzippedEmptyLayerDigest.
+const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
+
+type manifestSchema2 struct {
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
+ m *manifest.Schema2
+}
+
+func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.Schema2FromManifest(manifestBlob)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestSchema2{
+ src: src,
+ m: m,
+ }, nil
+}
+
+// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
+func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
+ return &manifestSchema2{
+ src: src,
+ configBlob: configBlob,
+ m: manifest.Schema2FromComponents(config, layers),
+ }
+}
+
+func (m *manifestSchema2) serialize() ([]byte, error) {
+ return m.m.Serialize()
+}
+
+func (m *manifestSchema2) manifestMIMEType() string {
+ return m.m.MediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned to due how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+ configBlob, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
+ // than OCI v1. This unmarshal makes sure we drop docker v2s2
+ // fields that aren't needed in OCI v1.
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(configBlob, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
+ }
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.m.ConfigDescriptor.Digest {
+ return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
+ return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+ getter := func(info types.BlobInfo) ([]byte, error) {
+ if info.Digest != m.ConfigInfo().Digest {
+ // Shouldn't ever happen
+ return nil, errors.New("asked for a different config blob")
+ }
+ config, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config, nil
+ }
+ return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the CompressionOperation and CompressionAlgorithm specified in one or more
+// options.LayerInfos items is anything other than gzip.
+func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.Schema2Clone(m.m),
+ }
+
+ converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+ manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
+ manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+ imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if converted != nil {
+ return converted, nil
+ }
+
+ // No conversion required, update manifest
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
+
+ return memoryImageFromManifest(&copy), nil
+}
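+
+// Illustrative sketch, not part of this change: a caller holding a types.Image
+// backed by manifestSchema2 reaches the conversion functions below via
+// UpdatedImage, e.g. to obtain the OCI form of a schema2 image (img and ctx
+// are assumed to be in scope):
+//
+// ociImg, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
+// 	ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+// })
+// if err != nil {
+// 	return err
+// }
+// rawManifest, mimeType, err := ociImg.Manifest(ctx)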
+
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+ return imgspecv1.Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
+// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema2 object.
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
+ configOCI, err := m.OCIConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+ configOCIBytes, err := json.Marshal(configOCI)
+ if err != nil {
+ return nil, err
+ }
+
+ config := imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Size: int64(len(configOCIBytes)),
+ Digest: digest.FromBytes(configOCIBytes),
+ }
+
+ layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+ for idx := range layers {
+ layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
+ switch m.m.LayersDescriptors[idx].MediaType {
+ case manifest.DockerV2Schema2ForeignLayerMediaType:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+ case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
+ case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
+ case manifest.DockerV2Schema2LayerMediaType:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
+ default:
+ return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
+ }
+ }
+
+ return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
+}
+
+// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema2 object.
+//
+// Based on docker/distribution/manifest/schema1/config_builder.go
+func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+ dest := options.InformationOnly.Destination
+
+ var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
+ if options.LayerInfos != nil {
+ if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
+ return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
+ len(options.LayerInfos), len(m.m.LayersDescriptors))
+ }
+ convertedLayerUpdates = []types.BlobInfo{}
+ }
+
+ configBytes, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ imageConfig := &manifest.Schema2Image{}
+ if err := json.Unmarshal(configBytes, imageConfig); err != nil {
+ return nil, err
+ }
+
+ // Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
+ fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
+ history := make([]manifest.Schema1History, len(imageConfig.History))
+ nonemptyLayerIndex := 0
+ var parentV1ID string // Set in the loop
+ v1ID := ""
+ haveGzippedEmptyLayer := false
+ if len(imageConfig.History) == 0 {
+ // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
+ return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
+ }
+ for v2Index, historyEntry := range imageConfig.History {
+ parentV1ID = v1ID
+ v1Index := len(imageConfig.History) - 1 - v2Index
+
+ var blobDigest digest.Digest
+ if historyEntry.EmptyLayer {
+ emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
+
+ if !haveGzippedEmptyLayer {
+ logrus.Debugf("Uploading empty layer during conversion to schema 1")
+ // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
+ // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
+ info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
+ if err != nil {
+ return nil, fmt.Errorf("uploading empty layer: %w", err)
+ }
+ if info.Digest != emptyLayerBlobInfo.Digest {
+ return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
+ }
+ haveGzippedEmptyLayer = true
+ }
+ if options.LayerInfos != nil {
+ convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
+ }
+ blobDigest = emptyLayerBlobInfo.Digest
+ } else {
+ if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
+ return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
+ }
+ if options.LayerInfos != nil {
+ convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
+ }
+ blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
+ nonemptyLayerIndex++
+ }
+
+ // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
+ if err != nil {
+ return nil, err
+ }
+ v1ID = v
+
+ fakeImage := manifest.Schema1V1Compatibility{
+ ID: v1ID,
+ Parent: parentV1ID,
+ Comment: historyEntry.Comment,
+ Created: historyEntry.Created,
+ Author: historyEntry.Author,
+ ThrowAway: historyEntry.EmptyLayer,
+ }
+ fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
+ v1CompatibilityBytes, err := json.Marshal(&fakeImage)
+ if err != nil {
+ return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
+ }
+
+ fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
+ history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
+ // Note that parentV1ID of the top layer is preserved when exiting this loop
+ }
+
+ // Now patch in real configuration for the top layer (v1Index == 0)
+ v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
+ if err != nil {
+ return nil, err
+ }
+ v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
+ if err != nil {
+ return nil, err
+ }
+ history[0].V1Compatibility = string(v1Config)
+
+ if options.LayerInfos != nil {
+ options.LayerInfos = convertedLayerUpdates
+ }
+ m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
+ if err != nil {
+ return nil, err // This should never happen, we should have created all the components correctly.
+ }
+ return m1, nil
+}
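+
+// Ordering note (an editorial illustration): the schema2 config lists history
+// oldest-first, while schema1 fsLayers/history are newest-first; hence
+// v1Index = len(imageConfig.History) - 1 - v2Index above. With three history
+// entries, v2Index 0, 1, 2 map to v1Index 2, 1, 0, so the newest layer lands
+// at fsLayers[0], the entry whose history[0].V1Compatibility is then replaced
+// with the real configuration.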
+
+func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
+ if err := blobDigest.Validate(); err != nil {
+ return "", err
+ }
+ parts := append([]string{blobDigest.Hex()}, others...)
+ v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
+ return hex.EncodeToString(v1IDHash[:]), nil
+}
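+
+// For illustration: the resulting ID is the hex SHA-256 of the blob digest's
+// hex part joined, space-separated, with the other components. The digest used
+// below is an assumption (it is the sha256 of the empty string):
+//
+// id, err := v1IDFromBlobDigestAndComponents(
+// 	digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
+// 	"" /* parentV1ID of a root layer */)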
+
+func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+ // Preserve everything we don't specifically know about.
+ // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
+ rawContents := map[string]*json.RawMessage{}
+ if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
+ return nil, err
+ }
+ delete(rawContents, "rootfs")
+ delete(rawContents, "history")
+
+ updates := map[string]interface{}{"id": v1ID}
+ if parentV1ID != "" {
+ updates["parent"] = parentV1ID
+ }
+ if throwaway {
+ updates["throwaway"] = throwaway
+ }
+ for field, value := range updates {
+ encoded, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+ rawContents[field] = (*json.RawMessage)(&encoded)
+ }
+ return json.Marshal(rawContents)
+}
+
+// SupportsEncryption returns if encryption is supported for the manifest type
+func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
+ return false
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool {
+ return m.m.CanChangeLayerCompression(mimeType)
+}
diff --git a/vendor/github.com/containers/image/v5/image/manifest.go b/vendor/github.com/containers/image/v5/internal/image/manifest.go
similarity index 87%
rename from vendor/github.com/containers/image/v5/image/manifest.go
rename to vendor/github.com/containers/image/v5/internal/image/manifest.go
index 36d70b5c23d..75e472aa746 100644
--- a/vendor/github.com/containers/image/v5/image/manifest.go
+++ b/vendor/github.com/containers/image/v5/internal/image/manifest.go
@@ -8,13 +8,11 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// genericManifest is an interface for parsing, modifying image manifests and related data.
-// Note that the public methods are intended to be a subset of types.Image
-// so that embedding a genericManifest into structs works.
-// will support v1 one day...
+// The public methods are related to types.Image so that embedding a genericManifest implements most of it,
+// but there are also public methods that are only visible to packages that can import c/image/internal/image.
type genericManifest interface {
serialize() ([]byte, error)
manifestMIMEType() string
@@ -51,6 +49,16 @@ type genericManifest interface {
// the process of updating a manifest between different manifest types was to update then convert.
// This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
SupportsEncryption(ctx context.Context) bool
+
+ // The following methods are not a part of types.Image:
+ // ===
+
+ // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+ // (and the code can handle that).
+ // NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+ // algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+ // to a different manifest format).
+ CanChangeLayerCompression(mimeType string) bool
}
// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
@@ -98,7 +106,7 @@ func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.Mani
converter, ok := converters[options.ManifestMIMEType]
if !ok {
- return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType)
+ return nil, fmt.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType)
}
optionsCopy := options
diff --git a/vendor/github.com/containers/image/v5/image/memory.go b/vendor/github.com/containers/image/v5/internal/image/memory.go
similarity index 99%
rename from vendor/github.com/containers/image/v5/image/memory.go
rename to vendor/github.com/containers/image/v5/internal/image/memory.go
index 4c96b37d889..e22c7aafdfa 100644
--- a/vendor/github.com/containers/image/v5/image/memory.go
+++ b/vendor/github.com/containers/image/v5/internal/image/memory.go
@@ -2,9 +2,9 @@ package image
import (
"context"
+ "errors"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// memoryImage is a mostly-implementation of types.Image assembled from data
diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go
similarity index 86%
rename from vendor/github.com/containers/image/v5/image/oci.go
rename to vendor/github.com/containers/image/v5/internal/image/oci.go
index 58e9c03ba3a..4b74de3e580 100644
--- a/vendor/github.com/containers/image/v5/image/oci.go
+++ b/vendor/github.com/containers/image/v5/internal/image/oci.go
@@ -3,16 +3,17 @@ package image
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
type manifestOCI1 struct {
@@ -60,7 +61,7 @@ func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.configBlob == nil {
if m.src == nil {
- return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
+ return nil, errors.New("Internal error: neither src nor configBlob set in manifestOCI1")
}
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
if err != nil {
@@ -73,7 +74,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.Config.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+ return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
}
m.configBlob = blob
}
@@ -84,6 +85,10 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
+ }
+
cb, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
@@ -194,10 +199,15 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
+ }
+
// Create a copy of the descriptor.
config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
- // The only difference between OCI and DockerSchema2 is the mediatypes. The
+ // Above, we have already checked that this manifest refers to an image, not an OCI artifact,
+ // so the only difference between OCI and DockerSchema2 is the mediatypes. The
// media type of the manifest is handled by manifestSchema2FromComponents.
config.MediaType = manifest.DockerV2Schema2ConfigMediaType
@@ -233,7 +243,11 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
- // We can't directly convert to V1, but we can transitively convert via a V2 image
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
+ }
+
+ // We can't directly convert images to V1, but we can transitively convert via a V2 image
m2, err := m.convertToManifestSchema2(ctx, options)
if err != nil {
return nil, err
@@ -246,3 +260,12 @@ func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *ty
func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
return true
}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool {
+ return m.m.CanChangeLayerCompression(mimeType)
+}
diff --git a/vendor/github.com/containers/image/v5/image/oci_index.go b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
similarity index 63%
rename from vendor/github.com/containers/image/v5/image/oci_index.go
rename to vendor/github.com/containers/image/v5/internal/image/oci_index.go
index 4e6ca879aad..f4f76622c59 100644
--- a/vendor/github.com/containers/image/v5/image/oci_index.go
+++ b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
@@ -2,32 +2,32 @@ package image
import (
"context"
+ "fmt"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
index, err := manifest.OCI1IndexFromManifest(manblob)
if err != nil {
- return nil, errors.Wrapf(err, "parsing OCI1 index")
+ return nil, fmt.Errorf("parsing OCI1 index: %w", err)
}
targetManifestDigest, err := index.ChooseInstance(sys)
if err != nil {
- return nil, errors.Wrapf(err, "choosing image instance")
+ return nil, fmt.Errorf("choosing image instance: %w", err)
}
manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
if err != nil {
- return nil, errors.Wrapf(err, "loading manifest for target platform")
+ return nil, fmt.Errorf("fetching target platform image selected from image index: %w", err)
}
matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
if err != nil {
- return nil, errors.Wrap(err, "computing manifest digest")
+ return nil, fmt.Errorf("computing manifest digest: %w", err)
}
if !matches {
- return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+ return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
}
return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
diff --git a/vendor/github.com/containers/image/v5/internal/image/sourced.go b/vendor/github.com/containers/image/v5/internal/image/sourced.go
new file mode 100644
index 00000000000..dc09a9e04b1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/sourced.go
@@ -0,0 +1,134 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/types"
+)
+
+// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
+// If reference points to a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
+ src, err := ref.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, err
+ }
+ img, err := FromSource(ctx, sys, src)
+ if err != nil {
+ src.Close()
+ return nil, err
+ }
+ return img, nil
+}
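+
+// A minimal usage sketch; the image name and the alltransports helper are
+// assumptions for illustration (external callers would typically use
+// ref.NewImage rather than this internal package):
+//
+// ref, err := alltransports.ParseImageName("docker://quay.io/libpod/alpine:latest")
+// if err != nil {
+// 	return err
+// }
+// img, err := FromReference(ctx, sys, ref)
+// if err != nil {
+// 	return err
+// }
+// defer img.Close()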
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+ types.Image
+ src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// keep the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+//
+// Most callers can use either FromUnparsedImage or FromReference instead.
+//
+// This is publicly visible as c/image/image.FromSource.
+func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
+ img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
+ if err != nil {
+ return nil, err
+ }
+ return &imageCloser{
+ Image: img,
+ src: src,
+ }, nil
+}
+
+func (ic *imageCloser) Close() error {
+ return ic.src.Close()
+}
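+
+// Ownership sketch: because FromSource takes over src, only the failure path
+// closes the ImageSource directly (compare FromReference above):
+//
+// img, err := FromSource(ctx, sys, src)
+// if err != nil {
+// 	src.Close() // ownership was not transferred
+// 	return err
+// }
+// defer img.Close() // this also closes src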
+
+// SourcedImage is a general set of utilities for working with container images,
+// whatever their underlying transport is (i.e. ImageSource-independent).
+// Note the existence of docker.Image and image.memoryImage: various instances
+// of a types.Image may not be a SourcedImage directly.
+//
+// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do.
+//
+// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image.
+type SourcedImage struct {
+ *UnparsedImage
+ ManifestBlob []byte // The manifest of the relevant instance
+ ManifestMIMEType string // MIME type of ManifestBlob
+ // genericManifest contains data corresponding to manifestBlob.
+ // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
+ // if you want to preserve the original manifest; use manifestBlob directly.
+ genericManifest
+}
+
+// FromUnparsedImage returns a types.Image implementation for unparsed.
+// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate single image.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+//
+// This is publicly visible as c/image/image.FromUnparsedImage.
+func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) {
+ // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
+ // we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
+ // this is the only UnparsedImage implementation around, anyway.
+
+ // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
+ manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SourcedImage{
+ UnparsedImage: unparsed,
+ ManifestBlob: manifestBlob,
+ ManifestMIMEType: manifestMIMEType,
+ genericManifest: parsedManifest,
+ }, nil
+}
+
+// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
+func (i *SourcedImage) Size() (int64, error) {
+ return -1, nil
+}
+
+// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
+func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ return i.ManifestBlob, i.ManifestMIMEType, nil
+}
+
+func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+ return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go
new file mode 100644
index 00000000000..0f026501c27
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/unparsed.go
@@ -0,0 +1,119 @@
+package image
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// UnparsedImage implements types.UnparsedImage.
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// This is publicly visible as c/image/image.UnparsedImage.
+type UnparsedImage struct {
+ src private.ImageSource
+ instanceDigest *digest.Digest
+ cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
+ // A private cache for Manifest(), may be the empty string if guessing failed.
+ // Valid iff cachedManifest is not nil.
+ cachedManifestMIMEType string
+ cachedSignatures []signature.Signature // A private cache for Signatures(); nil if not yet known.
+}
+
+// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+//
+// This is publicly visible as c/image/image.UnparsedInstance.
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+ return &UnparsedImage{
+ src: imagesource.FromPublic(src),
+ instanceDigest: instanceDigest,
+ }
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+ // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
+ return i.src.Reference()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ if i.cachedManifest == nil {
+ m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // ImageSource.GetManifest does not do digest verification, but we do;
+ // this immediately protects also any user of types.Image.
+ if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+ matches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return nil, "", fmt.Errorf("computing manifest digest: %w", err)
+ }
+ if !matches {
+ return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest)
+ }
+ }
+
+ i.cachedManifest = m
+ i.cachedManifestMIMEType = mt
+ }
+ return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator of whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly
+// to refuse (unexpected) situations when the digest exists but is "".
+func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
+ if i.instanceDigest != nil {
+ return *i.instanceDigest, true
+ }
+ ref := i.Reference().DockerReference()
+ if ref != nil {
+ if canonical, ok := ref.(reference.Canonical); ok {
+ return canonical.Digest(), true
+ }
+ }
+ return "", false
+}
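+
+// Sketch: for a digest-pinned name (a reference.Canonical), the expected digest
+// comes from the reference itself, so Manifest() above rejects a registry that
+// serves different content. src and the pinned name are assumptions:
+//
+// unparsed := UnparsedInstance(src, nil) // src reads example.com/app@sha256:…
+// if _, _, err := unparsed.Manifest(ctx); err != nil {
+// 	return err // e.g. "Manifest does not match provided manifest digest"
+// }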
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
+ // It would be consistent to make this an internal/unparsedimage/impl.Compat wrapper,
+ // but this is very likely to be the only implementation ever.
+ sigs, err := i.UntrustedSignatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+ simpleSigs := [][]byte{}
+ for _, sig := range sigs {
+ if sig, ok := sig.(signature.SimpleSigning); ok {
+ simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+ }
+ }
+ return simpleSigs, nil
+}
+
+// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+ if i.cachedSignatures == nil {
+ sigs, err := i.src.GetSignaturesWithFormat(ctx, i.instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ i.cachedSignatures = sigs
+ }
+ return i.cachedSignatures, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
new file mode 100644
index 00000000000..cff68ac16e2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
@@ -0,0 +1,78 @@
+package impl
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Compat implements the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+// See AddCompat below.
+type Compat struct {
+ dest private.ImageDestinationInternalOnly
+}
+
+// AddCompat initializes Compat to implement the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+//
+// Use it like this:
+//
+// type yourDestination struct {
+// impl.Compat
+// …
+// }
+//
+// dest := &yourDestination{…}
+// dest.Compat = impl.AddCompat(dest)
+func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
+ return Compat{dest}
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
+ Cache: blobinfocache.FromBlobInfoCache(cache),
+ IsConfig: isConfig,
+ })
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
+ Cache: blobinfocache.FromBlobInfoCache(cache),
+ CanSubstitute: canSubstitute,
+ })
+}
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ withFormat := []signature.Signature{}
+ for _, sig := range signatures {
+ withFormat = append(withFormat, signature.SimpleSigningFromBlob(sig))
+ }
+ return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
new file mode 100644
index 00000000000..704812e9abf
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
@@ -0,0 +1,72 @@
+package impl
+
+import "github.com/containers/image/v5/types"
+
+// Properties collects properties of an ImageDestination that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+ // SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// An empty slice or nil means any MIME type may be tried for upload.
+ SupportedManifestMIMETypes []string
+ // DesiredLayerCompression indicates the kind of compression to apply on layers
+ DesiredLayerCompression types.LayerCompression
+// AcceptsForeignLayerURLs is false if foreign layers in the manifest should actually be
+ // uploaded to the image destination, true otherwise.
+ AcceptsForeignLayerURLs bool
+ // MustMatchRuntimeOS is set to true if the destination can store only images targeted for the current runtime architecture and OS.
+ MustMatchRuntimeOS bool
+ // IgnoresEmbeddedDockerReference is set to true if the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+ // and would prefer to receive an unmodified manifest instead of one modified for the destination.
+ // Does not make a difference if Reference().DockerReference() is nil.
+ IgnoresEmbeddedDockerReference bool
+ // HasThreadSafePutBlob indicates that PutBlob can be executed concurrently.
+ HasThreadSafePutBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageDestination corresponding to Properties.
+type PropertyMethodsInitialize struct {
+ // We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+ vals Properties
+}
+
+// PropertyMethods creates a PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+ return PropertyMethodsInitialize{
+ vals: vals,
+ }
+}
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, then any MIME type may be tried for upload.
+func (o PropertyMethodsInitialize) SupportedManifestMIMETypes() []string {
+ return o.vals.SupportedManifestMIMETypes
+}
+
+// DesiredLayerCompression indicates the kind of compression to apply on layers
+func (o PropertyMethodsInitialize) DesiredLayerCompression() types.LayerCompression {
+ return o.vals.DesiredLayerCompression
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
+// uploaded to the image destination, true otherwise.
+func (o PropertyMethodsInitialize) AcceptsForeignLayerURLs() bool {
+ return o.vals.AcceptsForeignLayerURLs
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (o PropertyMethodsInitialize) MustMatchRuntimeOS() bool {
+ return o.vals.MustMatchRuntimeOS
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (o PropertyMethodsInitialize) IgnoresEmbeddedDockerReference() bool {
+ return o.vals.IgnoresEmbeddedDockerReference
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (o PropertyMethodsInitialize) HasThreadSafePutBlob() bool {
+ return o.vals.HasThreadSafePutBlob
+}
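+
+// Wiring sketch, following the embedding pattern used elsewhere in these
+// internal packages (yourDestination is hypothetical):
+//
+// type yourDestination struct {
+// 	impl.PropertyMethodsInitialize
+// 	…
+// }
+//
+// dest := &yourDestination{
+// 	PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+// 		SupportedManifestMIMETypes: nil, // any MIME type may be tried
+// 		DesiredLayerCompression:    types.Compress,
+// 		HasThreadSafePutBlob:       true,
+// 	}),
+// }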
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
new file mode 100644
index 00000000000..225ea4491f4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
@@ -0,0 +1,52 @@
+package stubs
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+// NoPutBlobPartialInitialize implements parts of private.ImageDestination
+// for transports that don’t support PutBlobPartial().
+// See NoPutBlobPartial() below.
+type NoPutBlobPartialInitialize struct {
+ transportName string
+}
+
+// NoPutBlobPartial creates a NoPutBlobPartialInitialize for ref.
+func NoPutBlobPartial(ref types.ImageReference) NoPutBlobPartialInitialize {
+ return NoPutBlobPartialRaw(ref.Transport().Name())
+}
+
+// NoPutBlobPartialRaw is the same thing as NoPutBlobPartial, but it can be used
+// in situations where no ImageReference is available.
+func NoPutBlobPartialRaw(transportName string) NoPutBlobPartialInitialize {
+ return NoPutBlobPartialInitialize{
+ transportName: transportName,
+ }
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
+ return false
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+ return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true.
+type ImplementsPutBlobPartial struct{}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (stub ImplementsPutBlobPartial) SupportsPutBlobPartial() bool {
+ return true
+}
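+
+// Caller-side sketch of the fallback contract described above (all names are
+// illustrative):
+//
+// if dest.SupportsPutBlobPartial() {
+// 	if info, err := dest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache); err == nil {
+// 		return info, nil
+// 	}
+// 	// fall back to a full upload on failure
+// }
+// return dest.PutBlobWithOptions(ctx, stream, srcInfo, options)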
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
new file mode 100644
index 00000000000..7015fd06896
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
@@ -0,0 +1,50 @@
+package stubs
+
+import (
+ "context"
+ "errors"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// NoSignaturesInitialize implements parts of private.ImageDestination
+// for transports that don’t support storing signatures.
+// See NoSignatures() below.
+type NoSignaturesInitialize struct {
+ message string
+}
+
+// NoSignatures creates a NoSignaturesInitialize, failing with message.
+func NoSignatures(message string) NoSignaturesInitialize {
+ return NoSignaturesInitialize{
+ message: message,
+ }
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub NoSignaturesInitialize) SupportsSignatures(ctx context.Context) error {
+ return errors.New(stub.message)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (stub NoSignaturesInitialize) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ if len(signatures) != 0 {
+ return errors.New(stub.message)
+ }
+ return nil
+}
+
+// AlwaysSupportsSignatures implements SupportsSignatures() that returns nil.
+type AlwaysSupportsSignatures struct{}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub AlwaysSupportsSignatures) SupportsSignatures(ctx context.Context) error {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
new file mode 100644
index 00000000000..ab233406a40
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
@@ -0,0 +1,27 @@
+// Package stubs contains trivial stubs for parts of private.ImageDestination.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagedestination/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+//
+// First, there are pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
+// implementation:
+//
+// type yourDestination struct {
+// stubs.ImplementsPutBlobPartial
+// …
+// }
+//
+// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+// type yourDestination struct {
+// stubs.NoPutBlobPartialInitialize
+// …
+// }
+//
+// dest := &yourDestination{
+// …
+// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+// }
+package stubs
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
new file mode 100644
index 00000000000..43575ede33f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
@@ -0,0 +1,79 @@
+package imagedestination
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// wrapped provides the private.ImageDestination operations
+// for a destination that only implements types.ImageDestination
+type wrapped struct {
+ stubs.NoPutBlobPartialInitialize
+
+ types.ImageDestination
+}
+
+// FromPublic(dest) returns an object that provides the private.ImageDestination API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageDestination, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageDestination.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(dest types.ImageDestination) private.ImageDestination {
+ if dest2, ok := dest.(private.ImageDestination); ok {
+ return dest2
+ }
+ return &wrapped{
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(dest.Reference()),
+
+ ImageDestination: dest,
+ }
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ simpleSigs := [][]byte{}
+ for _, sig := range signatures {
+ simpleSig, ok := sig.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(sig)
+ }
+ simpleSigs = append(simpleSigs, simpleSig.UntrustedSignature())
+ }
+ return w.PutSignatures(ctx, simpleSigs, instanceDigest)
+}
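+
+// Usage sketch; publicDest, stream, inputInfo and blobCache are assumptions
+// for illustration:
+//
+// privDest := FromPublic(publicDest) // a no-op if publicDest already implements private.ImageDestination
+// info, err := privDest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
+// 	Cache:    blobCache, // a blobinfocache.BlobInfoCache2
+// 	IsConfig: false,
+// })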
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go
new file mode 100644
index 00000000000..7d859c31258
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go
@@ -0,0 +1,55 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// Compat implements the obsolete parts of types.ImageSource
+// for implementations of private.ImageSource.
+// See AddCompat below.
+type Compat struct {
+ src private.ImageSourceInternalOnly
+}
+
+// AddCompat initializes Compat to implement the obsolete parts of types.ImageSource
+// for implementations of private.ImageSource.
+//
+// Use it like this:
+//
+// type yourSource struct {
+// impl.Compat
+// …
+// }
+//
+// src := &yourSource{…}
+// src.Compat = impl.AddCompat(src)
+func AddCompat(src private.ImageSourceInternalOnly) Compat {
+ return Compat{src}
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (c *Compat) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ // Silently ignore signatures with other formats; the caller can’t handle them.
+ // Admittedly callers that want to sync all of the image might want to fail instead; this
+ // way an upgrade of c/image neither breaks them nor adds new functionality.
+ // Alternatively, we could possibly define the old GetSignatures to use the multi-format
+ // signature.Blob representation now, in general, but that could silently break them as well.
+ sigs, err := c.src.GetSignaturesWithFormat(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ simpleSigs := [][]byte{}
+ for _, sig := range sigs {
+ if sig, ok := sig.(signature.SimpleSigning); ok {
+ simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+ }
+ }
+ return simpleSigs, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
new file mode 100644
index 00000000000..d5eae63519e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
@@ -0,0 +1,23 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// DoesNotAffectLayerInfosForCopy implements LayerInfosForCopy() that returns nothing.
+type DoesNotAffectLayerInfosForCopy struct{}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (stub DoesNotAffectLayerInfosForCopy) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
new file mode 100644
index 00000000000..73e8c78e95a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
@@ -0,0 +1,27 @@
+package impl
+
+// Properties collects properties of an ImageSource that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+ // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+ HasThreadSafeGetBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageSource corresponding to Properties.
+type PropertyMethodsInitialize struct {
+ // We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+ vals Properties
+}
+
+// PropertyMethods creates an PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+ return PropertyMethodsInitialize{
+ vals: vals,
+ }
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (o PropertyMethodsInitialize) HasThreadSafeGetBlob() bool {
+ return o.vals.HasThreadSafeGetBlob
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
new file mode 100644
index 00000000000..b3a8c7e88d9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
@@ -0,0 +1,19 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// NoSignatures implements GetSignatures() that returns nothing.
+type NoSignatures struct{}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (stub NoSignatures) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
new file mode 100644
index 00000000000..15aee6d42f8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
@@ -0,0 +1,52 @@
+package stubs
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+// NoGetBlobAtInitialize implements parts of private.ImageSource
+// for transports that don’t support GetBlobAt().
+// See NoGetBlobAt() below.
+type NoGetBlobAtInitialize struct {
+ transportName string
+}
+
+// NoGetBlobAt creates a NoGetBlobAtInitialize for ref.
+func NoGetBlobAt(ref types.ImageReference) NoGetBlobAtInitialize {
+ return NoGetBlobAtRaw(ref.Transport().Name())
+}
+
+// NoGetBlobAtRaw is the same thing as NoGetBlobAt, but it can be used
+// in situations where no ImageReference is available.
+func NoGetBlobAtRaw(transportName string) NoGetBlobAtInitialize {
+ return NoGetBlobAtInitialize{
+ transportName: transportName,
+ }
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
+ return false
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap, and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsGetBlobAt implements SupportsGetBlobAt() that returns true.
+type ImplementsGetBlobAt struct{}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub ImplementsGetBlobAt) SupportsGetBlobAt() bool {
+ return true
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
new file mode 100644
index 00000000000..0ce6fd51f71
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
@@ -0,0 +1,28 @@
+// Package stubs contains trivial stubs for parts of private.ImageSource.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagesource/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+//
+// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
+//
+// implementation:
+//
+//	type yourSource struct {
+//		stubs.ImplementsGetBlobAt
+//		…
+//	}
+//
+// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+//	type yourSource struct {
+//		stubs.NoGetBlobAtInitialize
+//		…
+//	}
+//
+//	dest := &yourSource{
+//		…
+//		NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+//	}
+package stubs
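
A compile-ready version of the embedding pattern from the package comment above; the names are illustrative and, `stubs` being internal, this only builds inside the c/image module:

```go
package mytransport

import (
	"github.com/containers/image/v5/internal/imagesource/stubs"
	"github.com/containers/image/v5/types"
)

// yourSource embeds a constructor-style stub; a pure stub such as
// stubs.ImplementsGetBlobAt would be embedded without any constructor call.
type yourSource struct {
	stubs.NoGetBlobAtInitialize
	// …
}

func newYourSource(ref types.ImageReference) *yourSource {
	return &yourSource{
		// The Initialize suffix marks stubs whose constructor must be called.
		NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
	}
}
```
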
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
new file mode 100644
index 00000000000..886b4e833bb
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
@@ -0,0 +1,56 @@
+package imagesource
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// wrapped provides the private.ImageSource operations
+// for a source that only implements types.ImageSource
+type wrapped struct {
+ stubs.NoGetBlobAtInitialize
+
+ types.ImageSource
+}
+
+// FromPublic(src) returns an object that provides the private.ImageSource API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageSource, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageSource.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(src types.ImageSource) private.ImageSource {
+ if src2, ok := src.(private.ImageSource); ok {
+ return src2
+ }
+ return &wrapped{
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(src.Reference()),
+
+ ImageSource: src,
+ }
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (w *wrapped) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ sigs, err := w.GetSignatures(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res := []signature.Signature{}
+ for _, sig := range sigs {
+ res = append(res, signature.SimpleSigningFromBlob(sig))
+ }
+ return res, nil
+}
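
A sketch of the intended call pattern (`readSignatures` is a hypothetical helper; the package is internal, so this only compiles inside c/image). FromPublic is a cheap type assertion when the source already implements the private API, and otherwise wraps it, converting legacy GetSignatures results into simple-signing signature values:

```go
package example

import (
	"context"

	"github.com/containers/image/v5/internal/imagesource"
	"github.com/containers/image/v5/internal/signature"
	"github.com/containers/image/v5/types"
)

func readSignatures(ctx context.Context, public types.ImageSource) ([]signature.Signature, error) {
	src := imagesource.FromPublic(public) // no-op if public already implements private.ImageSource
	// On wrapped legacy sources, each signature blob comes back as a SimpleSigning value.
	return src.GetSignaturesWithFormat(ctx, nil)
}
```
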
diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
index 3fed1995cb8..f17d002469e 100644
--- a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
+++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
@@ -1,10 +1,8 @@
package iolimits
import (
+ "fmt"
"io"
- "io/ioutil"
-
- "github.com/pkg/errors"
)
// All constants below are intended to be used as limits for `ReadAtMost`. The
@@ -47,13 +45,13 @@ const (
func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
limitedReader := io.LimitReader(reader, int64(limit+1))
- res, err := ioutil.ReadAll(limitedReader)
+ res, err := io.ReadAll(limitedReader)
if err != nil {
return nil, err
}
if len(res) > limit {
- return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit)
+ return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit)
}
return res, nil
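
The limit+1 technique above is easy to miss: the LimitReader is allowed to hand back one byte more than the caller's limit, so receiving more than `limit` bytes proves the stream was oversized without reading it in full. A standalone, runnable restatement:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func readAtMost(reader io.Reader, limit int) ([]byte, error) {
	// Ask for limit+1 bytes: getting all of them means the input was too big.
	res, err := io.ReadAll(io.LimitReader(reader, int64(limit+1)))
	if err != nil {
		return nil, err
	}
	if len(res) > limit {
		return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit)
	}
	return res, nil
}

func main() {
	_, err := readAtMost(bytes.NewReader(make([]byte, 11)), 10)
	fmt.Println(err) // exceeded maximum allowed size of 10 bytes
}
```
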
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/errors.go b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
new file mode 100644
index 00000000000..e5732a8c43f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
@@ -0,0 +1,32 @@
+package manifest
+
+import "fmt"
+
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
+//
+// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don’t provide a public constructor)
+type NonImageArtifactError struct {
+ // Callers should not be blindly calling image-specific operations and only checking MIME types
+ // on failure; if they care about the artifact type, they should check before using it.
+ // If they blindly assume an image, they don’t really need this value; just a type check
+ // is sufficient for basic "we can only pull images" UI.
+ //
+ // Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig,
+ // e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers
+ // to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of
+ // type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions
+ // based on that kind of data.
+ //
+ // So, let’s not expose this until a specific need is identified.
+ mimeType string
+}
+
+// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType.
+func NewNonImageArtifactError(mimeType string) error {
+ return NonImageArtifactError{mimeType: mimeType}
+}
+
+func (e NonImageArtifactError) Error() string {
+ return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
+}
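
A sketch of the errors.As detection the comment above describes, assuming the public `manifest.NonImageArtifactError` alias it mentions; `describeFailure` is a hypothetical helper:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/containers/image/v5/manifest"
)

// describeFailure distinguishes "this object is an artifact, not a container
// image" from other failures of an image-specific operation.
func describeFailure(err error) string {
	var artifactErr manifest.NonImageArtifactError
	if errors.As(err, &artifactErr) {
		return fmt.Sprintf("not a container image: %v", artifactErr)
	}
	return err.Error()
}
```
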
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
deleted file mode 100644
index bf6cc87d421..00000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// Key represents a single key linked to one or more kernel keyrings.
-type Key struct {
- Name string
-
- id, ring keyID
- size int
-}
-
-// ID returns the 32-bit kernel identifier for a specific key
-func (k *Key) ID() int32 {
- return int32(k.id)
-}
-
-// Get the key's value as a byte slice
-func (k *Key) Get() ([]byte, error) {
- var (
- b []byte
- err error
- sizeRead int
- )
-
- if k.size == 0 {
- k.size = 512
- }
-
- size := k.size
-
- b = make([]byte, int(size))
- sizeRead = size + 1
- for sizeRead > size {
- r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size)
- if err != nil {
- return nil, err
- }
-
- if sizeRead = int(r1); sizeRead > size {
- b = make([]byte, sizeRead)
- size = sizeRead
- sizeRead = size + 1
- } else {
- k.size = sizeRead
- }
- }
- return b[:k.size], err
-}
-
-// Unlink a key from the keyring it was loaded from (or added to). If the key
-// is not linked to any other keyrings, it is destroyed.
-func (k *Key) Unlink() error {
- _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0)
- return err
-}
-
-// Describe returns a string describing the attributes of a specified key
-func (k *Key) Describe() (string, error) {
- keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id))
- if err != nil {
- return "", err
- }
- return keyAttr, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
deleted file mode 100644
index 5eaad615c7c..00000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface)
-package keyctl
-
-import (
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// Keyring is the basic interface to a linux keyctl keyring.
-type Keyring interface {
- ID
- Add(string, []byte) (*Key, error)
- Search(string) (*Key, error)
-}
-
-type keyring struct {
- id keyID
-}
-
-// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have.
-type ID interface {
- ID() int32
-}
-
-// Add a new key to a keyring. The key can be searched for later by name.
-func (kr *keyring) Add(name string, key []byte) (*Key, error) {
- r, err := unix.AddKey("user", name, key, int(kr.id))
- if err == nil {
- key := &Key{Name: name, id: keyID(r), ring: kr.id}
- return key, nil
- }
- return nil, err
-}
-
-// Search for a key by name, this also searches child keyrings linked to this
-// one. The key, if found, is linked to the top keyring that Search() was called
-// from.
-func (kr *keyring) Search(name string) (*Key, error) {
- id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0)
- if err == nil {
- return &Key{Name: name, id: keyID(id), ring: kr.id}, nil
- }
- return nil, err
-}
-
-// ID returns the 32-bit kernel identifier of a keyring
-func (kr *keyring) ID() int32 {
- return int32(kr.id)
-}
-
-// SessionKeyring returns the current login session keyring
-func SessionKeyring() (Keyring, error) {
- return newKeyring(unix.KEY_SPEC_SESSION_KEYRING)
-}
-
-// UserKeyring returns the keyring specific to the current user.
-func UserKeyring() (Keyring, error) {
- return newKeyring(unix.KEY_SPEC_USER_KEYRING)
-}
-
-// Unlink an object from a keyring
-func Unlink(parent Keyring, child ID) error {
- _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0)
- return err
-}
-
-// Link a key into a keyring
-func Link(parent Keyring, child ID) error {
- _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0)
- return err
-}
-
-// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it
-func ReadUserKeyring() ([]*Key, error) {
- var (
- b []byte
- err error
- sizeRead int
- )
- krSize := 4
- size := krSize
- b = make([]byte, size)
- sizeRead = size + 1
- for sizeRead > size {
- r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size)
- if err != nil {
- return nil, err
- }
-
- if sizeRead = int(r1); sizeRead > size {
- b = make([]byte, sizeRead)
- size = sizeRead
- sizeRead = size + 1
- } else {
- krSize = sizeRead
- }
- }
- keyIDs := getKeyIDsFromByte(b[:krSize])
- return keyIDs, err
-}
-
-func getKeyIDsFromByte(byteKeyIDs []byte) []*Key {
- idSize := 4
- var keys []*Key
- for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize {
- tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx]))
- keys = append(keys, &Key{id: keyID(tempID)})
- }
- return keys
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
deleted file mode 100644
index 5f4d2157ae9..00000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// KeyPerm represents in-kernel access control permission to keys and keyrings
-// as a 32-bit integer broken up into four permission sets, one per byte.
-// In MSB order, the perms are: Processor, User, Group, Other.
-type KeyPerm uint32
-
-const (
- // PermOtherAll sets all permission for Other
- PermOtherAll KeyPerm = 0x3f << (8 * iota)
- // PermGroupAll sets all permission for Group
- PermGroupAll
- // PermUserAll sets all permission for User
- PermUserAll
- // PermProcessAll sets all permission for Processor
- PermProcessAll
-)
-
-// SetPerm sets the permissions on a key or keyring.
-func SetPerm(k ID, p KeyPerm) error {
- err := unix.KeyctlSetperm(int(k.ID()), uint32(p))
- return err
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
deleted file mode 100644
index f61666e42c2..00000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
- "golang.org/x/sys/unix"
-)
-
-type keyID int32
-
-func newKeyring(id keyID) (*keyring, error) {
- r1, err := unix.KeyctlGetKeyringID(int(id), true)
- if err != nil {
- return nil, err
- }
-
- if id < 0 {
- r1 = int(id)
- }
- return &keyring{id: keyID(r1)}, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go
new file mode 100644
index 00000000000..bfd6148ceca
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/private/private.go
@@ -0,0 +1,146 @@
+package private
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// ImageSourceInternalOnly is the part of private.ImageSource that is not
+// a part of types.ImageSource.
+type ImageSourceInternalOnly interface {
+ // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+ SupportsGetBlobAt() bool
+ // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt().
+ BlobChunkAccessor
+
+ // GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+ // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+ // (e.g. if the source never returns manifest lists).
+ GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error)
+}
+
+// ImageSource is an internal extension to the types.ImageSource interface.
+type ImageSource interface {
+ types.ImageSource
+ ImageSourceInternalOnly
+}
+
+// ImageDestinationInternalOnly is the part of private.ImageDestination that is not
+// a part of types.ImageDestination.
+type ImageDestinationInternalOnly interface {
+ // SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+ SupportsPutBlobPartial() bool
+ // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
+ // on unsupported formats.
+
+ // PutBlobWithOptions writes contents of stream and returns data representing the result.
+ // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+ // inputInfo.Size is the expected length of stream, if known.
+ // inputInfo.MediaType describes the blob format, if known.
+ // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+ // to any other readers for download using the supplied digest.
+ // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+ PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error)
+
+ // PutBlobPartial attempts to create a blob using the data that is already present
+ // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+ // It is available only if SupportsPutBlobPartial().
+ // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+ // should fall back to PutBlobWithOptions.
+ PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error)
+
+ // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+ // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+ // reflected in the manifest that will be written.
+ // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
+
+ // PutSignaturesWithFormat writes a set of signatures to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // MUST be called after PutManifest (signatures may reference manifest contents).
+ PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error
+}
+
+// ImageDestination is an internal extension to the types.ImageDestination
+// interface.
+type ImageDestination interface {
+ types.ImageDestination
+ ImageDestinationInternalOnly
+}
+
+// PutBlobOptions are used in PutBlobWithOptions.
+type PutBlobOptions struct {
+ Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob.
+ IsConfig bool // True if the blob is a config
+
+ // The following fields are new to internal/private. Users of internal/private MUST fill them in,
+ // but they also must expect that they will be ignored by types.ImageDestination transports.
+ // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+ // if they use internal/imagedestination/impl.Compat;
+ // in that case, they will all be consistently zero-valued.
+
+ EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+ LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+}
+
+// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
+type TryReusingBlobOptions struct {
+ Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
+ // If true, it is allowed to use an equivalent of the desired blob;
+ // in that case the returned info may not match the input.
+ CanSubstitute bool
+
+ // The following fields are new to internal/private. Users of internal/private MUST fill them in,
+ // but they also must expect that they will be ignored by types.ImageDestination transports.
+ // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+ // if they use internal/imagedestination/impl.Compat;
+ // in that case, they will all be consistently zero-valued.
+
+ EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+ LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+ SrcRef reference.Named // A reference to the source image that contains the input blob.
+}
+
+// ImageSourceChunk is a portion of a blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceChunk struct {
+ Offset uint64
+ Length uint64
+}
+
+// BlobChunkAccessor allows fetching discontiguous chunks of a blob.
+type BlobChunkAccessor interface {
+ // GetBlobAt returns a sequential channel of readers that contain data for the requested
+ // blob chunks, and a channel that might get a single error value.
+ // The specified chunks must not overlap, and must be sorted by their offset.
+ // The readers must be fully consumed, in the order they are returned, before blocking
+ // to read the next chunk.
+ GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
+}
+
+// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request.
+type BadPartialRequestError struct {
+ Status string
+}
+
+func (e BadPartialRequestError) Error() string {
+ return e.Status
+}
+
+// UnparsedImage is an internal extension to the types.UnparsedImage interface.
+type UnparsedImage interface {
+ types.UnparsedImage
+ // UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+ UntrustedSignatures(ctx context.Context) ([]signature.Signature, error)
+}
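
A sketch of the consumption contract for GetBlobAt; `readChunks` is hypothetical, and it assumes the readers channel is closed after the last chunk and that the error channel delivers at most one value (or is closed):

```go
package example

import (
	"context"
	"fmt"
	"io"

	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

func readChunks(ctx context.Context, src private.ImageSource, info types.BlobInfo) ([]byte, error) {
	if !src.SupportsGetBlobAt() {
		return nil, fmt.Errorf("chunked reads unsupported; fall back to GetBlob")
	}
	readers, errs, err := src.GetBlobAt(ctx, info, []private.ImageSourceChunk{
		{Offset: 0, Length: 1024},
		{Offset: 4096, Length: 1024}, // non-overlapping, sorted by offset, as required
	})
	if err != nil {
		return nil, err
	}
	var res []byte
	for r := range readers { // consume each reader fully, in the order delivered
		data, err := io.ReadAll(r)
		r.Close()
		if err != nil {
			return nil, err
		}
		res = append(res, data...)
	}
	if err := <-errs; err != nil { // at most a single error value arrives here
		return nil, err
	}
	return res, nil
}
```
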
diff --git a/vendor/github.com/containers/image/v5/internal/signature/signature.go b/vendor/github.com/containers/image/v5/internal/signature/signature.go
new file mode 100644
index 00000000000..ee90b788b0d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/signature.go
@@ -0,0 +1,111 @@
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// FIXME FIXME: MIME type? Int? String?
+// An interface with a name, parse methods?
+type FormatID string
+
+const (
+ SimpleSigningFormat FormatID = "simple-signing"
+ SigstoreFormat FormatID = "sigstore-json"
+ // Update also UnsupportedFormatError below
+)
+
+// Signature is an image signature of some kind.
+type Signature interface {
+ FormatID() FormatID
+ // blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+ // Almost everyone should use signature.Blob() instead.
+ blobChunk() ([]byte, error)
+}
+
+// Blob returns a representation of sig as a []byte, suitable for long-term storage.
+func Blob(sig Signature) ([]byte, error) {
+ chunk, err := sig.blobChunk()
+ if err != nil {
+ return nil, err
+ }
+
+ format := sig.FormatID()
+ switch format {
+ case SimpleSigningFormat:
+ // For compatibility with old dir formats:
+ return chunk, nil
+ default:
+ res := []byte{0} // Start with a zero byte to clearly mark this is a binary format, and disambiguate from random text.
+ res = append(res, []byte(format)...)
+ res = append(res, '\n')
+ res = append(res, chunk...)
+ return res, nil
+ }
+}
+
+// FromBlob returns a signature from parsing a blob created by signature.Blob.
+func FromBlob(blob []byte) (Signature, error) {
+ if len(blob) == 0 {
+ return nil, errors.New("empty signature blob")
+ }
+ // Historically we’ve just been using GPG with no identification; try to auto-detect that.
+ switch blob[0] {
+ // OpenPGP "compressed data" wrapping the message
+ case 0xA0, 0xA1, 0xA2, 0xA3, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 8 (tag: compressed data packet); bits 1…0 = length-type (any)
+ 0xC8, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 8 (tag: compressed data packet)
+ // OpenPGP “one-pass signature” starting a signature
+ 0x90, 0x91, 0x92, 0x93, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 4 (tag: one-pass signature packet); bits 1…0 = length-type (any)
+ 0xC4, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 4 (tag: one-pass signature packet)
+ // OpenPGP signature packet signing the following data
+ 0x88, 0x89, 0x8A, 0x8B, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 2 (tag: signature packet); bits 1…0 = length-type (any)
+ 0xC2: // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 2 (tag: signature packet)
+ return SimpleSigningFromBlob(blob), nil
+
+ // The newer format: binary 0, format name, newline, data
+ case 0x00:
+ blob = blob[1:]
+ newline := bytes.IndexByte(blob, '\n')
+ if newline == -1 {
+ return nil, fmt.Errorf("invalid signature format, missing newline")
+ }
+ formatBytes := blob[:newline]
+ for _, b := range formatBytes {
+ if b < 32 || b >= 0x7F {
+ return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b)
+ }
+ }
+ blobChunk := blob[newline+1:]
+ switch {
+ case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
+ return SimpleSigningFromBlob(blobChunk), nil
+ case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
+ return SigstoreFromBlobChunk(blobChunk)
+ default:
+ return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
+ }
+
+ default:
+ return nil, fmt.Errorf("unrecognized signature format, starting with binary %#x", blob[0])
+ }
+
+}
+
+// UnsupportedFormatError returns an error complaining about sig having an unsupported format.
+func UnsupportedFormatError(sig Signature) error {
+ formatID := sig.FormatID()
+ switch formatID {
+ case SimpleSigningFormat, SigstoreFormat:
+ return fmt.Errorf("unsupported signature format %s", string(formatID))
+ default:
+ return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
+ }
+}
+
+// copyByteSlice returns a guaranteed-fresh copy of a byte slice
+// Use this to make sure the underlying data is not shared and can’t be unexpectedly modified.
+func copyByteSlice(s []byte) []byte {
+ res := []byte{}
+ return append(res, s...)
+}
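
A sketch of the framing rules (internal package; the payload is a fabricated placeholder, since FromBlob recognizes unframed simple-signing blobs only by a plausible OpenPGP leading byte such as 0xC2):

```go
package example

import (
	"fmt"

	"github.com/containers/image/v5/internal/signature"
)

func demoSimpleSigning() error {
	raw := []byte{0xC2, 0x01, 0x02} // placeholder; real signatures are full OpenPGP packets
	sig := signature.SimpleSigningFromBlob(raw)
	blob, err := signature.Blob(sig) // returned unchanged, for compatibility with old layouts
	if err != nil {
		return err
	}
	parsed, err := signature.FromBlob(blob) // auto-detected via the 0xC2 tag byte
	if err != nil {
		return err
	}
	fmt.Println(parsed.FormatID() == signature.SimpleSigningFormat) // true
	return nil
}
```
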
diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
new file mode 100644
index 00000000000..17342c8b761
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
@@ -0,0 +1,84 @@
+package signature
+
+import "encoding/json"
+
+const (
+ // from sigstore/cosign/pkg/types.SimpleSigningMediaType
+ SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json"
+ // from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey
+ SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature"
+)
+
+// Sigstore is a github.com/sigstore/cosign signature.
+// For the persistent-storage format used for blobChunk(), we want
+// a degree of forward compatibility against unexpected field changes
+// (as has happened before), which is why this data type
+// contains just a payload + annotations (including annotations
+// that we don’t recognize or support), instead of individual fields
+// for the known annotations.
+type Sigstore struct {
+ untrustedMIMEType string
+ untrustedPayload []byte
+ untrustedAnnotations map[string]string
+}
+
+// sigstoreJSONRepresentation needs the fields to be public, which we don’t want for
+// the main Sigstore type.
+type sigstoreJSONRepresentation struct {
+ UntrustedMIMEType string `json:"mimeType"`
+ UntrustedPayload []byte `json:"payload"`
+ UntrustedAnnotations map[string]string `json:"annotations"`
+}
+
+// SigstoreFromComponents returns a Sigstore object from its components.
+func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
+ return Sigstore{
+ untrustedMIMEType: untrustedMimeType,
+ untrustedPayload: copyByteSlice(untrustedPayload),
+ untrustedAnnotations: copyStringMap(untrustedAnnotations),
+ }
+}
+
+// SigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
+func SigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
+ var v sigstoreJSONRepresentation
+ if err := json.Unmarshal(blobChunk, &v); err != nil {
+ return Sigstore{}, err
+ }
+ return SigstoreFromComponents(v.UntrustedMIMEType,
+ v.UntrustedPayload,
+ v.UntrustedAnnotations), nil
+}
+
+func (s Sigstore) FormatID() FormatID {
+ return SigstoreFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s Sigstore) blobChunk() ([]byte, error) {
+ return json.Marshal(sigstoreJSONRepresentation{
+ UntrustedMIMEType: s.UntrustedMIMEType(),
+ UntrustedPayload: s.UntrustedPayload(),
+ UntrustedAnnotations: s.UntrustedAnnotations(),
+ })
+}
+
+func (s Sigstore) UntrustedMIMEType() string {
+ return s.untrustedMIMEType
+}
+func (s Sigstore) UntrustedPayload() []byte {
+ return copyByteSlice(s.untrustedPayload)
+}
+
+func (s Sigstore) UntrustedAnnotations() map[string]string {
+ return copyStringMap(s.untrustedAnnotations)
+}
+
+func copyStringMap(m map[string]string) map[string]string {
+ res := map[string]string{}
+ for k, v := range m {
+ res[k] = v
+ }
+ return res
+}
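
Non-simple-signing formats round-trip through the 0x00 + format-name + newline framing; a sketch (internal package, placeholder payload and annotation):

```go
package example

import (
	"fmt"

	"github.com/containers/image/v5/internal/signature"
)

func demoSigstore() error {
	sig := signature.SigstoreFromComponents(
		signature.SigstoreSignatureMIMEType,
		[]byte(`{"critical":{}}`), // placeholder payload
		map[string]string{signature.SigstoreSignatureAnnotationKey: "placeholder"},
	)
	blob, err := signature.Blob(sig) // 0x00 + "sigstore-json" + '\n' + JSON chunk
	if err != nil {
		return err
	}
	parsed, err := signature.FromBlob(blob)
	if err != nil {
		return err
	}
	fmt.Println(parsed.FormatID()) // sigstore-json
	return nil
}
```
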
diff --git a/vendor/github.com/containers/image/v5/internal/signature/simple.go b/vendor/github.com/containers/image/v5/internal/signature/simple.go
new file mode 100644
index 00000000000..88b8adad030
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/simple.go
@@ -0,0 +1,27 @@
+package signature
+
+// SimpleSigning is a “simple signing” signature.
+type SimpleSigning struct {
+ untrustedSignature []byte
+}
+
+// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
+func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
+ return SimpleSigning{
+ untrustedSignature: copyByteSlice(blobChunk),
+ }
+}
+
+func (s SimpleSigning) FormatID() FormatID {
+ return SimpleSigningFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s SimpleSigning) blobChunk() ([]byte, error) {
+ return copyByteSlice(s.untrustedSignature), nil
+}
+
+func (s SimpleSigning) UntrustedSignature() []byte {
+ return copyByteSlice(s.untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
new file mode 100644
index 00000000000..84bb656ac71
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
@@ -0,0 +1,40 @@
+package streamdigest
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/types"
+)
+
+// ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo.
+// The temporary file is returned as an io.Reader along with a cleanup function.
+// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
+// If an error occurs, inputInfo is not modified.
+func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
+ diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
+ if err != nil {
+ return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
+ }
+ cleanup := func() {
+ diskBlob.Close()
+ os.Remove(diskBlob.Name())
+ }
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo)
+ written, err := io.Copy(diskBlob, stream)
+ if err != nil {
+ cleanup()
+ return nil, nil, fmt.Errorf("writing to temporary on-disk layer: %w", err)
+ }
+ _, err = diskBlob.Seek(0, io.SeekStart)
+ if err != nil {
+ cleanup()
+ return nil, nil, fmt.Errorf("rewinding temporary on-disk layer: %w", err)
+ }
+ inputInfo.Digest = digester.Digest()
+ inputInfo.Size = written
+ return diskBlob, cleanup, nil
+}
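
A hypothetical usage sketch: the caller must invoke the returned cleanup, and a nil SystemContext is assumed here to select the default temporary directory:

```go
package example

import (
	"fmt"
	"io"

	"github.com/containers/image/v5/internal/streamdigest"
	"github.com/containers/image/v5/types"
)

func digestedCopy(dst io.Writer, stream io.Reader) error {
	inputInfo := types.BlobInfo{Digest: "", Size: -1} // unknown on entry
	stream2, cleanup, err := streamdigest.ComputeBlobInfo(nil, stream, &inputInfo)
	if err != nil {
		return err
	}
	defer cleanup() // closes and removes the temporary file
	fmt.Println(inputInfo.Digest, inputInfo.Size) // now populated
	_, err = io.Copy(dst, stream2)                // same bytes, replayable from disk
	return err
}
```
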
diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go
deleted file mode 100644
index 388f8cf3b49..00000000000
--- a/vendor/github.com/containers/image/v5/internal/types/types.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package types
-
-import (
- "context"
- "io"
-
- "github.com/containers/image/v5/docker/reference"
- publicTypes "github.com/containers/image/v5/types"
-)
-
-// ImageDestinationWithOptions is an internal extension to the ImageDestination
-// interface.
-type ImageDestinationWithOptions interface {
- publicTypes.ImageDestination
-
- // PutBlobWithOptions is a wrapper around PutBlob. If
- // options.LayerIndex is set, the blob will be committed directly.
- // Either by the calling goroutine or by another goroutine already
- // committing layers.
- //
- // Please note that TryReusingBlobWithOptions and PutBlobWithOptions
- // *must* be used the together. Mixing the two with non "WithOptions"
- // functions is not supported.
- PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error)
-
- // TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
- // options.LayerIndex is set, the reused blob will be recoreded as
- // already pulled.
- //
- // Please note that TryReusingBlobWithOptions and PutBlobWithOptions
- // *must* be used the together. Mixing the two with non "WithOptions"
- // functions is not supported.
- TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error)
-}
-
-// PutBlobOptions are used in PutBlobWithOptions.
-type PutBlobOptions struct {
- // Cache to look up blob infos.
- Cache publicTypes.BlobInfoCache
- // Denotes whether the blob is a config or not.
- IsConfig bool
- // Indicates an empty layer.
- EmptyLayer bool
- // The corresponding index in the layer slice.
- LayerIndex *int
-}
-
-// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
-type TryReusingBlobOptions struct {
- // Cache to look up blob infos.
- Cache publicTypes.BlobInfoCache
- // Use an equivalent of the desired blob.
- CanSubstitute bool
- // Indicates an empty layer.
- EmptyLayer bool
- // The corresponding index in the layer slice.
- LayerIndex *int
- // The reference of the image that contains the target blob.
- SrcRef reference.Named
-}
-
-// ImageSourceChunk is a portion of a blob.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageSourceChunk struct {
- Offset uint64
- Length uint64
-}
-
-// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageSourceSeekable interface {
- // GetBlobAt returns a stream for the specified blob.
- // The specified chunks must be not overlapping and sorted by their offset.
- GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
-}
-
-// ImageDestinationPartial is a service to store a blob by requesting the missing chunks to a ImageSourceSeekable.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageDestinationPartial interface {
- // PutBlobPartial writes contents of stream and returns data representing the result.
- PutBlobPartial(ctx context.Context, stream ImageSourceSeekable, srcInfo publicTypes.BlobInfo, cache publicTypes.BlobInfoCache) (publicTypes.BlobInfo, error)
-}
-
-// BadPartialRequestError is returned by ImageSourceSeekable.GetBlobAt on an invalid request.
-type BadPartialRequestError struct {
- Status string
-}
-
-func (e BadPartialRequestError) Error() string {
- return e.Status
-}
diff --git a/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go
new file mode 100644
index 00000000000..fe65b1a982f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go
@@ -0,0 +1,38 @@
+package unparsedimage
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+)
+
+// wrapped provides the private.UnparsedImage operations
+// for an object that only implements types.UnparsedImage
+type wrapped struct {
+ types.UnparsedImage
+}
+
+// FromPublic(unparsed) returns an object that provides the private.UnparsedImage API
+func FromPublic(unparsed types.UnparsedImage) private.UnparsedImage {
+ if unparsed2, ok := unparsed.(private.UnparsedImage); ok {
+ return unparsed2
+ }
+ return &wrapped{
+ UnparsedImage: unparsed,
+ }
+}
+
+// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+func (w *wrapped) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+ sigs, err := w.Signatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+ res := []signature.Signature{}
+ for _, sig := range sigs {
+ res = append(res, signature.SimpleSigningFromBlob(sig))
+ }
+ return res, nil
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
index 4692211c0a7..5f352acc2f1 100644
--- a/vendor/github.com/containers/image/v5/manifest/common.go
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -1,6 +1,7 @@
package manifest
import (
+ "encoding/json"
"fmt"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
@@ -32,6 +33,72 @@ func dupStringStringMap(m map[string]string) map[string]string {
return result
}
+// allowedManifestFields is a bit mask of “essential” manifest fields that validateUnambiguousManifestFormat
+// can expect to be present.
+type allowedManifestFields int
+
+const (
+ allowedFieldConfig allowedManifestFields = 1 << iota
+ allowedFieldFSLayers
+ allowedFieldHistory
+ allowedFieldLayers
+ allowedFieldManifests
+ allowedFieldFirstUnusedBit // Keep this at the end!
+)
+
+// validateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
+// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
+// other than the ones the caller specifically allows.
+// expectedMIMEType is used only for diagnostics.
+// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
+// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
+// data that just isn’t what was expected, as opposed to actually ambiguous data.
+func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
+ allowed allowedManifestFields) error {
+ if allowed >= allowedFieldFirstUnusedBit {
+ return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
+ }
+ // Use a private type to decode, not just a map[string]interface{}, because we want
+ // to also reject case-insensitive matches (which would be used by Go when really decoding
+ // the manifest).
+ // (It is expected that as manifest formats are added or extended over time, more fields will be added
+ // here.)
+ detectedFields := struct {
+ Config interface{} `json:"config"`
+ FSLayers interface{} `json:"fsLayers"`
+ History interface{} `json:"history"`
+ Layers interface{} `json:"layers"`
+ Manifests interface{} `json:"manifests"`
+ }{}
+ if err := json.Unmarshal(manifest, &detectedFields); err != nil {
+ // The caller was supposed to already validate version numbers, so this should not happen;
+ // let’s not bother with making this error “nice”.
+ return err
+ }
+ unexpected := []string{}
+ // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste.
+ if detectedFields.Config != nil && (allowed&allowedFieldConfig) == 0 {
+ unexpected = append(unexpected, "config")
+ }
+ if detectedFields.FSLayers != nil && (allowed&allowedFieldFSLayers) == 0 {
+ unexpected = append(unexpected, "fsLayers")
+ }
+ if detectedFields.History != nil && (allowed&allowedFieldHistory) == 0 {
+ unexpected = append(unexpected, "history")
+ }
+ if detectedFields.Layers != nil && (allowed&allowedFieldLayers) == 0 {
+ unexpected = append(unexpected, "layers")
+ }
+ if detectedFields.Manifests != nil && (allowed&allowedFieldManifests) == 0 {
+ unexpected = append(unexpected, "manifests")
+ }
+ if len(unexpected) != 0 {
+ return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
+ unexpected, expectedMIMEType)
+ }
+ return nil
+}
+
// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
func layerInfosToStrings(infos []LayerInfo) []string {
@@ -51,6 +118,18 @@ type compressionMIMETypeSet map[string]string
const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant
const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported”
+// findCompressionMIMETypeSet returns the compressionMIMETypeSet in variantTable that contains mimeType as a value, or nil if not found
+func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet {
+ for _, variants := range variantTable {
+ for _, mt := range variants {
+ if mt == mimeType {
+ return variants
+ }
+ }
+ }
+ return nil
+}
+
// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
// to mean "no compression"), based on variantTable.
// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
@@ -63,29 +142,26 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
return "", fmt.Errorf("cannot update unknown MIME type")
}
- for _, variants := range variantTable {
- for _, mt := range variants {
- if mt == mimeType { // Found the variant
- name := mtsUncompressed
- if algorithm != nil {
- name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
- }
- if res, ok := variants[name]; ok {
- if res != mtsUnsupportedMIMEType {
- return res, nil
- }
- if name != mtsUncompressed {
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)}
- }
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
- }
- if name != mtsUncompressed {
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)}
- }
- // We can't very well say “the idea of no compression is unknown”
- return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
+ variants := findCompressionMIMETypeSet(variantTable, mimeType)
+ if variants != nil {
+ name := mtsUncompressed
+ if algorithm != nil {
+ name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
+ }
+ if res, ok := variants[name]; ok {
+ if res != mtsUnsupportedMIMEType {
+ return res, nil
}
+ if name != mtsUncompressed {
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)}
+ }
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
+ }
+ if name != mtsUncompressed {
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)}
}
+ // We can't very well say “the idea of no compression is unknown”
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
}
if algorithm != nil {
return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
@@ -142,3 +218,26 @@ type ManifestLayerCompressionIncompatibilityError struct {
func (m ManifestLayerCompressionIncompatibilityError) Error() string {
return m.text
}
+
+// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType
+// Note that the caller still needs to worry about a specific algorithm not being supported.
+func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool {
+ if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
+ return false
+ }
+ variants := findCompressionMIMETypeSet(variantTable, mimeType)
+ return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm.
+}
+
+// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer {
+ layers := make([]types.ImageInspectLayer, len(infos))
+ for i, info := range infos {
+ layers[i].MIMEType = info.MediaType
+ layers[i].Digest = info.Digest
+ layers[i].Size = info.Size
+ layers[i].Annotations = info.Annotations
+ }
+ return layers
+}
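
Illustrative only, since the helper is unexported: this is the kind of ambiguous document the new check rejects, here exercised through Schema2FromManifest, which allows only the config and layers fields:

```go
package example

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func demoAmbiguous() {
	// Declares both schema2-style "layers" and schema1-style "fsLayers".
	ambiguous := []byte(`{
		"schemaVersion": 2,
		"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
		"config": {}, "layers": [], "fsLayers": []
	}`)
	if _, err := manifest.Schema2FromManifest(ambiguous); err != nil {
		fmt.Println(err) // rejecting ambiguous manifest, unexpected fields []string{"fsLayers"} …
	}
}
```
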
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
index 8679cad1182..9df9bc6673e 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
@@ -2,6 +2,8 @@ package manifest
import (
"encoding/json"
+ "errors"
+ "fmt"
"regexp"
"strings"
"time"
@@ -10,7 +12,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/docker/docker/api/types/versions"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
@@ -58,7 +59,11 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) {
return nil, err
}
if s1.SchemaVersion != 1 {
- return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+ return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion)
+ }
+ if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType,
+ allowedFieldFSLayers|allowedFieldHistory); err != nil {
+ return nil, err
}
if err := s1.initialize(); err != nil {
return nil, err
@@ -109,7 +114,7 @@ func (m *Schema1) initialize() error {
m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
for i, h := range m.History {
if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
- return errors.Wrapf(err, "parsing v2s1 history entry %d", i)
+ return fmt.Errorf("parsing v2s1 history entry %d: %w", i, err)
}
}
return nil
@@ -138,7 +143,7 @@ func (m *Schema1) LayerInfos() []LayerInfo {
func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
if len(m.FSLayers) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
}
m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
for i, info := range layerInfos {
@@ -183,7 +188,7 @@ func (m *Schema1) fixManifestLayers() error {
for _, img := range m.ExtractedV1Compatibility {
// skip IDs that appear after each other, we handle those later
if _, exists := idmap[img.ID]; img.ID != lastID && exists {
- return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
+ return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
}
lastID = img.ID
idmap[lastID] = struct{}{}
@@ -195,7 +200,7 @@ func (m *Schema1) fixManifestLayers() error {
m.History = append(m.History[:i], m.History[i+1:]...)
m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...)
} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
- return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
+ return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
}
}
return nil
@@ -205,7 +210,7 @@ var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
func validateV1ID(id string) error {
if ok := validHex.MatchString(id); !ok {
- return errors.Errorf("image ID %q is invalid", id)
+ return fmt.Errorf("image ID %q is invalid", id)
}
return nil
}
@@ -216,13 +221,16 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI
if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil {
return nil, err
}
+ layerInfos := m.LayerInfos()
i := &types.ImageInspectInfo{
Tag: m.Tag,
Created: &s1.Created,
DockerVersion: s1.DockerVersion,
Architecture: s1.Architecture,
Os: s1.OS,
- Layers: layerInfosToStrings(m.LayerInfos()),
+ Layers: layerInfosToStrings(layerInfos),
+ LayersData: imgInspectLayersFromLayerInfos(layerInfos),
+ Author: s1.Author,
}
if s1.Config != nil {
i.Labels = s1.Config.Labels
@@ -242,14 +250,14 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
config := []byte(m.History[0].V1Compatibility)
err := json.Unmarshal(config, &s1)
if err != nil {
- return nil, errors.Wrapf(err, "decoding configuration")
+ return nil, fmt.Errorf("decoding configuration: %w", err)
}
// Images created with versions prior to 1.8.3 require us to re-encode the encoded object,
// adding some fields that aren't "omitempty".
if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") {
config, err = json.Marshal(&s1)
if err != nil {
- return nil, errors.Wrapf(err, "re-encoding compat image config %#v", s1)
+ return nil, fmt.Errorf("re-encoding compat image config %#v: %w", s1, err)
}
}
// Build the history.
@@ -276,7 +284,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
raw := make(map[string]*json.RawMessage)
err = json.Unmarshal(config, &raw)
if err != nil {
- return nil, errors.Wrapf(err, "re-decoding compat image config %#v", s1)
+ return nil, fmt.Errorf("re-decoding compat image config %#v: %w", s1, err)
}
// Drop some fields.
delete(raw, "id")
@@ -288,20 +296,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
// Add the history and rootfs information.
rootfs, err := json.Marshal(rootFS)
if err != nil {
- return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
+ return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
}
rawRootfs := json.RawMessage(rootfs)
raw["rootfs"] = &rawRootfs
history, err := json.Marshal(convertedHistory)
if err != nil {
- return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err)
+ return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err)
}
rawHistory := json.RawMessage(history)
raw["history"] = &rawHistory
// Encode the result.
config, err = json.Marshal(raw)
if err != nil {
- return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err)
+ return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err)
}
return config, nil
}
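
Both schema1 and schema2 Inspect now populate the LayersData and Author fields; a small consumer sketch (`printLayers` is hypothetical):

```go
package example

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

func printLayers(inspect *types.ImageInspectInfo) {
	fmt.Println("author:", inspect.Author)
	for i, layer := range inspect.LayersData {
		fmt.Printf("layer %d: %s %s %d bytes annotations=%v\n",
			i, layer.MIMEType, layer.Digest, layer.Size, layer.Annotations)
	}
}
```
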
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index 2711ca5ebcc..d9eca043bee 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -9,7 +9,6 @@ import (
"github.com/containers/image/v5/pkg/strslice"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
@@ -165,6 +164,10 @@ func Schema2FromManifest(manifest []byte) (*Schema2, error) {
if err := json.Unmarshal(manifest, &s2); err != nil {
return nil, err
}
+ if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema2MediaType,
+ allowedFieldConfig|allowedFieldLayers); err != nil {
+ return nil, err
+ }
// Check manifest's and layers' media types.
if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
return nil, err
@@ -230,7 +233,7 @@ var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
// CompressionAlgorithm that would result in anything other than gzip compression.
func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.LayersDescriptors) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
}
original := m.LayersDescriptors
m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
@@ -242,7 +245,7 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
}
mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info)
if err != nil {
- return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest)
+ return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
}
m.LayersDescriptors[i].MediaType = mimeType
m.LayersDescriptors[i].Digest = info.Digest
@@ -268,6 +271,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
if err := json.Unmarshal(config, s2); err != nil {
return nil, err
}
+ layerInfos := m.LayerInfos()
i := &types.ImageInspectInfo{
Tag: "",
Created: &s2.Created,
@@ -275,7 +279,9 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
Architecture: s2.Architecture,
Variant: s2.Variant,
Os: s2.OS,
- Layers: layerInfosToStrings(m.LayerInfos()),
+ Layers: layerInfosToStrings(layerInfos),
+ LayersData: imgInspectLayersFromLayerInfos(layerInfos),
+ Author: s2.Author,
}
if s2.Config != nil {
i.Labels = s2.Config.Labels
@@ -291,3 +297,11 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
}
return m.ConfigDescriptor.Digest.Hex(), nil
}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *Schema2) CanChangeLayerCompression(mimeType string) bool {
+ return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType)
+}
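Schema2FromManifest now refuses JSON that could also be parsed as a different manifest kind. The real check, validateUnambiguousManifestFormat, is unexported in this package; the sketch below, with a hypothetical rejectAmbiguous helper, only illustrates the idea of rejecting fields that belong to another format:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// rejectAmbiguous is an illustrative stand-in for the package's unexported
// validateUnambiguousManifestFormat: a schema2 image manifest may carry
// "config" and "layers", but a "manifests" field belongs to a list/index,
// so its presence makes the document ambiguous.
func rejectAmbiguous(manifest []byte) error {
	var fields map[string]json.RawMessage
	if err := json.Unmarshal(manifest, &fields); err != nil {
		return err
	}
	if _, ok := fields["manifests"]; ok {
		return fmt.Errorf("rejecting ambiguous manifest: unexpected field %q", "manifests")
	}
	return nil
}

func main() {
	ambiguous := []byte(`{"schemaVersion":2,"config":{},"layers":[],"manifests":[]}`)
	fmt.Println(rejectAmbiguous(ambiguous)) // rejecting ambiguous manifest: ...
}
```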
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
index 9ebb8d6b90c..7e4cc518358 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
@@ -60,26 +59,26 @@ func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, err
}, nil
}
}
- return ListUpdate{}, errors.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
+ return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(list.Manifests) {
- return errors.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
+ return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
- return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest", i+1, len(updates))
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
list.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
- return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
list.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
- return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
}
list.Manifests[i].MediaType = updates[i].MediaType
}
@@ -91,7 +90,7 @@ func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
- return "", errors.Wrapf(err, "getting platform information %#v", ctx)
+ return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
@@ -115,7 +114,7 @@ func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest
func (list *Schema2List) Serialize() ([]byte, error) {
buf, err := json.Marshal(list)
if err != nil {
- return nil, errors.Wrapf(err, "marshaling Schema2List %#v", list)
+ return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
}
return buf, nil
}
@@ -190,7 +189,11 @@ func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
Manifests: []Schema2ManifestDescriptor{},
}
if err := json.Unmarshal(manifest, &list); err != nil {
- return nil, errors.Wrapf(err, "unmarshaling Schema2List %q", string(manifest))
+ return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
+ }
+ if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
+ allowedFieldManifests); err != nil {
+ return nil, err
}
return &list, nil
}
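A usage sketch for the list entry points touched above; Schema2ListFromManifest, ChooseInstance, and types.SystemContext are the package's public API, while the manifest blob and digest are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

func main() {
	raw := []byte(`{
	  "schemaVersion": 2,
	  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
	  "manifests": [{
	    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
	    "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
	    "size": 7143,
	    "platform": {"architecture": "arm64", "os": "linux"}
	  }]
	}`)

	list, err := manifest.Schema2ListFromManifest(raw)
	if err != nil {
		panic(err)
	}
	// ChooseInstance walks platform.WantedPlatforms(sys) and returns the
	// digest of the best-matching entry.
	instanceDigest, err := list.ChooseInstance(&types.SystemContext{
		ArchitectureChoice: "arm64",
		OSChoice:           "linux",
	})
	fmt.Println(instanceDigest, err)
}
```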
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
index 4b644f2539a..53fc866a78a 100644
--- a/vendor/github.com/containers/image/v5/manifest/manifest.go
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
@@ -34,6 +35,10 @@ const (
DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact).
+type NonImageArtifactError = internalManifest.NonImageArtifactError
+
// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
func SupportedSchema2MediaType(m string) error {
switch m {
@@ -110,7 +115,8 @@ func GuessMIMEType(manifest []byte) string {
}
switch meta.MediaType {
- case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type.
+ case DockerV2Schema2MediaType, DockerV2ListMediaType,
+ imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
return meta.MediaType
}
// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
@@ -121,9 +127,9 @@ func GuessMIMEType(manifest []byte) string {
}
return DockerV2Schema1MediaType
case 2:
- // best effort to understand if this is an OCI image since mediaType
- // isn't in the manifest for OCI anymore
- // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
+ // Best effort to understand if this is an OCI image since mediaType
+ // wasn't in the manifest for OCI image-spec < 1.0.2.
+ // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
ociMan := struct {
Config struct {
MediaType string `json:"mediaType"`
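With the hunk above, GuessMIMEType returns explicit OCI media types directly; the config-descriptor heuristic remains only for manifests written against image-spec < 1.0.2, which omit mediaType. A small sketch of both paths:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// An explicit, recognized mediaType is now returned as-is.
	ociManifest := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.oci.image.manifest.v1+json"}`)
	fmt.Println(manifest.GuessMIMEType(ociManifest))
	// application/vnd.oci.image.manifest.v1+json

	// Pre-1.0.2 OCI manifests omit mediaType; the config descriptor's
	// media type is the remaining hint.
	legacy := []byte(`{"schemaVersion":2,"config":{"mediaType":"application/vnd.oci.image.config.v1+json"}}`)
	fmt.Println(manifest.GuessMIMEType(legacy))
	// application/vnd.oci.image.manifest.v1+json
}
```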
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index 29a479c9427..0674eb47b80 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -5,13 +5,13 @@ import (
"fmt"
"strings"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@@ -54,6 +54,10 @@ func OCI1FromManifest(manifest []byte) (*OCI1, error) {
if err := json.Unmarshal(manifest, &oci1); err != nil {
return nil, err
}
+	if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageManifest,
+ allowedFieldConfig|allowedFieldLayers); err != nil {
+ return nil, err
+ }
return &oci1, nil
}
@@ -62,6 +66,7 @@ func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descript
return &OCI1{
imgspecv1.Manifest{
Versioned: specs.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageManifest,
Config: config,
Layers: layers,
},
@@ -110,9 +115,15 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
// CompressionAlgorithm that isn't supported by OCI.
+//
+// It’s generally the caller’s responsibility to determine whether a particular edit is acceptable, rather than relying on
+// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called (because UpdateLayerInfos needs
+// to know the final digest). See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression
+// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently
+// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers.
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.Layers) != len(layerInfos) {
- return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
}
original := m.Layers
m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
@@ -127,7 +138,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
}
mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
if err != nil {
- return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest)
+ return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
}
if info.CryptoOperation == types.Encrypt {
encMediaType, err := getEncryptedMediaType(mimeType)
@@ -146,6 +157,33 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
return nil
}
+// getEncryptedMediaType returns the encrypted counterpart of the given mediatype, or
+// an error if the mediatype does not support encryption
+func getEncryptedMediaType(mediatype string) (string, error) {
+ for _, s := range strings.Split(mediatype, "+")[1:] {
+ if s == "encrypted" {
+ return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
+ }
+ }
+ unsuffixedMediatype := strings.Split(mediatype, "+")[0]
+ switch unsuffixedMediatype {
+ case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
+ return mediatype + "+encrypted", nil
+ }
+
+ return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype)
+}
+
+// getDecryptedMediaType returns the decrypted counterpart of the given mediatype, or
+// an error if the mediatype does not support decryption
+func getDecryptedMediaType(mediatype string) (string, error) {
+ if !strings.HasSuffix(mediatype, "+encrypted") {
+	return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
+ }
+
+ return strings.TrimSuffix(mediatype, "+encrypted"), nil
+}
+
// Serialize returns the manifest in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (m *OCI1) Serialize() ([]byte, error) {
@@ -154,6 +192,14 @@ func (m *OCI1) Serialize() ([]byte, error) {
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ // We could return at least the layers, but that’s already available in a better format via types.Image.LayerInfos.
+ // Most software calling this without human intervention is going to expect the values to be realistic and relevant,
+ // and is probably better served by failing; we can always re-visit that later if we fail now, but
+ // if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
+ return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType)
+ }
+
config, err := configGetter(m.ConfigInfo())
if err != nil {
return nil, err
@@ -166,6 +212,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
if err := json.Unmarshal(config, d1); err != nil {
return nil, err
}
+ layerInfos := m.LayerInfos()
i := &types.ImageInspectInfo{
Tag: "",
Created: v1.Created,
@@ -173,43 +220,49 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
Labels: v1.Config.Labels,
Architecture: v1.Architecture,
Os: v1.OS,
- Layers: layerInfosToStrings(m.LayerInfos()),
+ Layers: layerInfosToStrings(layerInfos),
+ LayersData: imgInspectLayersFromLayerInfos(layerInfos),
Env: v1.Config.Env,
+ Author: v1.Author,
}
return i, nil
}
// ImageID computes an ID which can uniquely identify this image by its contents.
func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+ // The way m.Config.Digest “uniquely identifies” an image is
+ // by containing RootFS.DiffIDs, which identify the layers of the image.
+	// For non-image artifacts, we can’t expect the config to change
+ // any time the other layers (semantically) change, so this approach of
+ // distinguishing objects only by m.Config.Digest doesn’t work in general.
+ //
+ // Any caller of this method presumably wants to disambiguate the same
+ // images with a different representation, but doesn’t want to disambiguate
+ // representations (by using a manifest digest). So, submitting a non-image
+ // artifact to such a caller indicates an expectation mismatch.
+ // So, we just fail here instead of inventing some other ID value (e.g.
+ // by combining the config and blob layer digests). That still
+ // gives us the option to not fail, and return some value, in the future,
+ // without committing to that approach now.
+ // (The only known caller of ImageID is storage/storageImageDestination.computeID,
+ // which can’t work with non-image artifacts.)
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType)
+ }
+
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}
return m.Config.Digest.Hex(), nil
}
-// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
-// an error if the mediatype does not support encryption
-func getEncryptedMediaType(mediatype string) (string, error) {
- for _, s := range strings.Split(mediatype, "+")[1:] {
- if s == "encrypted" {
- return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype)
- }
- }
- unsuffixedMediatype := strings.Split(mediatype, "+")[0]
- switch unsuffixedMediatype {
- case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
- return mediatype + "+encrypted", nil
- }
-
- return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
-}
-
-// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
-// an error if the mediatype does not support decryption
-func getDecryptedMediaType(mediatype string) (string, error) {
- if !strings.HasSuffix(mediatype, "+encrypted") {
- return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype)
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *OCI1) CanChangeLayerCompression(mimeType string) bool {
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return false
}
-
- return strings.TrimSuffix(mediatype, "+encrypted"), nil
+ return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType)
}
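A sketch of detecting the new failure mode from the caller's side. OCI1FromComponents and Inspect are public API; the artifact media type and digest are invented, and the value-type errors.As target assumes NewNonImageArtifactError returns the error by value, as in this version:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// An OCI artifact: the config descriptor is not an image config.
	m := manifest.OCI1FromComponents(imgspecv1.Descriptor{
		MediaType: "application/vnd.example.sbom.v1+json",
		Digest:    "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
		Size:      7143,
	}, nil)

	_, err := m.Inspect(func(types.BlobInfo) ([]byte, error) {
		return nil, errors.New("not reached: Inspect fails before fetching the config")
	})

	// The concrete error is detected via errors.As, as the type's doc comment says.
	var artifactErr manifest.NonImageArtifactError
	fmt.Println(errors.As(err, &artifactErr)) // true
}
```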
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
index 5b4111e4e24..726207b9d42 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci_index.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -10,7 +10,6 @@ import (
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
// OCI1Index is just an alias for the OCI index type, but one which we can
@@ -44,26 +43,26 @@ func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, erro
}, nil
}
}
- return ListUpdate{}, errors.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
+ return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(index.Manifests) {
- return errors.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
+ return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
- return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest", i+1, len(updates))
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
index.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
- return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
index.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
- return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
}
index.Manifests[i].MediaType = updates[i].MediaType
}
@@ -75,7 +74,7 @@ func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
- return "", errors.Wrapf(err, "getting platform information %#v", ctx)
+ return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range index.Manifests {
@@ -108,7 +107,7 @@ func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest,
func (index *OCI1Index) Serialize() ([]byte, error) {
buf, err := json.Marshal(index)
if err != nil {
- return nil, errors.Wrapf(err, "marshaling OCI1Index %#v", index)
+ return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
}
return buf, nil
}
@@ -119,6 +118,7 @@ func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[
index := OCI1Index{
imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: make([]imgspecv1.Descriptor, len(components)),
Annotations: dupStringStringMap(annotations),
},
@@ -195,12 +195,17 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
index := OCI1Index{
Index: imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: []imgspecv1.Descriptor{},
Annotations: make(map[string]string),
},
}
if err := json.Unmarshal(manifest, &index); err != nil {
- return nil, errors.Wrapf(err, "unmarshaling OCI1Index %q", string(manifest))
+ return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
+ }
+ if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
+ allowedFieldManifests); err != nil {
+ return nil, err
}
return &index, nil
}
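Because OCI1IndexFromComponents now stamps MediaType, a freshly built index serializes as self-describing JSON even though older image-spec versions did not require the field. A quick sketch; the output shown in the comment assumes the image-spec field order:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// Components and annotations are empty here; MediaType is filled in
	// by the constructor rather than left for consumers to guess.
	index := manifest.OCI1IndexFromComponents(nil, nil)
	blob, err := index.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob))
	// roughly: {"schemaVersion":2,"mediaType":"application/vnd.oci.image.index.v1+json","manifests":[]}
}
```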
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
index 3d8738db536..f710be10b69 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -2,38 +2,49 @@ package archive
import (
"context"
+ "fmt"
"io"
"os"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type ociArchiveImageDestination struct {
+ impl.Compat
+
ref ociArchiveReference
- unpackedDest types.ImageDestination
+ unpackedDest private.ImageDestination
tempDirRef tempDirOCIRef
}
// newImageDestination returns an ImageDestination for writing to an existing directory.
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) {
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageDestination, error) {
tempDirRef, err := createOCIRef(sys, ref.image)
if err != nil {
- return nil, errors.Wrapf(err, "creating oci reference")
+ return nil, fmt.Errorf("creating oci reference: %w", err)
}
unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys)
if err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
- return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
+ return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
}
return nil, err
}
- return &ociArchiveImageDestination{ref: ref,
- unpackedDest: unpackedDest,
- tempDirRef: tempDirRef}, nil
+ d := &ociArchiveImageDestination{
+ ref: ref,
+ unpackedDest: imagedestination.FromPublic(unpackedDest),
+ tempDirRef: tempDirRef,
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
}
// Reference returns the reference used to set up this destination.
@@ -87,29 +98,40 @@ func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
return false
}
-// PutBlob writes contents of stream and returns data representing the result.
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
+ return d.unpackedDest.SupportsPutBlobPartial()
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options)
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+ return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
+func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options)
}
// PutManifest writes the manifest to the destination.
@@ -121,11 +143,12 @@ func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte,
return d.unpackedDest.PutManifest(ctx, m, instanceDigest)
}
-// PutSignatures writes a set of signatures to the destination.
+// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- return d.unpackedDest.PutSignatures(ctx, signatures, instanceDigest)
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *ociArchiveImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ return d.unpackedDest.PutSignaturesWithFormat(ctx, signatures, instanceDigest)
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted
@@ -135,7 +158,7 @@ func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatur
// after the directory is made, it is tarred up into a file and the directory is deleted
func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
- return errors.Wrapf(err, "storing image %q", d.ref.image)
+ return fmt.Errorf("storing image %q: %w", d.ref.image, err)
}
// path of directory to tar up
@@ -150,13 +173,13 @@ func tarDirectory(src, dst string) error {
// input is a stream of bytes from the archive of the directory at path
input, err := archive.Tar(src, archive.Uncompressed)
if err != nil {
- return errors.Wrapf(err, "retrieving stream of bytes from %q", src)
+ return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err)
}
// creates the tar file
outFile, err := os.Create(dst)
if err != nil {
- return errors.Wrapf(err, "creating tar file %q", dst)
+ return fmt.Errorf("creating tar file %q: %w", dst, err)
}
defer outFile.Close()
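The archive destination now holds a private.ImageDestination, with imagedestination.FromPublic upgrading the public inner destination and impl.Compat re-deriving the legacy public methods. Those packages are internal to containers/image, so the following sketch only reconstructs the wrapping pattern with invented names:

```go
package main

import "fmt"

// PublicDest is a stand-in for the public types.ImageDestination interface.
type PublicDest interface {
	PutManifest(m []byte) error
}

// PrivateDest extends it with the richer internal methods.
type PrivateDest interface {
	PublicDest
	SupportsPutBlobPartial() bool
}

// fromPublic is an illustrative analogue of imagedestination.FromPublic: it
// upgrades a public-only implementation with conservative defaults, so
// wrappers like the oci/archive destination can always delegate to the
// private interface.
func fromPublic(d PublicDest) PrivateDest {
	if p, ok := d.(PrivateDest); ok {
		return p // already implements the private API; use it directly
	}
	return compatShim{d}
}

type compatShim struct{ PublicDest }

func (compatShim) SupportsPutBlobPartial() bool { return false } // safe default

type simpleDest struct{}

func (simpleDest) PutManifest(m []byte) error { return nil }

func main() {
	d := fromPublic(simpleDest{})
	fmt.Println(d.SupportsPutBlobPartial()) // false
}
```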
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
index 20b392dc0e1..e5ad2570ef7 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
@@ -2,40 +2,51 @@ package archive
import (
"context"
+ "errors"
+ "fmt"
"io"
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
ocilayout "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type ociArchiveImageSource struct {
+ impl.Compat
+
ref ociArchiveReference
- unpackedSrc types.ImageSource
+ unpackedSrc private.ImageSource
tempDirRef tempDirOCIRef
}
// newImageSource returns an ImageSource for reading from an existing directory.
// newImageSource untars the file and saves it in a temp directory
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageSource, error) {
tempDirRef, err := createUntarTempDir(sys, ref)
if err != nil {
- return nil, errors.Wrap(err, "creating temp directory")
+ return nil, fmt.Errorf("creating temp directory: %w", err)
}
unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
if err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
- return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
+ return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
}
return nil, err
}
- return &ociArchiveImageSource{ref: ref,
- unpackedSrc: unpackedSrc,
- tempDirRef: tempDirRef}, nil
+ s := &ociArchiveImageSource{
+ ref: ref,
+ unpackedSrc: imagesource.FromPublic(unpackedSrc),
+ tempDirRef: tempDirRef,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
}
// LoadManifestDescriptor loads the manifest
@@ -48,11 +59,11 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor,
func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
ociArchRef, ok := imgRef.(ociArchiveReference)
if !ok {
- return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference")
+ return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociArchiveReference")
}
tempDirRef, err := createUntarTempDir(sys, ociArchRef)
if err != nil {
- return imgspecv1.Descriptor{}, errors.Wrap(err, "creating temp directory")
+ return imgspecv1.Descriptor{}, fmt.Errorf("creating temp directory: %w", err)
}
defer func() {
err := tempDirRef.deleteTempDir()
@@ -61,7 +72,7 @@ func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.Im
descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted)
if err != nil {
- return imgspecv1.Descriptor{}, errors.Wrap(err, "loading index")
+ return imgspecv1.Descriptor{}, fmt.Errorf("loading index: %w", err)
}
return descriptor, nil
}
@@ -101,12 +112,26 @@ func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo
return s.unpackedSrc.GetBlob(ctx, info, cache)
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (s *ociArchiveImageSource) SupportsGetBlobAt() bool {
+ return s.unpackedSrc.SupportsGetBlobAt()
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ return s.unpackedSrc.GetBlobAt(ctx, info, chunks)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
-func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- return s.unpackedSrc.GetSignatures(ctx, instanceDigest)
+func (s *ociArchiveImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ return s.unpackedSrc.GetSignaturesWithFormat(ctx, instanceDigest)
}
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
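The GetBlobAt contract above (a channel of readers plus a channel that may carry a single error; readers consumed fully and in order) implies a specific consumption pattern on the caller's side. A self-contained sketch of such a consumer, not taken from this package:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// consumeChunks drains the two channels in the documented order: read every
// reader fully and in sequence, then check the error channel once the reader
// channel is closed.
func consumeChunks(readers chan io.ReadCloser, errs chan error) ([]byte, error) {
	var out []byte
	for r := range readers {
		data, err := io.ReadAll(r)
		r.Close()
		if err != nil {
			return nil, err
		}
		out = append(out, data...)
	}
	if err, ok := <-errs; ok && err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	readers := make(chan io.ReadCloser, 2)
	errs := make(chan error, 1)
	readers <- io.NopCloser(strings.NewReader("hello "))
	readers <- io.NopCloser(strings.NewReader("world"))
	close(readers)
	close(errs) // the producer closes this after the last chunk

	data, err := consumeChunks(readers, errs)
	fmt.Println(string(data), err) // hello world <nil>
}
```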
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
index 54d325d34df..53371796fbf 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
@@ -2,21 +2,20 @@ package archive
import (
"context"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"strings"
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/oci/internal"
ocilayout "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
- "github.com/pkg/errors"
)
func init() {
@@ -123,11 +122,7 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, sys, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -144,7 +139,7 @@ func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *typ
// DeleteImage deletes the named image from the registry, if supported.
func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for oci: images")
+ return errors.New("Deleting images not implemented for oci: images")
}
// struct to store the ociReference and temporary directory returned by createOCIRef
@@ -161,9 +156,9 @@ func (t *tempDirOCIRef) deleteTempDir() error {
// createOCIRef creates the oci reference of the image
// If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
- dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
+ dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
if err != nil {
- return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory")
+ return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err)
}
ociRef, err := ocilayout.NewReference(dir, image)
if err != nil {
@@ -178,7 +173,7 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error)
func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
tempDirRef, err := createOCIRef(sys, ref.image)
if err != nil {
- return tempDirOCIRef{}, errors.Wrap(err, "creating oci reference")
+ return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err)
}
src := ref.resolvedFile
dst := tempDirRef.tempDirectory
@@ -190,9 +185,9 @@ func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (temp
defer arch.Close()
if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
- return tempDirOCIRef{}, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
+ return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
}
- return tempDirOCIRef{}, errors.Wrapf(err, "untarring file %q", tempDirRef.tempDirectory)
+ return tempDirOCIRef{}, fmt.Errorf("untarring file %q: %w", tempDirRef.tempDirectory, err)
}
return tempDirRef, nil
}
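The ioutil.TempDir to os.MkdirTemp change in createOCIRef is mechanical: os.MkdirTemp (Go 1.16+) has the same signature and semantics as the deprecated function. For instance:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.MkdirTemp is the direct replacement for the deprecated
	// ioutil.TempDir; an empty dir argument means os.TempDir().
	dir, err := os.MkdirTemp("", "oci")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	fmt.Println(dir) // e.g. /tmp/oci123456789
}
```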
diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
index c2012e50e02..148bc12fa3d 100644
--- a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
@@ -1,7 +1,8 @@
package internal
import (
- "github.com/pkg/errors"
+ "errors"
+ "fmt"
"path/filepath"
"regexp"
"runtime"
@@ -27,7 +28,7 @@ func ValidateImageName(image string) error {
var err error
if !refRegexp.MatchString(image) {
- err = errors.Errorf("Invalid image %s", image)
+ err = fmt.Errorf("Invalid image %s", image)
}
return err
}
@@ -72,11 +73,11 @@ func ValidateOCIPath(path string) error {
if runtime.GOOS == "windows" {
// On Windows we must allow for a ':' as part of the path
if strings.Count(path, ":") > 1 {
- return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+ return fmt.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
}
} else {
if strings.Contains(path, ":") {
- return errors.Errorf("Invalid OCI reference: path %s contains a colon", path)
+ return fmt.Errorf("Invalid OCI reference: path %s contains a colon", path)
}
}
return nil
@@ -96,7 +97,7 @@ func ValidateScope(scope string) error {
cleaned := filepath.Clean(scope)
if cleaned != scope {
- return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
return nil
@@ -105,7 +106,7 @@ func ValidateScope(scope string) error {
func validateScopeWindows(scope string) error {
matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
if !matched {
- return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
+ return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
}
return nil
@@ -113,7 +114,7 @@ func validateScopeWindows(scope string) error {
func validateScopeNonWindows(scope string) error {
if !strings.HasPrefix(scope, "/") {
- return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
+ return fmt.Errorf("Invalid scope %s: must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
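The scope checks above pair an absolute-path requirement with a canonical-form requirement, using filepath.Clean to detect the latter. A standalone sketch of the same non-Windows checks:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// validateScope mirrors the non-Windows checks above: absolute, canonical,
// and not the bare root (so "" and "/" keep distinct semantics).
func validateScope(scope string) error {
	if !strings.HasPrefix(scope, "/") {
		return fmt.Errorf("Invalid scope %s: must be an absolute path", scope)
	}
	if cleaned := filepath.Clean(scope); cleaned != scope {
		return fmt.Errorf("Invalid scope %s: Uses non-canonical path format, perhaps try with path %s", scope, cleaned)
	}
	if scope == "/" {
		return fmt.Errorf("Invalid scope %s: Must not be a root path", scope)
	}
	return nil
}

func main() {
	for _, s := range []string{"/var/lib/oci", "/var//lib/../oci", "relative/path"} {
		fmt.Println(s, "->", validateScope(s))
	}
}
```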
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index d0ee7263527..4face7213c5 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -3,30 +3,37 @@ package layout
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
type ociImageDestination struct {
- ref ociReference
- index imgspecv1.Index
- sharedBlobDir string
- acceptUncompressedLayers bool
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.NoSignaturesInitialize
+
+ ref ociReference
+ index imgspecv1.Index
+ sharedBlobDir string
}
// newImageDestination returns an ImageDestination for writing to an existing directory.
-func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) {
+func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) {
var index *imgspecv1.Index
if indexExists(ref) {
var err error
@@ -43,10 +50,32 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (types.Imag
}
}
- d := &ociImageDestination{ref: ref, index: *index}
+ desiredLayerCompression := types.Compress
+ if sys != nil && sys.OCIAcceptUncompressedLayers {
+ desiredLayerCompression = types.PreserveOriginal
+ }
+
+ d := &ociImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeImageIndex,
+ },
+ DesiredLayerCompression: desiredLayerCompression,
+ AcceptsForeignLayerURLs: true,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: true,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+ NoSignaturesInitialize: stubs.NoSignatures("Pushing signatures for OCI images is not supported"),
+
+ ref: ref,
+ index: *index,
+ }
+ d.Compat = impl.AddCompat(d)
if sys != nil {
d.sharedBlobDir = sys.OCISharedBlobDirPath
- d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers
}
if err := ensureDirectoryExists(d.ref.dir); err != nil {
@@ -72,59 +101,15 @@ func (d *ociImageDestination) Close() error {
return nil
}
-func (d *ociImageDestination) SupportedManifestMIMETypes() []string {
- return []string{
- imgspecv1.MediaTypeImageManifest,
- imgspecv1.MediaTypeImageIndex,
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error {
- return errors.Errorf("Pushing signatures for OCI images is not supported")
-}
-
-func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression {
- if d.acceptUncompressedLayers {
- return types.PreserveOriginal
- }
- return types.Compress
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *ociImageDestination) AcceptsForeignLayerURLs() bool {
- return true
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *ociImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, DockerReference() returns nil.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *ociImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
if err != nil {
return types.BlobInfo{}, err
}
@@ -147,7 +132,7 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+ return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
@@ -181,18 +166,16 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
}
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
@@ -238,7 +221,7 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanc
if err := ensureParentDirectoryExists(blobPath); err != nil {
return err
}
- if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
+ if err := os.WriteFile(blobPath, m, 0644); err != nil {
return err
}
@@ -291,16 +274,6 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
d.index.Manifests = append(d.index.Manifests, *desc)
}
-// PutSignatures would add the given signatures to the oci layout (currently not supported).
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
-// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- if len(signatures) != 0 {
- return errors.Errorf("Pushing signatures for OCI images is not supported")
- }
- return nil
-}
-
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
@@ -309,14 +282,14 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
- if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+ if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
return err
}
indexJSON, err := json.Marshal(d.index)
if err != nil {
return err
}
- return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+ return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
}
func ensureDirectoryExists(path string) error {
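The page of one-line property methods deleted above is replaced by declaring capabilities once as data, with an embedded helper (impl.PropertyMethods) exposing them as methods. The helper is internal, so this sketch reconstructs the pattern with illustrative names:

```go
package main

import "fmt"

// properties is an illustrative analogue of impl.Properties: capabilities are
// declared as data once, and embedding propertyMethods turns them into the
// interface's accessor methods, replacing hand-written one-liners.
type properties struct {
	HasThreadSafePutBlob    bool
	AcceptsForeignLayerURLs bool
}

type propertyMethods struct{ p properties }

func (m propertyMethods) HasThreadSafePutBlob() bool    { return m.p.HasThreadSafePutBlob }
func (m propertyMethods) AcceptsForeignLayerURLs() bool { return m.p.AcceptsForeignLayerURLs }

// ociDest stands in for a destination type embedding the helper.
type ociDest struct {
	propertyMethods
}

func main() {
	d := ociDest{propertyMethods{properties{
		HasThreadSafePutBlob:    true,
		AcceptsForeignLayerURLs: true,
	}}}
	fmt.Println(d.HasThreadSafePutBlob(), d.AcceptsForeignLayerURLs()) // true true
}
```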
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
index 55d3f637a21..b2d963b0192 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -2,22 +2,32 @@ package layout
import (
"context"
+ "errors"
+ "fmt"
"io"
- "io/ioutil"
"net/http"
+ "net/url"
"os"
"strconv"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/docker/go-connections/tlsconfig"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
type ociImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
ref ociReference
index *imgspecv1.Index
descriptor imgspecv1.Descriptor
@@ -26,7 +36,7 @@ type ociImageSource struct {
}
// newImageSource returns an ImageSource for reading from an existing directory.
-func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) {
+func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSource, error) {
tr := tlsclientconfig.NewTransport()
tr.TLSClientConfig = tlsconfig.ServerDefault()
@@ -47,12 +57,23 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSour
if err != nil {
return nil, err
}
- d := &ociImageSource{ref: ref, index: index, descriptor: descriptor, client: client}
+ s := &ociImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ index: index,
+ descriptor: descriptor,
+ client: client,
+ }
if sys != nil {
// TODO(jonboulle): check dir existence?
- d.sharedBlobDir = sys.OCISharedBlobDirPath
+ s.sharedBlobDir = sys.OCISharedBlobDirPath
}
- return d, nil
+ s.Compat = impl.AddCompat(s)
+ return s, nil
}
// Reference returns the reference used to set up this source.
@@ -92,7 +113,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return nil, "", err
}
- m, err := ioutil.ReadFile(manifestPath)
+ m, err := os.ReadFile(manifestPath)
if err != nil {
return nil, "", err
}
@@ -103,17 +124,17 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, mimeType, nil
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *ociImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
- return s.getExternalBlob(ctx, info.URLs)
+ r, s, err := s.getExternalBlob(ctx, info.URLs)
+ if err != nil {
+ return nil, 0, err
+ } else if r != nil {
+ return r, s, nil
+ }
}
path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir)
@@ -132,58 +153,48 @@ func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
return r, fi.Size(), nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- return [][]byte{}, nil
-}
-
+// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
+// This function can return a nil reader when none of the urls is supported by it. In this case, the caller
+// should fall back to fetching the non-external blob (i.e. pull from the registry).
func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
if len(urls) == 0 {
return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
}
errWrap := errors.New("failed fetching external blob from all urls")
- for _, url := range urls {
-
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+ hasSupportedURL := false
+ for _, u := range urls {
+ if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") {
+ continue // unsupported url. skip this url.
+ }
+ hasSupportedURL = true
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
if err != nil {
- errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
+ errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
continue
}
resp, err := s.client.Do(req)
if err != nil {
- errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
+ errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
continue
}
if resp.StatusCode != http.StatusOK {
resp.Body.Close()
- errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url)
+ errWrap = fmt.Errorf("fetching %s failed, response code not 200: %w", u, errWrap)
continue
}
return resp.Body, getBlobSize(resp), nil
}
+ if !hasSupportedURL {
+ return nil, 0, nil // fall back to the non-external blob
+ }
return nil, 0, errWrap
}
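
The errWrap accumulation above relies on the stdlib %w verb, which this patch adopts in place of github.com/pkg/errors: each failed URL wraps the previous error, and the chain stays inspectable with errors.Is. A small stdlib-only illustration:

package main

import (
	"errors"
	"fmt"
)

var errBase = errors.New("failed fetching external blob from all urls")

func main() {
	err := error(errBase)
	for _, u := range []string{"ftp://a", "ftp://b"} {
		err = fmt.Errorf("fetching %s failed: %w", u, err)
	}
	fmt.Println(err)
	// errors.Is still finds the sentinel at the bottom of the chain.
	fmt.Println(errors.Is(err, errBase)) // true
}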
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *ociImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
-
func getBlobSize(resp *http.Response) int64 {
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
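
The hunk is truncated here, but the helper's job is simple: honor the "size or -1 if unknown" convention used throughout GetBlob. A standalone sketch of that fallback, assuming a missing or malformed Content-Length means the size is unknown:

package main

import (
	"fmt"
	"strconv"
)

// parseContentLength mirrors the -1-if-unknown convention used above.
func parseContentLength(v string) int64 {
	size, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return -1 // missing or malformed header: size unknown
	}
	return size
}

func main() {
	fmt.Println(parseContentLength("1024")) // 1024
	fmt.Println(parseContentLength(""))     // -1
}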
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
index a99b631584d..be22bed6d5f 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
@@ -3,6 +3,7 @@ package layout
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -10,13 +11,12 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/oci/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
)
func init() {
@@ -154,11 +154,7 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(sys, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together
@@ -219,7 +215,7 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
ociRef, ok := imgRef.(ociReference)
if !ok {
- return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef")
+ return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef")
}
return ociRef.getManifestDescriptor()
}
@@ -238,7 +234,7 @@ func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.Syst
// DeleteImage deletes the named image from the registry, if supported.
func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for oci: images")
+ return errors.New("Deleting images not implemented for oci: images")
}
// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
@@ -254,7 +250,7 @@ func (ref ociReference) indexPath() string {
// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) {
if err := digest.Validate(); err != nil {
- return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
+ return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err)
}
blobDir := filepath.Join(ref.dir, "blobs")
if sharedBlobDir != "" {
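
For context on blobPath: the OCI image layout stores blobs under blobs/<algorithm>/<encoded-digest>, and sharedBlobDir only swaps out the parent directory. A minimal sketch of that convention, independent of this package's implementation:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

func blobPathFor(root, sharedBlobDir string, d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil {
		return "", fmt.Errorf("unexpected digest reference %s: %w", d, err)
	}
	blobDir := filepath.Join(root, "blobs")
	if sharedBlobDir != "" {
		blobDir = sharedBlobDir
	}
	return filepath.Join(blobDir, d.Algorithm().String(), d.Encoded()), nil
}

func main() {
	d := digest.FromString("hello")
	p, _ := blobPathFor("/tmp/layout", "", d)
	fmt.Println(p) // /tmp/layout/blobs/sha256/2cf2...
}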
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index 4ffbced6bd6..42e8970a07b 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -4,8 +4,8 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
"net"
"net/http"
"net/url"
@@ -19,7 +19,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/imdario/mergo"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/http2"
)
@@ -333,7 +332,7 @@ var (
errEmptyCluster = errors.New("cluster has no server defined")
)
-//helper for checking certificate/key/CA
+// helper for checking certificate/key/CA
func validateFileIsReadable(name string) error {
answer, err := os.Open(name)
defer func() {
@@ -355,19 +354,19 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err
if len(clusterInfo.Server) == 0 {
if len(clusterName) == 0 {
- validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined"))
+ validationErrors = append(validationErrors, errors.New("default cluster has no server defined"))
} else {
- validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName))
+ validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
}
}
// Make sure CA data and CA file aren't both specified
if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
- validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
+ validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
}
if len(clusterInfo.CertificateAuthority) != 0 {
err := validateFileIsReadable(clusterInfo.CertificateAuthority)
if err != nil {
- validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
}
}
@@ -391,34 +390,34 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
// Make sure cert data and file aren't both specified
if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
- validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
+ validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
}
// Make sure key data and file aren't both specified
if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
- validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+ validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
}
// Make sure a key is specified
if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
- validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
+ validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
}
if len(authInfo.ClientCertificate) != 0 {
err := validateFileIsReadable(authInfo.ClientCertificate)
if err != nil {
- validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
}
}
if len(authInfo.ClientKey) != 0 {
err := validateFileIsReadable(authInfo.ClientKey)
if err != nil {
- validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
}
}
}
// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
if (len(methods) > 1) && (!usingAuthPath) {
- validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+ validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
}
return validationErrors
@@ -546,8 +545,10 @@ type clientConfigLoadingRules struct {
// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
// Load starts by running the MigrationRules and then
// takes the loading rules and returns a Config object based on following rules.
-// if the ExplicitPath, return the unmerged explicit file
-// Otherwise, return a merged config based on the Precedence slice
+//
+// - if the ExplicitPath, return the unmerged explicit file
+// - Otherwise, return a merged config based on the Precedence slice
+//
// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
// Read errors or files with non-deserializable content produce errors.
// The first file to set a particular map key wins and map key's value is never changed.
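
The "first file to set a particular map key wins" rule is the inverse of a naive overwrite-merge: later files only fill gaps left by earlier ones. A tiny stdlib-only illustration of that precedence:

package main

import "fmt"

// mergeFirstWins copies keys from src into dst only when dst
// doesn't define them yet, so earlier configs take precedence.
func mergeFirstWins(dst, src map[string]string) {
	for k, v := range src {
		if _, ok := dst[k]; !ok {
			dst[k] = v
		}
	}
}

func main() {
	merged := map[string]string{}
	for _, cfg := range []map[string]string{
		{"server": "https://a.example"},            // highest precedence
		{"server": "https://b.example", "ca": "x"}, // only "ca" survives
	} {
		mergeFirstWins(merged, cfg)
	}
	fmt.Println(merged) // map[ca:x server:https://a.example]
}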
@@ -579,7 +580,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
continue
}
if err != nil {
- errlist = append(errlist, errors.Wrapf(err, "loading config file \"%s\"", filename))
+ errlist = append(errlist, fmt.Errorf("loading config file \"%s\": %w", filename, err))
continue
}
@@ -625,7 +626,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
// LoadFromFile takes a filename and deserializes the contents into Config object
func loadFromFile(filename string) (*clientcmdConfig, error) {
- kubeconfigBytes, err := ioutil.ReadFile(filename)
+ kubeconfigBytes, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
@@ -692,7 +693,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
}
base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
if err != nil {
- return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
+ return fmt.Errorf("Could not determine the absolute path of config file %s: %w", cluster.LocationOfOrigin, err)
}
if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
@@ -705,7 +706,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
}
base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
if err != nil {
- return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
+ return fmt.Errorf("Could not determine the absolute path of config file %s: %w", authInfo.LocationOfOrigin, err)
}
if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
@@ -775,7 +776,7 @@ func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
// Kubernetes API.
func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
if host == "" {
- return nil, errors.Errorf("host must be a URL or a host:port pair")
+ return nil, errors.New("host must be a URL or a host:port pair")
}
base := host
hostURL, err := url.Parse(base)
@@ -792,7 +793,7 @@ func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
return nil, err
}
if hostURL.Path != "" && hostURL.Path != "/" {
- return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
+ return nil, fmt.Errorf("host must be a URL or a host:port pair: %q", base)
}
}
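
The "URL or host:port pair" requirement exists because url.Parse alone cannot distinguish the two: "example.com:8443" parses with scheme "example.com". A sketch of the normalization, assuming the conventional prepend-a-scheme approach:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func normalizeHost(host string) (*url.URL, error) {
	base := host
	if !strings.Contains(base, "://") {
		base = "https://" + base // assume TLS for bare host:port pairs
	}
	u, err := url.Parse(base)
	if err != nil {
		return nil, err
	}
	if u.Path != "" && u.Path != "/" {
		return nil, fmt.Errorf("host must be a URL or a host:port pair: %q", host)
	}
	return u, nil
}

func main() {
	u, _ := normalizeHost("example.com:8443")
	fmt.Println(u) // https://example.com:8443
}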
@@ -862,7 +863,7 @@ func transportNew(config *restConfig) (http.RoundTripper, error) {
// REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains.
if len(config.Username) != 0 && len(config.BearerToken) != 0 {
- return nil, errors.Errorf("username/password or bearer token may be set, but not both")
+ return nil, errors.New("username/password or bearer token may be set, but not both")
}
return rt, nil
@@ -955,7 +956,7 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) {
return nil, nil
}
if c.HasCA() && c.Insecure {
- return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+ return nil, errors.New("specifying a root certificates file with the insecure flag is not allowed")
}
if err := loadTLSFiles(c); err != nil {
return nil, err
@@ -1013,7 +1014,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
return data, nil
}
if len(file) > 0 {
- fileData, err := ioutil.ReadFile(file)
+ fileData, err := os.ReadFile(file)
if err != nil {
return []byte{}, err
}
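
dataFromSliceOrFile prefers inline bytes and only touches the filesystem when none were given, the standard pattern for certificate-authority-data vs certificate-authority style config pairs. A self-contained equivalent:

package main

import (
	"fmt"
	"os"
)

// dataOrFile returns data if present, otherwise the file's contents,
// otherwise nil.
func dataOrFile(data []byte, file string) ([]byte, error) {
	if len(data) > 0 {
		return data, nil
	}
	if file != "" {
		return os.ReadFile(file)
	}
	return nil, nil
}

func main() {
	b, err := dataOrFile([]byte("inline"), "/etc/pki/ca.crt")
	fmt.Println(string(b), err) // inline <nil>
}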
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go
index c7c6cf6945a..b2e4dfd9e8a 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift.go
@@ -3,22 +3,17 @@ package openshift
import (
"bytes"
"context"
- "crypto/rand"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
- "github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/types"
"github.com/containers/image/v5/version"
- "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -95,7 +90,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
req.Header.Set("Content-Type", "application/json")
}
- logrus.Debugf("%s %s", method, url.String())
+ logrus.Debugf("%s %s", method, url.Redacted())
res, err := c.httpClient.Do(req)
if err != nil {
return nil, err
@@ -126,7 +121,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
if statusValid {
return nil, errors.New(status.Message)
}
- return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body))
+ return nil, fmt.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body))
}
return body, nil
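
url.Redacted (Go 1.15+) is the point of the Debugf change above: unlike String, it masks any userinfo password so credentials don't end up in debug logs. For example:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("https://user:s3cret@registry.example.com/oapi/v1")
	fmt.Println(u.String())   // https://user:s3cret@registry.example.com/oapi/v1
	fmt.Println(u.Redacted()) // https://user:xxxxx@registry.example.com/oapi/v1
}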
@@ -153,368 +148,11 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
parts := strings.SplitN(ref, "/", 2)
if len(parts) != 2 {
- return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref)
+ return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
}
return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil
}
-type openshiftImageSource struct {
- client *openshiftClient
- // Values specific to this image
- sys *types.SystemContext
- // State
- docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet
- imageStreamImageName string // Resolved image identifier, or "" if not known yet
-}
-
-// newImageSource creates a new ImageSource for the specified reference.
-// The caller must call .Close() on the returned ImageSource.
-func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) {
- client, err := newOpenshiftClient(ref)
- if err != nil {
- return nil, err
- }
-
- return &openshiftImageSource{
- client: client,
- sys: sys,
- }, nil
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (s *openshiftImageSource) Reference() types.ImageReference {
- return s.client.ref
-}
-
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *openshiftImageSource) Close() error {
- if s.docker != nil {
- err := s.docker.Close()
- s.docker = nil
-
- return err
- }
-
- return nil
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
-func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- if err := s.ensureImageIsResolved(ctx); err != nil {
- return nil, "", err
- }
- return s.docker.GetManifest(ctx, instanceDigest)
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- if err := s.ensureImageIsResolved(ctx); err != nil {
- return nil, 0, err
- }
- return s.docker.GetBlob(ctx, info, cache)
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- var imageStreamImageName string
- if instanceDigest == nil {
- if err := s.ensureImageIsResolved(ctx); err != nil {
- return nil, err
- }
- imageStreamImageName = s.imageStreamImageName
- } else {
- imageStreamImageName = instanceDigest.String()
- }
- image, err := s.client.getImage(ctx, imageStreamImageName)
- if err != nil {
- return nil, err
- }
- var sigs [][]byte
- for _, sig := range image.Signatures {
- if sig.Type == imageSignatureTypeAtomic {
- sigs = append(sigs, sig.Content)
- }
- }
- return sigs, nil
-}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
-
-// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
-func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
- if s.docker != nil {
- return nil
- }
-
- // FIXME: validate components per validation.IsValidPathSegmentName?
- path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
- body, err := s.client.doRequest(ctx, http.MethodGet, path, nil)
- if err != nil {
- return err
- }
- // Note: This does absolutely no kind/version checking or conversions.
- var is imageStream
- if err := json.Unmarshal(body, &is); err != nil {
- return err
- }
- var te *tagEvent
- for _, tag := range is.Status.Tags {
- if tag.Tag != s.client.ref.dockerReference.Tag() {
- continue
- }
- if len(tag.Items) > 0 {
- te = &tag.Items[0]
- break
- }
- }
- if te == nil {
- return errors.Errorf("No matching tag found")
- }
- logrus.Debugf("tag event %#v", te)
- dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
- if err != nil {
- return err
- }
- logrus.Debugf("Resolved reference %#v", dockerRefString)
- dockerRef, err := docker.ParseReference("//" + dockerRefString)
- if err != nil {
- return err
- }
- d, err := dockerRef.NewImageSource(ctx, s.sys)
- if err != nil {
- return err
- }
- s.docker = d
- s.imageStreamImageName = te.Image
- return nil
-}
-
-type openshiftImageDestination struct {
- client *openshiftClient
- docker types.ImageDestination // The docker/distribution API endpoint
- // State
- imageStreamImageName string // "" if not yet known
-}
-
-// newImageDestination creates a new ImageDestination for the specified reference.
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) {
- client, err := newOpenshiftClient(ref)
- if err != nil {
- return nil, err
- }
-
- // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
- // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
- // the manifest digest at this point.
- dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
- dockerRef, err := docker.ParseReference(dockerRefString)
- if err != nil {
- return nil, err
- }
- docker, err := dockerRef.NewImageDestination(ctx, sys)
- if err != nil {
- return nil, err
- }
-
- return &openshiftImageDestination{
- client: client,
- docker: docker,
- }, nil
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (d *openshiftImageDestination) Reference() types.ImageReference {
- return d.client.ref
-}
-
-// Close removes resources associated with an initialized ImageDestination, if any.
-func (d *openshiftImageDestination) Close() error {
- return d.docker.Close()
-}
-
-func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
- return d.docker.SupportedManifestMIMETypes()
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.Compress
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
- return true
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
- return false
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
- return d.docker.IgnoresEmbeddedDockerReference()
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
-// inputInfo.Size is the expected length of stream, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
-func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
- if instanceDigest == nil {
- manifestDigest, err := manifest.Digest(m)
- if err != nil {
- return err
- }
- d.imageStreamImageName = manifestDigest.String()
- }
- return d.docker.PutManifest(ctx, m, instanceDigest)
-}
-
-func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- var imageStreamImageName string
- if instanceDigest == nil {
- if d.imageStreamImageName == "" {
- return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
- }
- imageStreamImageName = d.imageStreamImageName
- } else {
- imageStreamImageName = instanceDigest.String()
- }
-
- // Because image signatures are a shared resource in Atomic Registry, the default upload
- // always adds signatures. Eventually we should also allow removing signatures.
-
- if len(signatures) == 0 {
- return nil // No need to even read the old state.
- }
-
- image, err := d.client.getImage(ctx, imageStreamImageName)
- if err != nil {
- return err
- }
- existingSigNames := map[string]struct{}{}
- for _, sig := range image.Signatures {
- existingSigNames[sig.objectMeta.Name] = struct{}{}
- }
-
-sigExists:
- for _, newSig := range signatures {
- for _, existingSig := range image.Signatures {
- if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
- continue sigExists
- }
- }
-
- // The API expect us to invent a new unique name. This is racy, but hopefully good enough.
- var signatureName string
- for {
- randBytes := make([]byte, 16)
- n, err := rand.Read(randBytes)
- if err != nil || n != 16 {
- return errors.Wrapf(err, "generating random signature len %d", n)
- }
- signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
- if _, ok := existingSigNames[signatureName]; !ok {
- break
- }
- }
- // Note: This does absolutely no kind/version checking or conversions.
- sig := imageSignature{
- typeMeta: typeMeta{
- Kind: "ImageSignature",
- APIVersion: "v1",
- },
- objectMeta: objectMeta{Name: signatureName},
- Type: imageSignatureTypeAtomic,
- Content: newSig,
- }
- body, err := json.Marshal(sig)
- if err != nil {
- return err
- }
- _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- return d.docker.Commit(ctx, unparsedToplevel)
-}
-
// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
type imageStream struct {
Status imageStreamStatus `json:"status,omitempty"`
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
new file mode 100644
index 00000000000..d5dbaf27eb0
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
@@ -0,0 +1,247 @@
+package openshift
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+type openshiftImageDestination struct {
+ impl.Compat
+ stubs.AlwaysSupportsSignatures
+
+ client *openshiftClient
+ docker private.ImageDestination // The docker/distribution API endpoint
+ // State
+ imageStreamImageName string // "" if not yet known
+}
+
+// newImageDestination creates a new ImageDestination for the specified reference.
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (private.ImageDestination, error) {
+ client, err := newOpenshiftClient(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
+ // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
+ // the manifest digest at this point.
+ dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
+ dockerRef, err := docker.ParseReference(dockerRefString)
+ if err != nil {
+ return nil, err
+ }
+ docker, err := dockerRef.NewImageDestination(ctx, sys)
+ if err != nil {
+ return nil, err
+ }
+
+ d := &openshiftImageDestination{
+ client: client,
+ docker: imagedestination.FromPublic(docker),
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *openshiftImageDestination) Reference() types.ImageReference {
+ return d.client.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *openshiftImageDestination) Close() error {
+ return d.docker.Close()
+}
+
+func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
+ return d.docker.SupportedManifestMIMETypes()
+}
+
+func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
+ return types.Compress
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually
+// be uploaded to the image destination, true otherwise.
+func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
+ return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
+ return d.docker.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
+ return d.docker.SupportsPutBlobPartial()
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options)
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+ return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ return d.docker.TryReusingBlobWithOptions(ctx, info, options)
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ if instanceDigest == nil {
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.imageStreamImageName = manifestDigest.String()
+ }
+ return d.docker.PutManifest(ctx, m, instanceDigest)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ var imageStreamImageName string
+ if instanceDigest == nil {
+ if d.imageStreamImageName == "" {
+ return errors.New("Internal error: Unknown manifest digest, can't add signatures")
+ }
+ imageStreamImageName = d.imageStreamImageName
+ } else {
+ imageStreamImageName = instanceDigest.String()
+ }
+
+ // Because image signatures are a shared resource in Atomic Registry, the default upload
+ // always adds signatures. Eventually we should also allow removing signatures.
+
+ if len(signatures) == 0 {
+ return nil // No need to even read the old state.
+ }
+
+ image, err := d.client.getImage(ctx, imageStreamImageName)
+ if err != nil {
+ return err
+ }
+ existingSigNames := map[string]struct{}{}
+ for _, sig := range image.Signatures {
+ existingSigNames[sig.objectMeta.Name] = struct{}{}
+ }
+
+sigExists:
+ for _, newSigWithFormat := range signatures {
+ newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(newSigWithFormat)
+ }
+ newSig := newSigSimple.UntrustedSignature()
+
+ for _, existingSig := range image.Signatures {
+ if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+ continue sigExists
+ }
+ }
+
+ // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string
+ for {
+ randBytes := make([]byte, 16)
+ n, err := rand.Read(randBytes)
+ if err != nil || n != 16 {
+ return fmt.Errorf("generating random signature len %d: %w", n, err)
+ }
+ signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
+ if _, ok := existingSigNames[signatureName]; !ok {
+ break
+ }
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ sig := imageSignature{
+ typeMeta: typeMeta{
+ Kind: "ImageSignature",
+ APIVersion: "v1",
+ },
+ objectMeta: objectMeta{Name: signatureName},
+ Type: imageSignatureTypeAtomic,
+ Content: newSig,
+ }
+ body, err := json.Marshal(sig)
+ if err != nil {
+ return err
+ }
+ _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ return d.docker.Commit(ctx, unparsedToplevel)
+}
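
The retry loop in PutSignaturesWithFormat above builds candidate names from 16 random bytes; %032x renders them as exactly 32 hex digits appended after the digest. A standalone sketch of that naming scheme:

package main

import (
	"crypto/rand"
	"fmt"
)

func newSignatureName(imageStreamImageName string) (string, error) {
	randBytes := make([]byte, 16)
	if _, err := rand.Read(randBytes); err != nil {
		return "", fmt.Errorf("generating random signature name: %w", err)
	}
	return fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes), nil
}

func main() {
	name, _ := newSignatureName("sha256:1234")
	fmt.Println(name) // sha256:1234@<32 hex digits>
}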
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_src.go b/vendor/github.com/containers/image/v5/openshift/openshift_src.go
new file mode 100644
index 00000000000..93ba8d10e30
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_src.go
@@ -0,0 +1,173 @@
+package openshift
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+type openshiftImageSource struct {
+ impl.Compat
+ impl.DoesNotAffectLayerInfosForCopy
+ // This is slightly suboptimal. We could forward GetBlobAt(), but we need to call ensureImageIsResolved in SupportsGetBlobAt(),
+ // and that method doesn’t provide a context for timing out. That could actually be fixed (SupportsGetBlobAt is private and we
+ // can change it), but this is a deprecated transport anyway, so for now we just punt.
+ stubs.NoGetBlobAtInitialize
+
+ client *openshiftClient
+ // Values specific to this image
+ sys *types.SystemContext
+ // State
+ docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet
+ imageStreamImageName string // Resolved image identifier, or "" if not known yet
+}
+
+// newImageSource creates a new ImageSource for the specified reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(sys *types.SystemContext, ref openshiftReference) (private.ImageSource, error) {
+ client, err := newOpenshiftClient(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ s := &openshiftImageSource{
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ client: client,
+ sys: sys,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *openshiftImageSource) Reference() types.ImageReference {
+ return s.client.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *openshiftImageSource) Close() error {
+ if s.docker != nil {
+ err := s.docker.Close()
+ s.docker = nil
+
+ return err
+ }
+
+ return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, "", err
+ }
+ return s.docker.GetManifest(ctx, instanceDigest)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, 0, err
+ }
+ return s.docker.GetBlob(ctx, info, cache)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ var imageStreamImageName string
+ if instanceDigest == nil {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, err
+ }
+ imageStreamImageName = s.imageStreamImageName
+ } else {
+ imageStreamImageName = instanceDigest.String()
+ }
+ image, err := s.client.getImage(ctx, imageStreamImageName)
+ if err != nil {
+ return nil, err
+ }
+ var sigs []signature.Signature
+ for _, sig := range image.Signatures {
+ if sig.Type == imageSignatureTypeAtomic {
+ sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
+ }
+ }
+ return sigs, nil
+}
+
+// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
+func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
+ if s.docker != nil {
+ return nil
+ }
+
+ // FIXME: validate components per validation.IsValidPathSegmentName?
+ path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
+ body, err := s.client.doRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return err
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ var is imageStream
+ if err := json.Unmarshal(body, &is); err != nil {
+ return err
+ }
+ var te *tagEvent
+ for _, tag := range is.Status.Tags {
+ if tag.Tag != s.client.ref.dockerReference.Tag() {
+ continue
+ }
+ if len(tag.Items) > 0 {
+ te = &tag.Items[0]
+ break
+ }
+ }
+ if te == nil {
+ return errors.New("No matching tag found")
+ }
+ logrus.Debugf("tag event %#v", te)
+ dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Resolved reference %#v", dockerRefString)
+ dockerRef, err := docker.ParseReference("//" + dockerRefString)
+ if err != nil {
+ return err
+ }
+ d, err := dockerRef.NewImageSource(ctx, s.sys)
+ if err != nil {
+ return err
+ }
+ s.docker = d
+ s.imageStreamImageName = te.Image
+ return nil
+}
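
ensureImageIsResolved above picks Items[0], the newest event, for the tag the reference names; everything after that is ordinary docker-transport resolution. A reduced, self-contained model of the tag lookup (types here are illustrative stand-ins for the image stream structs):

package main

import (
	"errors"
	"fmt"
)

type tagEvent struct{ Image, DockerImageReference string }

type namedTag struct {
	Tag   string
	Items []tagEvent // newest first
}

func resolveTag(tags []namedTag, want string) (*tagEvent, error) {
	for _, t := range tags {
		if t.Tag != want {
			continue
		}
		if len(t.Items) > 0 {
			return &t.Items[0], nil
		}
	}
	return nil, errors.New("No matching tag found")
}

func main() {
	tags := []namedTag{{Tag: "latest", Items: []tagEvent{{Image: "sha256:abc"}}}}
	te, _ := resolveTag(tags, "latest")
	fmt.Println(te.Image) // sha256:abc
}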
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
index 6bbb43be283..f7971a48f5f 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go
@@ -2,16 +2,16 @@ package openshift
import (
"context"
+ "errors"
"fmt"
"regexp"
"strings"
"github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference"
- genericImage "github.com/containers/image/v5/image"
+ genericImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
func init() {
@@ -43,7 +43,7 @@ var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
// scope passed to this function will not be "", that value is always allowed.
func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
if scopeRegexp.FindStringIndex(scope) == nil {
- return errors.Errorf("Invalid scope name %s", scope)
+ return fmt.Errorf("Invalid scope name %s", scope)
}
return nil
}
@@ -59,11 +59,11 @@ type openshiftReference struct {
func ParseReference(ref string) (types.ImageReference, error) {
r, err := reference.ParseNormalizedNamed(ref)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse image reference %q", ref)
+ return nil, fmt.Errorf("failed to parse image reference %q: %w", ref, err)
}
tagged, ok := r.(reference.NamedTagged)
if !ok {
- return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
+ return nil, fmt.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
}
return NewReference(tagged)
}
@@ -72,7 +72,7 @@ func ParseReference(ref string) (types.ImageReference, error) {
func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
r := strings.SplitN(reference.Path(dockerRef), "/", 3)
if len(r) != 2 {
- return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
+ return nil, fmt.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
reference.FamiliarString(dockerRef))
}
return openshiftReference{
@@ -132,11 +132,7 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(sys, ref)
- if err != nil {
- return nil, err
- }
- return genericImage.FromSource(ctx, sys, src)
+ return genericImage.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -153,5 +149,5 @@ func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *type
// DeleteImage deletes the named image from the registry, if supported.
func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for atomic: images")
+ return errors.New("Deleting images not implemented for atomic: images")
}
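
Since ParseReference insists on an explicit tag in hostname/namespace/stream:tag form, a caller-side sketch (the registry and names are illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/v5/openshift"
)

func main() {
	// An untagged reference is rejected with the "expected format" error above.
	ref, err := openshift.ParseReference("registry.example.com/myproject/mystream:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport())
}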
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index 3eb2a2cba22..929523fa6c5 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -8,9 +8,9 @@ import (
"context"
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -21,7 +21,11 @@ import (
"time"
"unsafe"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
@@ -29,7 +33,6 @@ import (
"github.com/opencontainers/go-digest"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
- "github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
@@ -66,6 +69,11 @@ type manifestSchema struct {
}
type ostreeImageDestination struct {
+ compat impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.AlwaysSupportsSignatures
+
ref ostreeReference
manifest string
schema manifestSchema
@@ -77,12 +85,33 @@ type ostreeImageDestination struct {
}
// newImageDestination returns an ImageDestination for writing to an existing ostree.
-func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) {
+func newImageDestination(ref ostreeReference, tmpDirPath string) (private.ImageDestination, error) {
tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
if err := ensureDirectoryExists(tmpDirPath); err != nil {
return nil, err
}
- return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
+ d := &ostreeImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{manifest.DockerV2Schema2MediaType},
+ DesiredLayerCompression: types.PreserveOriginal,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: true,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: false,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
+ ref: ref,
+ manifest: "",
+ schema: manifestSchema{},
+ tmpDirPath: tmpDirPath,
+ blobs: map[string]*blobToImport{},
+ digest: "",
+ signaturesLen: 0,
+ repo: nil,
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -99,56 +128,15 @@ func (d *ostreeImageDestination) Close() error {
return os.RemoveAll(d.tmpDirPath)
}
-func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string {
- return []string{
- manifest.DockerV2Schema2MediaType,
- }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
-func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression {
- return types.PreserveOriginal
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
- return true
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
- return false // N/A, DockerReference() returns nil.
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
- return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
+func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
if err != nil {
return types.BlobInfo{}, err
}
@@ -168,7 +156,7 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
- return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+ return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
@@ -180,20 +168,24 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
- entries, err := ioutil.ReadDir(dir)
+ entries, err := os.ReadDir(dir)
if err != nil {
return err
}
- for _, info := range entries {
- fullpath := filepath.Join(dir, info.Name())
- if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+ for _, entry := range entries {
+ fullpath := filepath.Join(dir, entry.Name())
+ if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
if err := os.Remove(fullpath); err != nil {
return err
}
continue
}
+ info, err := entry.Info()
+ if err != nil {
+ return err
+ }
if selinuxHnd != nil {
relPath, err := filepath.Rel(root, fullpath)
if err != nil {
@@ -210,7 +202,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm))
if int(res) < 0 && err != syscall.ENOENT {
- return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath)
+ return fmt.Errorf("cannot selabel_lookup_raw %s: %w", relPath, err)
}
if int(res) == 0 {
defer C.freecon(context)
@@ -218,12 +210,12 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
defer C.free(unsafe.Pointer(fullpathC))
res, err = C.lsetfilecon_raw(fullpathC, context)
if int(res) < 0 {
- return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context))
+ return fmt.Errorf("cannot setfilecon_raw %s to %s: %w", fullpath, C.GoString(context), err)
}
}
}
- if info.IsDir() {
+ if entry.IsDir() {
if usermode {
if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
return err
@@ -233,7 +225,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user
if err != nil {
return err
}
- } else if usermode && (info.Mode().IsRegular()) {
+ } else if usermode && (entry.Type().IsRegular()) {
if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
return err
}
@@ -335,16 +327,14 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
@@ -405,13 +395,14 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [
}
d.digest = digest
- return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
+ return os.WriteFile(manifestPath, manifestBlob, 0644)
}
-// PutSignatures writes signatures to the destination.
-// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
-// there can be no secondary manifests.
-func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported by "ostree:"`)
}
@@ -423,7 +414,11 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
for i, sig := range signatures {
signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
- if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(signaturePath, blob, 0644); err != nil {
return err
}
}
@@ -450,7 +445,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
if os.Getuid() == 0 && selinux.GetEnabled() {
selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
if selinuxHnd == nil {
- return errors.Wrapf(err, "cannot open the SELinux DB")
+ return fmt.Errorf("cannot open the SELinux DB: %w", err)
}
defer C.selabel_close(selinuxHnd)
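The fixFiles hunk above is part of this bump's tree-wide move off the deprecated io/ioutil package: os.ReadDir returns lightweight fs.DirEntry values rather than the fully stat-ed os.FileInfo slice ioutil.ReadDir gave, so the loop now uses entry.Type() for the cheap file-type check and calls entry.Info() only when full permission bits are needed. A minimal standalone sketch of the same pattern, using only the standard library (the directory is illustrative):

    package main

    import (
        "fmt"
        "log"
        "os"
        "path/filepath"
    )

    func main() {
        dir := os.TempDir() // illustrative; any readable directory works

        // os.ReadDir returns []fs.DirEntry, not []os.FileInfo.
        entries, err := os.ReadDir(dir)
        if err != nil {
            log.Fatal(err)
        }
        for _, entry := range entries {
            // entry.Type() is cheap: it reports only the file-type bits.
            if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
                continue // skip special files, as fixFiles does
            }
            // entry.Info() performs the stat needed for full permission bits.
            info, err := entry.Info()
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%s mode=%v\n", filepath.Join(dir, entry.Name()), info.Mode())
        }
    }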
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
index d30c764a630..9983acc0a64 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -7,20 +7,23 @@ import (
"bytes"
"context"
"encoding/base64"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"strconv"
"strings"
"unsafe"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
- "github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
@@ -35,6 +38,10 @@ import (
import "C"
type ostreeImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoGetBlobAtInitialize
+
ref ostreeReference
tmpDir string
repo *C.struct_OstreeRepo
@@ -43,8 +50,19 @@ type ostreeImageSource struct {
}
// newImageSource returns an ImageSource for reading from an existing ostree repository.
-func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) {
- return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil
+func newImageSource(tmpDir string, ref ostreeReference) (private.ImageSource, error) {
+ s := &ostreeImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ tmpDir: tmpDir,
+ compressed: nil,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
}
// Reference returns the reference used to set up this source.
@@ -264,11 +282,6 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
return getter.Get(path)
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -340,10 +353,11 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
return rc, layerSize, nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as there can be no secondary manifests.
-func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ostreeImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
if instanceDigest != nil {
return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
}
@@ -361,18 +375,23 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
s.repo = repo
}
- signatures := [][]byte{}
+ signatures := []signature.Signature{}
for i := int64(1); i <= lenSignatures; i++ {
- sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i))
+ path := fmt.Sprintf("/signature-%d", i)
+ sigReader, err := s.readSingleFile(branch, path)
if err != nil {
return nil, err
}
defer sigReader.Close()
- sig, err := ioutil.ReadAll(sigReader)
+ sigBlob, err := io.ReadAll(sigReader)
if err != nil {
return nil, err
}
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature %q: %w", path, err)
+ }
signatures = append(signatures, sig)
}
return signatures, nil
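The ostree source and destination above now exchange signatures as signature.Signature values rather than raw [][]byte: PutSignaturesWithFormat serializes each one with signature.Blob before writing, and GetSignaturesWithFormat parses the stored bytes back with signature.FromBlob. A rough sketch of that round trip; internal/signature is internal to the containers/image module, so this compiles only in-module, and only the two functions actually used in this diff are assumed:

    // Sketch only: internal/ packages are not importable from outside
    // the containers/image module.
    package sketch

    import (
        "fmt"
        "os"

        "github.com/containers/image/v5/internal/signature"
    )

    // writeAndReadBack mirrors the PutSignaturesWithFormat /
    // GetSignaturesWithFormat pairing above.
    func writeAndReadBack(sig signature.Signature, path string) (signature.Signature, error) {
        blob, err := signature.Blob(sig) // serialize, as the ostree destination now does
        if err != nil {
            return nil, err
        }
        if err := os.WriteFile(path, blob, 0644); err != nil {
            return nil, err
        }
        raw, err := os.ReadFile(path)
        if err != nil {
            return nil, err
        }
        parsed, err := signature.FromBlob(raw) // parse, as the ostree source now does
        if err != nil {
            return nil, fmt.Errorf("parsing signature %q: %w", path, err)
        }
        return parsed, nil
    }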
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
index 1e35ab6059f..658d4e9035b 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
@@ -6,6 +6,7 @@ package ostree
import (
"bytes"
"context"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -14,10 +15,9 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
const defaultOSTreeRepo = "/ostree/repo"
@@ -42,16 +42,16 @@ func init() {
func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
sep := strings.Index(scope, ":")
if sep < 0 {
- return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
+ return fmt.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
}
repo := scope[:sep]
if !strings.HasPrefix(repo, "/") {
- return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
+ return fmt.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
}
cleaned := filepath.Clean(repo)
if cleaned != repo {
- return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ return fmt.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
// FIXME? In the namespaces within a repo,
@@ -117,7 +117,7 @@ func NewReference(image string, repo string) (types.ImageReference, error) {
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
- return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
+ return nil, fmt.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
}
return ostreeReference{
@@ -184,17 +184,7 @@ func (s *ostreeImageCloser) Size() (int64, error) {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- var tmpDir string
- if sys == nil || sys.OSTreeTmpDirPath == "" {
- tmpDir = os.TempDir()
- } else {
- tmpDir = sys.OSTreeTmpDirPath
- }
- src, err := newImageSource(tmpDir, ref)
- if err != nil {
- return nil, err
- }
- return image.FromSource(ctx, sys, src)
+ return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
@@ -223,7 +213,7 @@ func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.S
// DeleteImage deletes the named image from the registry, if supported.
func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
- return errors.Errorf("Deleting images not implemented for ostree: images")
+ return errors.New("Deleting images not implemented for ostree: images")
}
var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
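This file also shows the migration pattern that recurs throughout the bump: github.com/pkg/errors is dropped in favor of the standard library, with errors.Errorf becoming fmt.Errorf, errors.Wrapf(err, ...) becoming fmt.Errorf("...: %w", ..., err), and (elsewhere) errors.Cause comparisons becoming errors.Is. A small standard-library-only illustration of why the %w form is a faithful replacement for callers:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    // openConfig wraps failures the way the rewritten code does:
    // fmt.Errorf("...: %w", err) replaces errors.Wrapf(err, "...").
    func openConfig(path string) error {
        if _, err := os.Open(path); err != nil {
            return fmt.Errorf("opening %s: %w", path, err)
        }
        return nil
    }

    func main() {
        err := openConfig("/nonexistent/registries.conf")
        // errors.Is replaces errors.Cause(err) == target comparisons;
        // %w preserves the wrapped chain, so the match still succeeds.
        fmt.Println(errors.Is(err, os.ErrNotExist)) // true
    }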
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
index c28e8179296..ce688d11703 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -5,13 +5,11 @@ import (
"compress/bzip2"
"fmt"
"io"
- "io/ioutil"
"github.com/containers/image/v5/pkg/compression/internal"
"github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/storage/pkg/chunked/compressor"
"github.com/klauspost/pgzip"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/ulikunitz/xz"
)
@@ -65,7 +63,7 @@ func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
- return ioutil.NopCloser(bzip2.NewReader(r)), nil
+ return io.NopCloser(bzip2.NewReader(r)), nil
}
// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
@@ -74,7 +72,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
if err != nil {
return nil, err
}
- return ioutil.NopCloser(r), nil
+ return io.NopCloser(r), nil
}
// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
@@ -152,16 +150,16 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
decompressor, stream, err := DetectCompression(stream)
if err != nil {
- return nil, false, errors.Wrapf(err, "detecting compression")
+ return nil, false, fmt.Errorf("detecting compression: %w", err)
}
var res io.ReadCloser
if decompressor != nil {
res, err = decompressor(stream)
if err != nil {
- return nil, false, errors.Wrapf(err, "initializing decompression")
+ return nil, false, fmt.Errorf("initializing decompression: %w", err)
}
} else {
- res = ioutil.NopCloser(stream)
+ res = io.NopCloser(stream)
}
return res, decompressor != nil, nil
}
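AutoDecompress and DetectCompression are part of the public pkg/compression API, so the change above is caller-visible only in that plain streams are now wrapped with io.NopCloser. A minimal caller, with a gzip payload built inline purely for the demonstration:

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io"
        "log"

        "github.com/containers/image/v5/pkg/compression"
    )

    func main() {
        // Build a small gzip payload so DetectCompression has something to sniff.
        var buf bytes.Buffer
        zw := gzip.NewWriter(&buf)
        if _, err := zw.Write([]byte("hello layer")); err != nil {
            log.Fatal(err)
        }
        if err := zw.Close(); err != nil {
            log.Fatal(err)
        }

        rc, wasCompressed, err := compression.AutoDecompress(&buf)
        if err != nil {
            log.Fatal(err)
        }
        defer rc.Close()

        data, err := io.ReadAll(rc)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("compressed=%v payload=%q\n", wasCompressed, data)
    }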
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index e37f4c19e53..0790a47f242 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -3,8 +3,8 @@ package config
import (
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -15,10 +15,10 @@ import (
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
+ "github.com/containers/storage/pkg/ioutils"
helperclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -54,8 +54,8 @@ var (
// SetCredentials stores the username and password in a location
// appropriate for sys and the users’ configuration.
-// A valid key can be either a registry hostname or additionally a namespace if
-// the AuthenticationFileHelper is being unsed.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
// Returns a human-readable description of the location that was updated.
// NOTE: The return value is only intended to be read by humans; its form is not an API,
// it may change (or new forms can be added) any time.
@@ -111,7 +111,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
}
func unsupportedNamespaceErr(helper string) error {
- return errors.Errorf("namespaced key is not supported for credential helper %s", helper)
+ return fmt.Errorf("namespaced key is not supported for credential helper %s", helper)
}
// SetAuthentication stores the username and password in the credential helper or file
@@ -128,11 +128,15 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// possible sources, and then call `GetCredentials` on them. That
// prevents us from having to reverse engineer the logic in
// `GetCredentials`.
- allRegistries := make(map[string]bool)
- addRegistry := func(s string) {
- allRegistries[s] = true
+ allKeys := make(map[string]bool)
+ addKey := func(s string) {
+ allKeys[s] = true
}
+ // To use GetCredentials, we must at least convert the URL forms into host names.
+ // While we're at it, we’ll also canonicalize docker.io to the standard format.
+ normalizedDockerIORegistry := normalizeRegistry("docker.io")
+
helpers, err := sysregistriesv2.CredentialHelpers(sys)
if err != nil {
return nil, err
@@ -145,16 +149,20 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// readJSONFile returns an empty map in case the path doesn't exist.
auths, err := readJSONFile(path.path, path.legacyFormat)
if err != nil {
- return nil, errors.Wrapf(err, "reading JSON file %q", path.path)
+ return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err)
}
// Credential helpers in the auth file have a
// direct mapping to a registry, so we can just
// walk the map.
for registry := range auths.CredHelpers {
- addRegistry(registry)
+ addKey(registry)
}
- for registry := range auths.AuthConfigs {
- addRegistry(registry)
+ for key := range auths.AuthConfigs {
+ key := normalizeAuthFileKey(key, path.legacyFormat)
+ if key == normalizedDockerIORegistry {
+ key = "docker.io"
+ }
+ addKey(key)
}
}
// External helpers.
@@ -162,16 +170,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
creds, err := listAuthsFromCredHelper(helper)
if err != nil {
logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
- }
- switch errors.Cause(err) {
- case nil:
- for registry := range creds {
- addRegistry(registry)
+ if errors.Is(err, exec.ErrNotFound) {
+ creds = nil // It's okay if the helper doesn't exist.
+ } else {
+ return nil, err
}
- case exec.ErrNotFound:
- // It's okay if the helper doesn't exist.
- default:
- return nil, err
+ }
+ for registry := range creds {
+ addKey(registry)
}
}
}
@@ -179,17 +185,15 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// Now use `GetCredentials` to get the specific auth configs for each
// previously listed registry.
authConfigs := make(map[string]types.DockerAuthConfig)
- for registry := range allRegistries {
- authConf, err := GetCredentials(sys, registry)
+ for key := range allKeys {
+ authConf, err := GetCredentials(sys, key)
if err != nil {
- if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
- // Ignore if the credentials could not be found (anymore).
- continue
- }
// Note: we rely on the logging in `GetCredentials`.
return nil, err
}
- authConfigs[registry] = authConf
+ if authConf != (types.DockerAuthConfig{}) {
+ authConfigs[key] = authConf
+ }
}
return authConfigs, nil
@@ -230,16 +234,14 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
return paths
}
-// GetCredentials returns the registry credentials stored in the
-// registry-specific credential helpers or in the default global credentials
-// helpers with falling back to using either auth.json
-// file or .docker/config.json, including support for OAuth2 and IdentityToken.
+// GetCredentials returns the registry credentials matching key, appropriate for
+// sys and the users’ configuration.
// If an entry is not found, an empty struct is returned.
+// A valid key is a repository, a namespace within a registry, or a registry hostname.
//
-// GetCredentialsForRef should almost always be used in favor of this API to
-// allow different credentials for different repositories on the same registry.
-func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
- return getCredentialsWithHomeDir(sys, nil, registry, homedir.Get())
+// GetCredentialsForRef should almost always be used in favor of this API.
+func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) {
+ return getCredentialsWithHomeDir(sys, key, homedir.Get())
}
// GetCredentialsForRef returns the registry credentials necessary for
@@ -247,39 +249,43 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth
// appropriate for sys and the users’ configuration.
// If an entry is not found, an empty struct is returned.
func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) {
- return getCredentialsWithHomeDir(sys, ref, reference.Domain(ref), homedir.Get())
+ return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get())
}
// getCredentialsWithHomeDir is an internal implementation detail of
// GetCredentialsForRef and GetCredentials. It exists only to allow testing it
// with an artificial home directory.
-func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, registry, homeDir string) (types.DockerAuthConfig, error) {
- // consistency check of the ref and registry arguments
- if ref != nil && reference.Domain(ref) != registry {
- return types.DockerAuthConfig{}, errors.Errorf(
- "internal error: provided reference domain %q name does not match registry %q",
- reference.Domain(ref), registry,
- )
+func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) {
+ _, err := validateKey(key)
+ if err != nil {
+ return types.DockerAuthConfig{}, err
}
if sys != nil && sys.DockerAuthConfig != nil {
- logrus.Debugf("Returning credentials for %s from DockerAuthConfig", registry)
+ logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key)
return *sys.DockerAuthConfig, nil
}
+ var registry string // We compute this once because it is used in several places.
+ if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 {
+ registry = key[:firstSlash]
+ } else {
+ registry = key
+ }
+
// Anonymous function to query credentials from auth files.
- getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, error) {
+ getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
for _, path := range getAuthFilePaths(sys, homeDir) {
- authConfig, err := findAuthentication(ref, registry, path.path, path.legacyFormat)
+ authConfig, err := findCredentialsInFile(key, registry, path.path, path.legacyFormat)
if err != nil {
- return types.DockerAuthConfig{}, err
+ return types.DockerAuthConfig{}, "", err
}
- if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" {
- return authConfig, nil
+ if authConfig != (types.DockerAuthConfig{}) {
+ return authConfig, path.path, nil
}
}
- return types.DockerAuthConfig{}, nil
+ return types.DockerAuthConfig{}, "", nil
}
helpers, err := sysregistriesv2.CredentialHelpers(sys)
@@ -289,65 +295,76 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, re
var multiErr error
for _, helper := range helpers {
- var creds types.DockerAuthConfig
- var err error
+ var (
+ creds types.DockerAuthConfig
+ helperKey string
+ credHelperPath string
+ err error
+ )
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
- creds, err = getCredentialsFromAuthFiles()
+ helperKey = key
+ creds, credHelperPath, err = getCredentialsFromAuthFiles()
// External helpers.
default:
+ // This intentionally uses "registry", not "key"; we don't support namespaced
+ // credentials in helpers, but a "registry" is a valid parent of "key".
+ helperKey = registry
creds, err = getAuthFromCredHelper(helper, registry)
}
if err != nil {
- logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", registry, helper, err)
+ logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
multiErr = multierror.Append(multiErr, err)
continue
}
- if len(creds.Username)+len(creds.Password)+len(creds.IdentityToken) == 0 {
- continue
+ if creds != (types.DockerAuthConfig{}) {
+ msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper)
+ if credHelperPath != "" {
+ msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
+ }
+ logrus.Debug(msg)
+ return creds, nil
}
- logrus.Debugf("Found credentials for %s in credential helper %s", registry, helper)
- return creds, nil
}
if multiErr != nil {
return types.DockerAuthConfig{}, multiErr
}
- logrus.Debugf("No credentials for %s found", registry)
+ logrus.Debugf("No credentials for %s found", key)
return types.DockerAuthConfig{}, nil
}
-// GetAuthentication returns the registry credentials stored in the
-// registry-specific credential helpers or in the default global credentials
-// helpers with falling back to using either auth.json file or
-// .docker/config.json
+// GetAuthentication returns the registry credentials matching key, appropriate for
+// sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
+// A valid key is a repository, a namespace within a registry, or a registry hostname.
//
// Deprecated: This API only has support for username and password. To get the
// support for oauth2 in container registry authentication, we added the new
-// GetCredentials API. The new API should be used and this API is kept to
+// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to
// maintain backward compatibility.
-func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
- return getAuthenticationWithHomeDir(sys, registry, homedir.Get())
+func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) {
+ return getAuthenticationWithHomeDir(sys, key, homedir.Get())
}
// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
// it exists only to allow testing it with an artificial home directory.
-func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) {
- auth, err := getCredentialsWithHomeDir(sys, nil, registry, homeDir)
+func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
+ auth, err := getCredentialsWithHomeDir(sys, key, homeDir)
if err != nil {
return "", "", err
}
if auth.IdentityToken != "" {
- return "", "", errors.Wrap(ErrNotSupported, "non-empty identity token found and this API doesn't support it")
+ return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported)
}
return auth.Username, auth.Password, nil
}
// RemoveAuthentication removes credentials for `key` from all possible
// sources such as credential helpers and auth files.
-// A valid key can be either a registry hostname or additionally a namespace if
-// the AuthenticationFileHelper is being unsed.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
func RemoveAuthentication(sys *types.SystemContext, key string) error {
isNamespaced, err := validateKey(key)
if err != nil {
@@ -378,7 +395,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
return
}
}
- multiErr = multierror.Append(multiErr, errors.Wrapf(err, "removing credentials for %s from credential helper %s", key, helper))
+ multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err))
}
for _, helper := range helpers {
@@ -446,19 +463,19 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
default:
var creds map[string]string
creds, err = listAuthsFromCredHelper(helper)
- switch errors.Cause(err) {
- case nil:
- for registry := range creds {
- err = deleteAuthFromCredHelper(helper, registry)
- if err != nil {
- break
- }
+ if err != nil {
+ if errors.Is(err, exec.ErrNotFound) {
+ // It's okay if the helper doesn't exist.
+ continue
+ } else {
+ break
+ }
+ }
+ for registry := range creds {
+ err = deleteAuthFromCredHelper(helper, registry)
+ if err != nil {
+ break
}
- case exec.ErrNotFound:
- // It's okay if the helper doesn't exist.
- continue
- default:
- // fall through
}
}
if err != nil {
@@ -511,7 +528,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e
// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
// or made a typo while setting the environment variable,
// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
- return "", false, errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
+ return "", false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err)
} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil
}
@@ -524,7 +541,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e
func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
var auths dockerConfigFile
- raw, err := ioutil.ReadFile(path)
+ raw, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
auths.AuthConfigs = map[string]dockerAuthConfig{}
@@ -535,13 +552,13 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
if legacyFormat {
if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
- return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
+ return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path, err)
}
return auths, nil
}
if err = json.Unmarshal(raw, &auths); err != nil {
- return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
+ return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path, err)
}
if auths.AuthConfigs == nil {
@@ -573,21 +590,21 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
auths, err := readJSONFile(path, false)
if err != nil {
- return "", errors.Wrapf(err, "reading JSON file %q", path)
+ return "", fmt.Errorf("reading JSON file %q: %w", path, err)
}
updated, err := editor(&auths)
if err != nil {
- return "", errors.Wrapf(err, "updating %q", path)
+ return "", fmt.Errorf("updating %q: %w", path, err)
}
if updated {
newData, err := json.MarshalIndent(auths, "", "\t")
if err != nil {
- return "", errors.Wrapf(err, "marshaling JSON %q", path)
+ return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
}
- if err = ioutil.WriteFile(path, newData, 0600); err != nil {
- return "", errors.Wrapf(err, "writing to file %q", path)
+ if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil {
+ return "", fmt.Errorf("writing to file %q: %w", path, err)
}
}
@@ -599,6 +616,10 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig,
p := helperclient.NewShellProgramFunc(helperName)
creds, err := helperclient.Get(p, registry)
if err != nil {
+ if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
+ logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper)
+ err = nil
+ }
return types.DockerAuthConfig{}, err
}
@@ -632,26 +653,28 @@ func deleteAuthFromCredHelper(credHelper, registry string) error {
return helperclient.Erase(p, registry)
}
-// findAuthentication looks for auth of registry in path. If ref is
-// not nil, then it will be taken into account when looking up the
-// authentication credentials.
-func findAuthentication(ref reference.Named, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
+// findCredentialsInFile looks for credentials matching "key"
+// (which is "registry" or a namespace in "registry") in "path".
+func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
auths, err := readJSONFile(path, legacyFormat)
if err != nil {
- return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path)
+ return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path, err)
}
// First try cred helpers. They should always be normalized.
+ // This intentionally uses "registry", not "key"; we don't support namespaced
+ // credentials in helpers.
if ch, exists := auths.CredHelpers[registry]; exists {
+ logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path)
return getAuthFromCredHelper(ch, registry)
}
- // Support for different paths in auth.
+ // Support sub-registry namespaces in auth.
// (This is not a feature of ~/.docker/config.json; we support it even for
// those files as an extension.)
var keys []string
- if !legacyFormat && ref != nil {
- keys = authKeysForRef(ref)
+ if !legacyFormat {
+ keys = authKeysForKey(key)
} else {
keys = []string{registry}
}
@@ -660,7 +683,7 @@ func findAuthentication(ref reference.Named, registry, path string, legacyFormat
// keys we prefer exact matches as well.
for _, key := range keys {
if val, exists := auths.AuthConfigs[key]; exists {
- return decodeDockerAuth(val)
+ return decodeDockerAuth(path, key, val)
}
}
@@ -675,38 +698,40 @@ func findAuthentication(ref reference.Named, registry, path string, legacyFormat
registry = normalizeRegistry(registry)
for k, v := range auths.AuthConfigs {
if normalizeAuthFileKey(k, legacyFormat) == registry {
- return decodeDockerAuth(v)
+ return decodeDockerAuth(path, k, v)
}
}
+ // Only log this if we found nothing; getCredentialsWithHomeDir logs the
+ // source of found data.
+ logrus.Debugf("No credentials matching %s found in %s", key, path)
return types.DockerAuthConfig{}, nil
}
-// authKeysForRef returns the valid paths for a provided reference. For example,
-// when given a reference "quay.io/repo/ns/image:tag", then it would return
+// authKeysForKey returns the keys matching a provided auth file key, in order
+// from the best match to worst. For example,
+// when given a repository key "quay.io/repo/ns/image", it returns
// - quay.io/repo/ns/image
// - quay.io/repo/ns
// - quay.io/repo
// - quay.io
-func authKeysForRef(ref reference.Named) (res []string) {
- name := ref.Name()
-
+func authKeysForKey(key string) (res []string) {
for {
- res = append(res, name)
+ res = append(res, key)
- lastSlash := strings.LastIndex(name, "/")
+ lastSlash := strings.LastIndex(key, "/")
if lastSlash == -1 {
break
}
- name = name[:lastSlash]
+ key = key[:lastSlash]
}
return res
}
-// decodeDockerAuth decodes the username and password, which is
-// encoded in base64.
-func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
+// decodeDockerAuth decodes the username and password from conf,
+// which is the entry for key in path.
+func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) {
decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
if err != nil {
return types.DockerAuthConfig{}, err
@@ -715,6 +740,11 @@ func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
parts := strings.SplitN(string(decoded), ":", 2)
if len(parts) != 2 {
// if it's invalid just skip, as docker does
+ if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those
+ logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing colon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log.
+ } else {
+ logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path)
+ }
return types.DockerAuthConfig{}, nil
}
@@ -752,11 +782,24 @@ func normalizeRegistry(registry string) string {
// validateKey verifies that the input key does not have a prefix that is not
// allowed and returns an indicator if the key is namespaced.
-func validateKey(key string) (isNamespaced bool, err error) {
+func validateKey(key string) (bool, error) {
if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") {
- return isNamespaced, errors.Errorf("key %s contains http[s]:// prefix", key)
+ return false, fmt.Errorf("key %s contains http[s]:// prefix", key)
}
+ // Ideally this should only accept explicitly valid keys, compare
+ // validateIdentityRemappingPrefix. For now, just reject values that look
+ // like tagged or digested values.
+ if strings.ContainsRune(key, '@') {
+ return false, fmt.Errorf(`key %s contains a '@' character`, key)
+ }
+
+ firstSlash := strings.IndexRune(key, '/')
+ isNamespaced := firstSlash != -1
+ // Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo.
+ if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') {
+ return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key)
+ }
// check if the provided key contains one or more subpaths.
- return strings.ContainsRune(key, '/'), nil
+ return isNamespaced, nil
}
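Taken together, the config.go changes make the lookup key richer than a bare registry host: a key may be a repository or any namespace prefix of one, getCredentialsWithHomeDir derives the registry from the segment before the first slash, and findCredentialsInFile tries authKeysForKey's candidates from most to least specific before the legacy normalized-registry fallback. A hedged sketch of the public entry points involved; the registry namespace and credentials are made up, and running this would actually write to the configured auth file:

    package main

    import (
        "fmt"
        "log"

        "github.com/containers/image/v5/pkg/docker/config"
        "github.com/containers/image/v5/types"
    )

    func main() {
        sys := &types.SystemContext{} // default auth-file locations

        // Store credentials under a namespace rather than a bare host.
        // (Hypothetical namespace and credentials, for illustration only.)
        loc, err := config.SetCredentials(sys, "quay.io/myorg", "someuser", "somepass")
        if err != nil {
            log.Fatal(err) // can fail if a configured credential helper rejects namespaced keys
        }
        fmt.Println("stored in:", loc)

        // A lookup with a more specific key walks the candidates
        // quay.io/myorg/app -> quay.io/myorg -> quay.io and matches the entry above.
        auth, err := config.GetCredentials(sys, "quay.io/myorg/app")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("username:", auth.Username)

        // validateKey rejects tagged/digested forms outright.
        if _, err := config.GetCredentials(sys, "quay.io/myorg/app:v1"); err != nil {
            fmt.Println("rejected:", err) // a ':' after host[:port] is not a valid key
        }
    }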
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
deleted file mode 100644
index 0bf16125919..00000000000
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package config
-
-import (
- "fmt"
- "strings"
-
- "github.com/containers/image/v5/internal/pkg/keyctl"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-// NOTE: none of the functions here are currently used. If we ever want to
-// re-enable keyring support, we should introduce a similar built-in credential
-// helpers as for `sysregistriesv2.AuthenticationFileHelper`.
-
-const keyDescribePrefix = "container-registry-login:" //nolint:deadcode,unused
-
-func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused
- userkeyring, err := keyctl.UserKeyring()
- if err != nil {
- return "", "", err
- }
- key, err := userkeyring.Search(genDescription(registry))
- if err != nil {
- return "", "", err
- }
- authData, err := key.Get()
- if err != nil {
- return "", "", err
- }
- parts := strings.SplitN(string(authData), "\x00", 2)
- if len(parts) != 2 {
- return "", "", nil
- }
- return parts[0], parts[1], nil
-}
-
-func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused
- userkeyring, err := keyctl.UserKeyring()
-
- if err != nil {
- return err
- }
- key, err := userkeyring.Search(genDescription(registry))
- if err != nil {
- return err
- }
- return key.Unlink()
-}
-
-func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused
- keys, err := keyctl.ReadUserKeyring()
- if err != nil {
- return err
- }
-
- userkeyring, err := keyctl.UserKeyring()
- if err != nil {
- return err
- }
-
- for _, k := range keys {
- keyAttr, err := k.Describe()
- if err != nil {
- return err
- }
- // split string "type;uid;gid;perm;description"
- keyAttrs := strings.SplitN(keyAttr, ";", 5)
- if len(keyAttrs) < 5 {
- return errors.Errorf("Key attributes of %d are not available", k.ID())
- }
- keyDescribe := keyAttrs[4]
- if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
- err := keyctl.Unlink(userkeyring, k)
- if err != nil {
- return errors.Wrapf(err, "unlinking key %d", k.ID())
- }
- logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr)
- }
- }
- return nil
-}
-
-func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused
- keyring, err := keyctl.SessionKeyring()
- if err != nil {
- return err
- }
- id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password)))
- if err != nil {
- return err
- }
-
- // sets all permission(view,read,write,search,link,set attribute) for current user
- // it enables the user to search the key after it linked to user keyring and unlinked from session keyring
- err = keyctl.SetPerm(id, keyctl.PermUserAll)
- if err != nil {
- return err
- }
- // link the key to userKeyring
- userKeyring, err := keyctl.UserKeyring()
- if err != nil {
- return errors.Wrapf(err, "getting user keyring")
- }
- err = keyctl.Link(userKeyring, id)
- if err != nil {
- return errors.Wrapf(err, "linking the key to user keyring")
- }
- // unlink the key from session keyring
- err = keyctl.Unlink(keyring, id)
- if err != nil {
- return errors.Wrapf(err, "unlinking the key from session keyring")
- }
- return nil
-}
-
-func genDescription(registry string) string { //nolint:deadcode,unused
- return fmt.Sprintf("%s%s", keyDescribePrefix, registry)
-}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
deleted file mode 100644
index d9827d8edbc..00000000000
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !linux && (!386 || !amd64)
-// +build !linux
-// +build !386 !amd64
-
-package config
-
-func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused
- return "", "", ErrNotSupported
-}
-
-func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused
- return ErrNotSupported
-}
-
-func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused
- return ErrNotSupported
-}
-
-func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused
- return ErrNotSupported
-}
diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
index fb0a15b993e..8793711fead 100644
--- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
+++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
@@ -1,6 +1,7 @@
package shortnames
import (
+ "errors"
"fmt"
"os"
"strings"
@@ -10,7 +11,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/manifoldco/promptui"
"github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"golang.org/x/term"
)
@@ -20,9 +20,9 @@ import (
// short names.
//
// Examples:
-// * short names: "image:tag", "library/fedora"
-// * not short names: "quay.io/image", "localhost/image:tag",
-// "server.org:5000/lib/image", "image@sha256:..."
+// - short names: "image:tag", "library/fedora"
+// - not short names: "quay.io/image", "localhost/image:tag",
+// "server.org:5000/lib/image", "image@sha256:..."
func IsShortName(input string) bool {
isShort, _, _ := parseUnnormalizedShortName(input)
return isShort
@@ -33,12 +33,12 @@ func IsShortName(input string) bool {
func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
ref, err := reference.Parse(input)
if err != nil {
- return false, nil, errors.Wrapf(err, "cannot parse input: %q", input)
+ return false, nil, fmt.Errorf("cannot parse input: %q: %w", input, err)
}
named, ok := ref.(reference.Named)
if !ok {
- return true, nil, errors.Errorf("%q is not a named reference", input)
+ return true, nil, fmt.Errorf("%q is not a named reference", input)
}
registry := reference.Domain(named)
@@ -47,7 +47,7 @@ func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
// normalized (e.g., docker.io/alpine to docker.io/library/alpine).
named, err = reference.ParseNormalizedNamed(input)
if err != nil {
- return false, nil, errors.Wrapf(err, "cannot normalize input: %q", input)
+ return false, nil, fmt.Errorf("cannot normalize input: %q: %w", input, err)
}
return false, named, nil
}
@@ -87,7 +87,7 @@ func Add(ctx *types.SystemContext, name string, value reference.Named) error {
return err
}
if !isShort {
- return errors.Errorf("%q is not a short name", name)
+ return fmt.Errorf("%q is not a short name", name)
}
return sysregistriesv2.AddShortNameAlias(ctx, name, value.String())
}
@@ -102,7 +102,7 @@ func Remove(ctx *types.SystemContext, name string) error {
return err
}
if !isShort {
- return errors.Errorf("%q is not a short name", name)
+ return fmt.Errorf("%q is not a short name", name)
}
return sysregistriesv2.RemoveShortNameAlias(ctx, name)
}
@@ -118,6 +118,7 @@ type Resolved struct {
}
func (r *Resolved) addCandidate(named reference.Named) {
+ named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
r.PullCandidates = append(r.PullCandidates, PullCandidate{named, false, r})
}
@@ -138,6 +139,8 @@ const (
rationaleUSR
// Resolved value has been selected by the user (via the prompt).
rationaleUserSelection
+ // Resolved value has been enforced to use Docker Hub (via SystemContext).
+ rationaleEnforcedDockerHub
)
// Description returns a human-readable description about the resolution
@@ -152,6 +155,8 @@ func (r *Resolved) Description() string {
return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription)
case rationaleUSR:
return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription)
+ case rationaleEnforcedDockerHub:
+ return fmt.Sprintf("Resolving %q to docker.io (%s)", r.userInput, r.originDescription)
case rationaleUserSelection, rationaleNone:
fallthrough
default:
@@ -167,7 +172,7 @@ func (r *Resolved) Description() string {
func (r *Resolved) FormatPullErrors(pullErrors []error) error {
if len(pullErrors) >= 0 && len(pullErrors) != len(r.PullCandidates) {
pullErrors = append(pullErrors,
- errors.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
+ fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
len(r.PullCandidates), len(pullErrors), len(r.PullCandidates)))
}
@@ -211,7 +216,7 @@ func (c *PullCandidate) Record() error {
value := reference.TrimNamed(c.Value)
if err := Add(c.resolved.systemContext, name.String(), value); err != nil {
- return errors.Wrapf(err, "recording short-name alias (%q=%q)", c.resolved.userInput, c.Value)
+ return fmt.Errorf("recording short-name alias (%q=%q): %w", c.resolved.userInput, c.Value, err)
}
return nil
}
@@ -257,7 +262,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
case types.ShortNameModeDisabled, types.ShortNameModePermissive, types.ShortNameModeEnforcing:
// We're good.
default:
- return nil, errors.Errorf("unsupported short-name mode (%v)", mode)
+ return nil, fmt.Errorf("unsupported short-name mode (%v)", mode)
}
isShort, shortRef, err := parseUnnormalizedShortName(name)
@@ -265,8 +270,20 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
return nil, err
}
if !isShort { // no short name
- named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed
+ resolved.addCandidate(shortRef)
+ return resolved, nil
+ }
+
+ // Resolve to docker.io only if enforced by the caller (e.g., Podman's
+ // Docker-compatible REST API).
+ if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub {
+ named, err := reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return nil, fmt.Errorf("cannot normalize input: %q: %w", name, err)
+ }
resolved.addCandidate(named)
+ resolved.rationale = rationaleEnforcedDockerHub
+ resolved.originDescription = "enforced by caller"
return resolved, nil
}
@@ -295,9 +312,6 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
return nil, err
}
}
- // Make sure to add ":latest" if needed
- namedAlias = reference.TagNameOnly(namedAlias)
-
resolved.addCandidate(namedAlias)
resolved.rationale = rationaleAlias
resolved.originDescription = aliasOriginDescription
@@ -314,20 +328,17 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
// Error out if there's no matching alias and no search registries.
if len(unqualifiedSearchRegistries) == 0 {
if usrConfig != "" {
- return nil, errors.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig)
+ return nil, fmt.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig)
}
- return nil, errors.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name)
+ return nil, fmt.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name)
}
resolved.originDescription = usrConfig
for _, reg := range unqualifiedSearchRegistries {
named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
if err != nil {
- return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg)
+ return nil, fmt.Errorf("creating reference with unqualified-search registry %q: %w", reg, err)
}
- // Make sure to add ":latest" if needed
- named = reference.TagNameOnly(named)
-
resolved.addCandidate(named)
}
@@ -353,7 +364,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
return nil, errors.New("short-name resolution enforced but cannot prompt without a TTY")
default:
// We should not end up here.
- return nil, errors.Errorf("unexpected short-name mode (%v) during resolution", mode)
+ return nil, fmt.Errorf("unexpected short-name mode (%v) during resolution", mode)
}
}
@@ -376,7 +387,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
named, err := reference.ParseNormalizedNamed(selection)
if err != nil {
- return nil, errors.Wrapf(err, "selection %q is not a valid reference", selection)
+ return nil, fmt.Errorf("selection %q is not a valid reference: %w", selection, err)
}
resolved.PullCandidates = nil
@@ -391,9 +402,9 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
// not a short name), it is returned as is. In case, it's a short name, the
// returned slice of named references looks as follows:
//
-// 1) If present, the short-name alias
-// 2) "localhost/" as used by many container engines such as Podman and Buildah
-// 3) Unqualified-search registries from the registries.conf files
+// 1. If present, the short-name alias
+// 2. "localhost/" as used by many container engines such as Podman and Buildah
+// 3. Unqualified-search registries from the registries.conf files
//
// Note that tags and digests are stripped from the specified name before
// looking up an alias. Stripped off tags and digests are later on appended to
@@ -412,6 +423,23 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e
var candidates []reference.Named
+ // Complete the candidates with the specified registries.
+ completeCandidates := func(registries []string) ([]reference.Named, error) {
+ for _, reg := range registries {
+ named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
+ if err != nil {
+ return nil, fmt.Errorf("creating reference with unqualified-search registry %q: %w", reg, err)
+ }
+ named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
+ candidates = append(candidates, named)
+ }
+ return candidates, nil
+ }
+
+ if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub {
+ return completeCandidates([]string{"docker.io"})
+ }
+
// Strip off the tag to normalize the short name for looking it up in
// the config files.
isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef)
@@ -434,9 +462,7 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e
return nil, err
}
}
- // Make sure to add ":latest" if needed
- namedAlias = reference.TagNameOnly(namedAlias)
-
+ namedAlias = reference.TagNameOnly(namedAlias) // Make sure to add ":latest" if needed
candidates = append(candidates, namedAlias)
}
@@ -447,16 +473,5 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e
}
// Note that "localhost" has precedence over the unqualified-search registries.
- for _, reg := range append([]string{"localhost"}, unqualifiedSearchRegistries...) {
- named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
- if err != nil {
- return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg)
- }
- // Make sure to add ":latest" if needed
- named = reference.TagNameOnly(named)
-
- candidates = append(candidates, named)
- }
-
- return candidates, nil
+ return completeCandidates(append([]string{"localhost"}, unqualifiedSearchRegistries...))
}
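// Illustrative sketch (outside the patch), assuming the vendored pkg/shortnames
// API shown above, no alias configured, and a registries.conf that lists
// registry.example.com as an unqualified-search registry. "localhost" comes
// first, and each candidate is normalized with ":latest":
package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/shortnames"
	"github.com/containers/image/v5/types"
)

func main() {
	candidates, err := shortnames.ResolveLocally(&types.SystemContext{}, "foo")
	if err != nil {
		panic(err)
	}
	for _, c := range candidates {
		fmt.Println(c.String()) // localhost/foo:latest, then registry.example.com/foo:latest
	}
}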
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go
new file mode 100644
index 00000000000..07fe5029428
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go
@@ -0,0 +1,12 @@
+//go:build !freebsd
+// +build !freebsd
+
+package sysregistriesv2
+
+// builtinRegistriesConfPath is the path to the registry configuration file.
+// DO NOT change this; instead, see systemRegistriesConfPath in system_registries_v2.go.
+const builtinRegistriesConfPath = "/etc/containers/registries.conf"
+
+// builtinRegistriesConfDirPath is the path to the registry configuration directory.
+// DO NOT change this; instead, see systemRegistriesConfDirPath in system_registries_v2.go.
+const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go
new file mode 100644
index 00000000000..741b99f8f7b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go
@@ -0,0 +1,12 @@
+//go:build freebsd
+// +build freebsd
+
+package sysregistriesv2
+
+// builtinRegistriesConfPath is the path to the registry configuration file.
+// DO NOT change this; instead, see systemRegistriesConfPath in system_registries_v2.go.
+const builtinRegistriesConfPath = "/usr/local/etc/containers/registries.conf"
+
+// builtinRegistriesConfDirPath is the path to the registry configuration directory.
+// DO NOT change this; instead, see systemRegistriesConfDirPath in system_registries_v2.go.
+const builtinRegistriesConfDirPath = "/usr/local/etc/containers/registries.conf.d"
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
index 7122e869fe6..12939b24da6 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
@@ -1,6 +1,7 @@
package sysregistriesv2
import (
+ "fmt"
"os"
"path/filepath"
"reflect"
@@ -12,7 +13,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
- "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// defaultShortNameMode is the default mode of registries.conf files if the
@@ -165,7 +166,7 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er
} else {
// If the name does not exist, throw an error.
if _, exists := conf.Aliases[name]; !exists {
- return errors.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
+ return fmt.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
}
delete(conf.Aliases, name)
@@ -209,25 +210,25 @@ func RemoveShortNameAlias(ctx *types.SystemContext, name string) error {
func parseShortNameValue(alias string) (reference.Named, error) {
ref, err := reference.Parse(alias)
if err != nil {
- return nil, errors.Wrapf(err, "parsing alias %q", alias)
+ return nil, fmt.Errorf("parsing alias %q: %w", alias, err)
}
if _, ok := ref.(reference.Digested); ok {
- return nil, errors.Errorf("invalid alias %q: must not contain digest", alias)
+ return nil, fmt.Errorf("invalid alias %q: must not contain digest", alias)
}
if _, ok := ref.(reference.Tagged); ok {
- return nil, errors.Errorf("invalid alias %q: must not contain tag", alias)
+ return nil, fmt.Errorf("invalid alias %q: must not contain tag", alias)
}
named, ok := ref.(reference.Named)
if !ok {
- return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
+ return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
}
registry := reference.Domain(named)
if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
- return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
+ return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
}
// A final parse to make sure that docker.io references are correctly
@@ -241,25 +242,25 @@ func parseShortNameValue(alias string) (reference.Named, error) {
func validateShortName(name string) error {
repo, err := reference.Parse(name)
if err != nil {
- return errors.Wrapf(err, "cannot parse short name: %q", name)
+ return fmt.Errorf("cannot parse short name: %q: %w", name, err)
}
if _, ok := repo.(reference.Digested); ok {
- return errors.Errorf("invalid short name %q: must not contain digest", name)
+ return fmt.Errorf("invalid short name %q: must not contain digest", name)
}
if _, ok := repo.(reference.Tagged); ok {
- return errors.Errorf("invalid short name %q: must not contain tag", name)
+ return fmt.Errorf("invalid short name %q: must not contain tag", name)
}
named, ok := repo.(reference.Named)
if !ok {
- return errors.Errorf("invalid short name %q: no name", name)
+ return fmt.Errorf("invalid short name %q: no name", name)
}
registry := reference.Domain(named)
if strings.ContainsAny(registry, ".:") || registry == "localhost" {
- return errors.Errorf("invalid short name %q: must not contain registry", name)
+ return fmt.Errorf("invalid short name %q: must not contain registry", name)
}
return nil
}
@@ -297,7 +298,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
if len(errs) > 0 {
err := errs[0]
for i := 1; i < len(errs); i++ {
- err = errors.Wrapf(err, "%v\n", errs[i])
+ err = fmt.Errorf("%v\n: %w", errs[i], err)
}
return nil, err
}
@@ -315,17 +316,20 @@ func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAlia
func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
conf := shortNameAliasConf{}
- _, err := toml.DecodeFile(confPath, &conf)
+ meta, err := toml.DecodeFile(confPath, &conf)
if err != nil && !os.IsNotExist(err) {
// It's okay if the config doesn't exist. Other errors are not.
- return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath)
+ return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err)
+ }
+ if keys := meta.Undecoded(); len(keys) > 0 {
+ logrus.Debugf("Failed to decode keys %q from %q", keys, confPath)
}
// Even if we don’t always need the cache, creating it validates the machine-generated config. The
// file could still be corrupted by another process or user.
cache, err := newShortNameAliasCache(confPath, &conf)
if err != nil {
- return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath)
+ return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err)
}
return &conf, cache, nil
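// Standalone sketch (outside the patch) of the BurntSushi/toml pattern used
// above: decode a file, then log any keys the target struct did not consume.
// The struct and path here are hypothetical.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type aliasesConf struct {
	Aliases map[string]string `toml:"aliases"`
}

func loadAliases(path string) (*aliasesConf, error) {
	var conf aliasesConf
	meta, err := toml.DecodeFile(path, &conf)
	if err != nil {
		return nil, fmt.Errorf("loading short-name aliases config file %q: %w", path, err)
	}
	// meta.Undecoded() lists keys present in the file but absent from the struct.
	if keys := meta.Undecoded(); len(keys) > 0 {
		fmt.Printf("ignoring unrecognized keys %q in %q\n", keys, path)
	}
	return &conf, nil
}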
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index 4c1629f5634..41204dd9afc 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -2,6 +2,7 @@ package sysregistriesv2
import (
"fmt"
+ "io/fs"
"os"
"path/filepath"
"reflect"
@@ -14,7 +15,6 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -24,25 +24,27 @@ import (
// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path'
var systemRegistriesConfPath = builtinRegistriesConfPath
-// builtinRegistriesConfPath is the path to the registry configuration file.
-// DO NOT change this, instead see systemRegistriesConfPath above.
-const builtinRegistriesConfPath = "/etc/containers/registries.conf"
-
// systemRegistriesConfDirPath is the path to the system-wide registry
// configuration directory and is used to add/subtract potential registries for
// obtaining images. You can override this at build time with
// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path'
var systemRegistriesConfDirPath = builtinRegistriesConfDirPath
-// builtinRegistriesConfDirPath is the path to the registry configuration directory.
-// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
-const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
-
// AuthenticationFileHelper is a special key for credential helpers indicating
// the usage of consulting containers-auth.json files instead of a credential
// helper.
const AuthenticationFileHelper = "containers-auth.json"
+const (
+ // configuration values for "pull-from-mirror"
+ // mirrors will be used for both digest pulls and tag pulls
+ MirrorAll = "all"
+ // mirrors will only be used for digest pulls
+ MirrorByDigestOnly = "digest-only"
+ // mirrors will only be used for tag pulls
+ MirrorByTagOnly = "tag-only"
+)
+
// Endpoint describes a remote location of a registry.
type Endpoint struct {
// The endpoint's remote location. Can be empty iff Prefix contains
@@ -53,6 +55,18 @@ type Endpoint struct {
// If true, certs verification will be skipped and HTTP (non-TLS)
// connections will be allowed.
Insecure bool `toml:"insecure,omitempty"`
+ // PullFromMirror restricts the kinds of image pulls this mirror will be used for.
+ // Set to "all", "digest-only", or "tag-only".
+ // If "digest-only", mirrors will only be used for digest pulls. Pulling images by
+ // tag can potentially yield different images, depending on which endpoint
+ // we pull from. Restricting mirrors to pulls by digest avoids that issue.
+ // If "tag-only", mirrors will only be used for tag pulls. This is useful for a mirror
+ // that is more up-to-date and more expensive to pull from, but less likely to be out of
+ // sync if tags move: such a mirror should not be unnecessarily used for digest references.
+ // Default is "all" (or left empty): mirrors will be used for both digest pulls and tag pulls unless mirror-by-digest-only is set for the primary registry.
+ // This can only be set in a registry's Mirror field, not in the registry's primary Endpoint.
+ // This per-mirror setting is allowed only when mirror-by-digest-only is not configured for the primary registry.
+ PullFromMirror string `toml:"pull-from-mirror,omitempty"`
}
// userRegistriesFile is the path to the per user registry configuration file.
@@ -80,7 +94,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (referen
// be dropped.
// https://github.com/containers/image/pull/1191#discussion_r610621608
if e.Location == "" {
- if prefix[:2] != "*." {
+ if !strings.HasPrefix(prefix, "*.") {
return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix)
}
return ref, nil
@@ -88,7 +102,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (referen
newNamedRef = e.Location + refString[prefixLen:]
newParsedRef, err := reference.ParseNamed(newNamedRef)
if err != nil {
- return nil, errors.Wrapf(err, "rewriting reference")
+ return nil, fmt.Errorf("rewriting reference: %w", err)
}
return newParsedRef, nil
@@ -115,7 +129,7 @@ type Registry struct {
Blocked bool `toml:"blocked,omitempty"`
// If true, mirrors will only be used for digest pulls. Pulling images by
// tag can potentially yield different images, depending on which endpoint
- // we pull from. Forcing digest-pulls for mirrors avoids that issue.
+ // we pull from. Restricting mirrors to pulls by digest avoids that issue.
MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"`
}
@@ -130,17 +144,29 @@ type PullSource struct {
// reference.
func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
var endpoints []Endpoint
-
+ _, isDigested := ref.(reference.Canonical)
if r.MirrorByDigestOnly {
- // Only use mirrors when the reference is a digest one.
- if _, isDigested := ref.(reference.Canonical); isDigested {
- endpoints = append(r.Mirrors, r.Endpoint)
- } else {
- endpoints = []Endpoint{r.Endpoint}
+ // Only use mirrors when the reference is a digested one.
+ if isDigested {
+ endpoints = append(endpoints, r.Mirrors...)
}
} else {
- endpoints = append(r.Mirrors, r.Endpoint)
+ for _, mirror := range r.Mirrors {
+ // skip the mirror if a per-mirror setting exists but the reference does not match the restriction
+ switch mirror.PullFromMirror {
+ case MirrorByDigestOnly:
+ if !isDigested {
+ continue
+ }
+ case MirrorByTagOnly:
+ if isDigested {
+ continue
+ }
+ }
+ endpoints = append(endpoints, mirror)
+ }
}
+ endpoints = append(endpoints, r.Endpoint)
sources := []PullSource{}
for _, ep := range endpoints {
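// Minimal sketch (outside the patch) of the per-mirror filtering rule the loop
// above implements; the helper name is hypothetical, the constants are the
// ones introduced in this file.
func mirrorApplies(pullFromMirror string, refIsDigested bool) bool {
	switch pullFromMirror {
	case MirrorByDigestOnly: // "digest-only": use this mirror for digest pulls only
		return refIsDigested
	case MirrorByTagOnly: // "tag-only": use this mirror for tag pulls only
		return !refIsDigested
	default: // "" or MirrorAll ("all"): no restriction
		return true
	}
}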
@@ -369,11 +395,15 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
}
// FIXME: allow config authors to always use Prefix.
// https://github.com/containers/image/pull/1191#discussion_r610622495
- if reg.Prefix[:2] != "*." && reg.Location == "" {
+ if !strings.HasPrefix(reg.Prefix, "*.") && reg.Location == "" {
return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"}
}
}
+ // validate that the mirror usage setting is not applied to the primary registry
+ if reg.PullFromMirror != "" {
+ return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix)
+ }
// make sure mirrors are valid
for _, mir := range reg.Mirrors {
mir.Location, err = parseLocation(mir.Location)
@@ -387,6 +417,14 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
if mir.Location == "" {
return &InvalidRegistries{s: "invalid condition: mirror location is unset"}
}
+
+ if reg.MirrorByDigestOnly && mir.PullFromMirror != "" {
+ return &InvalidRegistries{s: fmt.Sprintf("cannot set mirror usage mirror-by-digest-only for the registry (%q) and pull-from-mirror for the mirror (%q) at the same time", reg.Prefix, mir.Location)}
+ }
+ if mir.PullFromMirror != "" && mir.PullFromMirror != MirrorAll &&
+ mir.PullFromMirror != MirrorByDigestOnly && mir.PullFromMirror != MirrorByTagOnly {
+ return &InvalidRegistries{s: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", mir.PullFromMirror, mir.Location)}
+ }
}
if reg.Location == "" {
regMap[reg.Prefix] = append(regMap[reg.Prefix], reg)
@@ -597,17 +635,17 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
dirPaths = append(dirPaths, wrapper.userConfigDirPath)
}
for _, dirPath := range dirPaths {
- err := filepath.Walk(dirPath,
+ err := filepath.WalkDir(dirPath,
// WalkDirFunc to read additional configs
- func(path string, info os.FileInfo, err error) error {
+ func(path string, d fs.DirEntry, err error) error {
switch {
case err != nil:
// return error (could be a permission problem)
return err
- case info == nil:
+ case d == nil:
// this should only happen when err != nil but let's be sure
return nil
- case info.IsDir():
+ case d.IsDir():
if path != dirPath {
// make sure to not recurse into sub-directories
return filepath.SkipDir
@@ -627,7 +665,7 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
if err != nil && !os.IsNotExist(err) {
// Ignore IsNotExist errors: most systems won't have a registries.conf.d
// directory.
- return nil, errors.Wrapf(err, "reading registries.conf.d")
+ return nil, fmt.Errorf("reading registries.conf.d: %w", err)
}
}
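// Standalone sketch (outside the patch) of the filepath.WalkDir pattern used
// above: visit only the top level of dir, never recursing, and collect the
// non-directory entries. It uses only the io/fs and path/filepath imports
// added by this change.
func topLevelEntries(dir string) ([]string, error) {
	var paths []string
	err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
		switch {
		case err != nil:
			return err // e.g. a permission problem
		case d == nil:
			return nil // should only happen when err != nil
		case d.IsDir():
			if path != dir {
				return filepath.SkipDir // do not recurse into sub-directories
			}
			return nil
		default:
			paths = append(paths, path)
			return nil
		}
	})
	return paths, err
}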
@@ -669,7 +707,7 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
return nil, err // Should never happen
}
} else {
- return nil, errors.Wrapf(err, "loading registries configuration %q", wrapper.configPath)
+ return nil, fmt.Errorf("loading registries configuration %q: %w", wrapper.configPath, err)
}
}
@@ -682,7 +720,7 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
// Enforce v2 format for drop-in-configs.
dropIn, err := loadConfigFile(path, true)
if err != nil {
- return nil, errors.Wrapf(err, "loading drop-in registries configuration %q", path)
+ return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
}
config.updateWithConfigurationFrom(dropIn)
}
@@ -743,7 +781,7 @@ func parseShortNameMode(mode string) (types.ShortNameMode, error) {
case "permissive":
return types.ShortNameModePermissive, nil
default:
- return types.ShortNameModeInvalid, errors.Errorf("invalid short-name mode: %q", mode)
+ return types.ShortNameModeInvalid, fmt.Errorf("invalid short-name mode: %q", mode)
}
}
@@ -804,7 +842,7 @@ func refMatchingSubdomainPrefix(ref, prefix string) int {
// (This is split from the caller primarily to make testing easier.)
func refMatchingPrefix(ref, prefix string) int {
switch {
- case prefix[0:2] == "*.":
+ case strings.HasPrefix(prefix, "*."):
return refMatchingSubdomainPrefix(ref, prefix)
case len(ref) < len(prefix):
return -1
@@ -877,10 +915,13 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
// Load the tomlConfig. Note that `DecodeFile` will overwrite set fields.
var combinedTOML tomlConfig
- _, err := toml.DecodeFile(path, &combinedTOML)
+ meta, err := toml.DecodeFile(path, &combinedTOML)
if err != nil {
return nil, err
}
+ if keys := meta.Undecoded(); len(keys) > 0 {
+ logrus.Debugf("Failed to decode keys %q from %q", keys, path)
+ }
if combinedTOML.V1RegistriesConf.Nonempty() {
// Enforce the v2 format if requested.
@@ -924,7 +965,7 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
// https://github.com/containers/image/pull/1191#discussion_r610623829
for i := range res.partialV2.Registries {
prefix := res.partialV2.Registries[i].Prefix
- if prefix[:2] == "*." && strings.ContainsAny(prefix, "/@:") {
+ if strings.HasPrefix(prefix, "*.") && strings.ContainsAny(prefix, "/@:") {
msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix)
return nil, &InvalidRegistries{s: msg}
}
@@ -933,7 +974,7 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
// Parse and validate short-name aliases.
cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf)
if err != nil {
- return nil, errors.Wrap(err, "validating short-name aliases")
+ return nil, fmt.Errorf("validating short-name aliases: %w", err)
}
res.aliasCache = cache
// Clear conf.partialV2.shortNameAliasConf to make it available for garbage collection and
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
index 7e2142b1f58..9599aa3c9d0 100644
--- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
+++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
@@ -2,7 +2,7 @@ package tlsclientconfig
import (
"crypto/tls"
- "io/ioutil"
+ "fmt"
"net"
"net/http"
"os"
@@ -12,14 +12,13 @@ import (
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
func SetupCertificates(dir string, tlsc *tls.Config) error {
logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
- fs, err := ioutil.ReadDir(dir)
+ fs, err := os.ReadDir(dir)
if err != nil {
if os.IsNotExist(err) {
return nil
@@ -35,7 +34,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
fullPath := filepath.Join(dir, f.Name())
if strings.HasSuffix(f.Name(), ".crt") {
logrus.Debugf(" crt: %s", fullPath)
- data, err := ioutil.ReadFile(fullPath)
+ data, err := os.ReadFile(fullPath)
if err != nil {
if os.IsNotExist(err) {
// Dangling symbolic link?
@@ -50,7 +49,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
if tlsc.RootCAs == nil {
systemPool, err := tlsconfig.SystemCertPool()
if err != nil {
- return errors.Wrap(err, "unable to get system cert pool")
+ return fmt.Errorf("unable to get system cert pool: %w", err)
}
tlsc.RootCAs = systemPool
}
@@ -61,7 +60,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
keyName := certName[:len(certName)-5] + ".key"
logrus.Debugf(" cert: %s", fullPath)
if !hasFile(fs, keyName) {
- return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+ return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
}
cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
if err != nil {
@@ -74,14 +73,14 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
certName := keyName[:len(keyName)-4] + ".cert"
logrus.Debugf(" key: %s", fullPath)
if !hasFile(fs, certName) {
- return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
+ return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)
}
}
}
return nil
}
-func hasFile(files []os.FileInfo, name string) bool {
+func hasFile(files []os.DirEntry, name string) bool {
for _, f := range files {
if f.Name() == name {
return true
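// Hypothetical usage sketch (outside the patch) of SetupCertificates above;
// the directory path is an assumption, not mandated by the library.
package main

import (
	"crypto/tls"
	"log"

	"github.com/containers/image/v5/pkg/tlsclientconfig"
)

func main() {
	tlsc := &tls.Config{}
	if err := tlsclientconfig.SetupCertificates("/etc/containers/certs.d/registry.example.com", tlsc); err != nil {
		log.Fatal(err)
	}
	// tlsc now holds any *.crt CAs plus *.cert/*.key client pairs from the directory.
}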
diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go
new file mode 100644
index 00000000000..70758ad4399
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/load.go
@@ -0,0 +1,210 @@
+package sif
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+ "github.com/sylabs/sif/v2/pkg/sif"
+)
+
+// injectedScriptTargetPath is the path injectedScript should be written to in the created image.
+const injectedScriptTargetPath = "/podman/runscript"
+
+// parseDefFile parses a SIF definition file from reader,
+// and returns non-trivial contents of the %environment and %runscript sections.
+func parseDefFile(reader io.Reader) ([]string, []string, error) {
+ type parserState int
+ const (
+ parsingOther parserState = iota
+ parsingEnvironment
+ parsingRunscript
+ )
+
+ environment := []string{}
+ runscript := []string{}
+
+ state := parsingOther
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ switch {
+ case s == `%environment`:
+ state = parsingEnvironment
+ case s == `%runscript`:
+ state = parsingRunscript
+ case strings.HasPrefix(s, "%"):
+ state = parsingOther
+ case state == parsingEnvironment:
+ if s != "" && !strings.HasPrefix(s, "#") {
+ environment = append(environment, s)
+ }
+ case state == parsingRunscript:
+ runscript = append(runscript, s)
+ default: // parsingOther: ignore the line
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err)
+ }
+ return environment, runscript, nil
+}
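// Usage sketch (outside the patch): parseDefFile on an in-memory definition
// file; the contents are made up.
func exampleParseDefFile() {
	def := "%environment\nexport DEBUG=1\n\n%runscript\necho hello\n"
	environment, runscript, err := parseDefFile(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	_ = environment // []string{"export DEBUG=1"} (blank and comment lines dropped)
	_ = runscript   // []string{"echo hello"}
}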
+
+// generateInjectedScript generates a shell script based on
+// SIF definition file %environment and %runscript data, and returns it.
+func generateInjectedScript(environment []string, runscript []string) []byte {
+ script := fmt.Sprintf("#!/bin/bash\n"+
+ "%s\n"+
+ "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n"))
+ return []byte(script)
+}
+
+// processDefFile finds sif.DataDeffile in sifImage, if any,
+// and returns:
+// - the command to run
+// - contents of a script to inject as injectedScriptTargetPath, or nil
+func processDefFile(sifImage *sif.FileImage) (string, []byte, error) {
+ var environment, runscript []string
+
+ desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile))
+ if err == nil {
+ environment, runscript, err = parseDefFile(desc.GetReader())
+ if err != nil {
+ return "", nil, err
+ }
+ }
+
+ var command string
+ var injectedScript []byte
+ if len(environment) == 0 && len(runscript) == 0 {
+ command = "bash"
+ injectedScript = nil
+ } else {
+ injectedScript = generateInjectedScript(environment, runscript)
+ command = injectedScriptTargetPath
+ }
+
+ return command, injectedScript, nil
+}
+
+func writeInjectedScript(extractedRootPath string, injectedScript []byte) error {
+ if injectedScript == nil {
+ return nil
+ }
+ filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath)
+ parentDirPath := filepath.Dir(filePath)
+ if err := os.MkdirAll(parentDirPath, 0755); err != nil {
+ return fmt.Errorf("creating %s: %w", parentDirPath, err)
+ }
+ if err := os.WriteFile(filePath, injectedScript, 0755); err != nil {
+ return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err)
+ }
+ return nil
+}
+
+// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath.
+// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use,
+// if necessary.
+func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error {
+ // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
+ // for our use.
+ defer os.RemoveAll(extractedRootPath)
+
+ // Almost everything in extractedRootPath comes from squashFSPath.
+ conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./",
+ extractedRootPath, squashFSPath, extractedRootPath, tarPath)
+ script := "#!/bin/sh\n" + conversionCommand + "\n"
+ if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
+ return err
+ }
+ defer os.Remove(scriptPath)
+
+ // On top of squashFSPath, we only add injectedScript, if necessary.
+ if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil {
+ return err
+ }
+
+ logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand)
+ cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("converting image: %w, output: %s", err, string(output))
+ }
+ logrus.Debugf("... finished converting squashfs to tar")
+ return nil
+}
+
+// convertSIFToElements processes sifImage and creates/returns
+// the relevant elements for constructing an OCI-like image:
+// - A path to a tar file containing a root filesystem,
+// - A command to run.
+// The returned tar file path is inside tempDir, which can be assumed to be empty
+// at start, and is exclusively used by the current process (i.e. it is safe
+// to use hard-coded relative paths within it).
+func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) {
+ // We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive,
+ // so we can just hard-code a set of unique values here.
+ // We create and/or manage cleanup of these two paths.
+ squashFSPath := filepath.Join(tempDir, "rootfs.squashfs")
+ tarPath := filepath.Join(tempDir, "rootfs.tar")
+ // We only allocate these paths; the consumer of these paths is responsible for cleaning them up.
+ extractedRootPath := filepath.Join(tempDir, "rootfs")
+ scriptPath := filepath.Join(tempDir, "script")
+
+ succeeded := false
+ // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
+ // for our use.
+ // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need
+ // to run both creation and consumption of extractedRootPath in the same fakeroot context.
+ // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2
+ // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time.
+ // That's rather unsatisfactory; ideally we would be streaming the data directly from a squashfs parser
+ // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy.
+ defer os.Remove(squashFSPath)
+ defer func() {
+ if !succeeded {
+ os.Remove(tarPath)
+ }
+ }()
+
+ command, injectedScript, err := processDefFile(sifImage)
+ if err != nil {
+ return "", nil, err
+ }
+
+ rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
+ if err != nil {
+ return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err)
+ }
+ // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4
+ // has an -o option that allows extracting a squashfs from the SIF file directly,
+ // but that version is not currently available in RHEL 8.
+ logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
+ if err := func() error { // A scope for defer
+ f, err := os.Create(squashFSPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
+ return err
+ }
+ return nil
+ }(); err != nil {
+ return "", nil, err
+ }
+ logrus.Debugf("... finished creating a temporary squashfs image")
+
+ if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil {
+ return "", nil, err
+ }
+ succeeded = true
+ return tarPath, []string{command}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
new file mode 100644
index 00000000000..b645f80dd02
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -0,0 +1,204 @@
+package sif
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecs "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "github.com/sylabs/sif/v2/pkg/sif"
+)
+
+type sifImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
+ ref sifReference
+ workDir string
+ layerDigest digest.Digest
+ layerSize int64
+ layerFile string
+ config []byte
+ configDigest digest.Digest
+ manifest []byte
+}
+
+// getBlobInfo returns the digest and size of the provided file.
+func getBlobInfo(path string) (digest.Digest, int64, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return "", -1, fmt.Errorf("opening %q for reading: %w", path, err)
+ }
+ defer f.Close()
+
+ // TODO: Instead of writing the tar file to disk, and reading
+ // it here again, stream the tar file to a pipe and
+ // compute the digest while writing it to disk.
+ logrus.Debugf("Computing a digest of the SIF conversion output...")
+ digester := digest.Canonical.Digester()
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ size, err := io.Copy(digester.Hash(), f)
+ if err != nil {
+ return "", -1, fmt.Errorf("reading %q: %w", path, err)
+ }
+ digest := digester.Digest()
+ logrus.Debugf("... finished computing the digest of the SIF conversion output")
+
+ return digest, size, nil
+}
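// Sketch (outside the patch) of the go-digest pattern getBlobInfo relies on:
// stream any reader through a canonical (sha256) digester while counting bytes.
func digestAndSize(r io.Reader) (digest.Digest, int64, error) {
	digester := digest.Canonical.Digester()
	n, err := io.Copy(digester.Hash(), r) // digester.Hash() is an io.Writer
	if err != nil {
		return "", -1, err
	}
	return digester.Digest(), n, nil
}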
+
+// newImageSource returns an ImageSource for reading from an existing SIF image.
+// It extracts the SIF objects and saves them in a temporary directory.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (private.ImageSource, error) {
+ sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY))
+ if err != nil {
+ return nil, fmt.Errorf("loading SIF file: %w", err)
+ }
+ defer func() {
+ _ = sifImg.UnloadContainer()
+ }()
+
+ workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif")
+ if err != nil {
+ return nil, fmt.Errorf("creating temp directory: %w", err)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ os.RemoveAll(workDir)
+ }
+ }()
+
+ layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir)
+ if err != nil {
+ return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err)
+ }
+
+ layerDigest, layerSize, err := getBlobInfo(layerPath)
+ if err != nil {
+ return nil, fmt.Errorf("gathering blob information: %w", err)
+ }
+
+ created := sifImg.ModifiedAt()
+ config := imgspecv1.Image{
+ Created: &created,
+ Architecture: sifImg.PrimaryArch(),
+ OS: "linux",
+ Config: imgspecv1.ImageConfig{
+ Cmd: commandLine,
+ },
+ RootFS: imgspecv1.RootFS{
+ Type: "layers",
+ DiffIDs: []digest.Digest{layerDigest},
+ },
+ History: []imgspecv1.History{
+ {
+ Created: &created,
+ CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
+ Comment: "imported from SIF, uuid: " + sifImg.ID(),
+ },
+ {
+ Created: &created,
+ CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]",
+ EmptyLayer: true,
+ },
+ },
+ }
+ configBytes, err := json.Marshal(&config)
+ if err != nil {
+ return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err)
+ }
+ configDigest := digest.Canonical.FromBytes(configBytes)
+
+ manifest := imgspecv1.Manifest{
+ Versioned: imgspecs.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageManifest,
+ Config: imgspecv1.Descriptor{
+ Digest: configDigest,
+ Size: int64(len(configBytes)),
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ },
+ Layers: []imgspecv1.Descriptor{{
+ Digest: layerDigest,
+ Size: layerSize,
+ MediaType: imgspecv1.MediaTypeImageLayer,
+ }},
+ }
+ manifestBytes, err := json.Marshal(&manifest)
+ if err != nil {
+ return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err)
+ }
+
+ succeeded = true
+ s := &sifImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ workDir: workDir,
+ layerDigest: layerDigest,
+ layerSize: layerSize,
+ layerFile: layerPath,
+ config: configBytes,
+ configDigest: configDigest,
+ manifest: manifestBytes,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *sifImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *sifImageSource) Close() error {
+ return os.RemoveAll(s.workDir)
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ switch info.Digest {
+ case s.configDigest:
+ return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
+ case s.layerDigest:
+ reader, err := os.Open(s.layerFile)
+ if err != nil {
+ return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err)
+ }
+ return reader, s.layerSize, nil
+ default:
+ return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String())
+ }
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return nil, "", errors.New("manifest lists are not supported by the sif transport")
+ }
+ return s.manifest, imgspecv1.MediaTypeImageManifest, nil
+}
diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go
new file mode 100644
index 00000000000..2037f25082e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/transport.go
@@ -0,0 +1,160 @@
+package sif
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for SIF images.
+var Transport = sifTransport{}
+
+type sifTransport struct{}
+
+func (t sifTransport) Name() string {
+ return "sif"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return NewReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value that will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error {
+ if !strings.HasPrefix(scope, "/") {
+ return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
+ }
+ // Refuse also "/", otherwise "/" and "" would have the same semantics,
+ // and "" could be unexpectedly shadowed by the "/" entry.
+ if scope == "/" {
+ return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+ }
+ cleaned := filepath.Clean(scope)
+ if cleaned != scope {
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+ }
+ return nil
+}
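// Illustrative outcomes of the checks above (not part of the patch):
//
//	ValidatePolicyConfigurationScope("/var/lib/images") // nil
//	ValidatePolicyConfigurationScope("relative/path")   // error: not an absolute path
//	ValidatePolicyConfigurationScope("/")               // error: use the generic default scope ""
//	ValidatePolicyConfigurationScope("/a/b/../c")       // error: non-canonical, try /a/c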
+
+// sifReference is an ImageReference for SIF images.
+type sifReference struct {
+ // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+ // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on.
+
+ // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid
+ // being exposed to symlinks and renames in the parent directories to the working directory).
+ // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+ file string // As specified by the user. May be relative, contain symlinks, etc.
+ resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+}
+
+// There is no sif.ParseReference because it is rather pointless.
+// Callers who need a transport-independent interface will go through
+// sifTransport.ParseReference; callers who intentionally deal with SIF files
+// can use sif.NewReference.
+
+// NewReference returns an image file reference for a specified path.
+func NewReference(file string) (types.ImageReference, error) {
+ // We do not expose an API supplying the resolvedFile; we could, but recomputing it
+ // is generally cheap enough that we prefer being confident about the properties of resolvedFile.
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
+ if err != nil {
+ return nil, err
+ }
+ return sifReference{file: file, resolvedFile: resolved}, nil
+}
+
+func (ref sifReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+// instead, see transports.ImageName().
+func (ref sifReference) StringWithinTransport() string {
+ return ref.file
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref sifReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref sifReference) PolicyConfigurationIdentity() string {
+ return ref.resolvedFile
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref sifReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedFile
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 || lastSlash == 0 {
+ break
+ }
+ path = path[:lastSlash]
+ res = append(res, path)
+ }
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by sifTransport.ValidatePolicyConfigurationScope above.
+ return res
+}
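// Worked example (outside the patch) of the loop above: for a resolvedFile of
// "/home/user/images/app.sif", the returned namespaces are, in order,
// "/home/user/images", "/home/user", "/home"; "/" and "" are deliberately
// omitted, matching ValidatePolicyConfigurationScope.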
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return nil, errors.New(`"sif:" locations can only be read from, not written to`)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return errors.New("Deleting images not implemented for sif: images")
+}
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
index 07fdd42a9af..b09502dfe31 100644
--- a/vendor/github.com/containers/image/v5/signature/docker.go
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -3,22 +3,47 @@
package signature
import (
+ "errors"
"fmt"
+ "strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
"github.com/opencontainers/go-digest"
)
+// SignOptions includes optional parameters for signing container images.
+type SignOptions struct {
+ // Passphrase to use when signing with the key identity.
+ Passphrase string
+}
+
-// SignDockerManifest returns a signature for manifest as the specified dockerReference,
-// using mech and keyIdentity.
-func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity, and the specified options.
+func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
manifestDigest, err := manifest.Digest(m)
if err != nil {
return nil, err
}
sig := newUntrustedSignature(manifestDigest, dockerReference)
- return sig.sign(mech, keyIdentity)
+
+ var passphrase string
+ if options != nil {
+ passphrase = options.Passphrase
+ // The gpgme implementation can’t use a passphrase that contains \n; reject it here for consistent behavior.
+ if strings.Contains(passphrase, "\n") {
+ return nil, errors.New("invalid passphrase: must not contain a line break")
+ }
+ }
+
+ return sig.sign(mech, keyIdentity, passphrase)
+}
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+ return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
}
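// Hypothetical caller sketch (outside the patch); mech, keyFingerprint, and
// passphrase are assumed to be supplied by the caller.
func signExample(manifest []byte, mech SigningMechanism, keyFingerprint, passphrase string) ([]byte, error) {
	return SignDockerManifestWithOptions(manifest, "docker.io/library/busybox:latest",
		mech, keyFingerprint, &SignOptions{Passphrase: passphrase})
}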
// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
@@ -32,18 +57,18 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt
sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
if keyIdentity != expectedKeyIdentity {
- return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity))
}
return nil
},
validateSignedDockerReference: func(signedDockerReference string) error {
signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
if err != nil {
- return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference))
}
if signedRef.String() != expectedRef.String() {
- return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
- signedDockerReference, expectedDockerReference)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %s does not match %s",
+ signedDockerReference, expectedDockerReference))
}
return nil
},
@@ -53,7 +78,7 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt
return err
}
if !matches {
- return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest))
}
return nil
},
diff --git a/vendor/github.com/containers/image/v5/signature/internal/errors.go b/vendor/github.com/containers/image/v5/signature/internal/errors.go
new file mode 100644
index 00000000000..7872f0f43c7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/errors.go
@@ -0,0 +1,15 @@
+package internal
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+// This is publicly visible as signature.InvalidSignatureError.
+type InvalidSignatureError struct {
+ msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+ return err.msg
+}
+
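+// NewInvalidSignatureError returns a new InvalidSignatureError with the specified message.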
+func NewInvalidSignatureError(msg string) InvalidSignatureError {
+ return InvalidSignatureError{msg: msg}
+}
diff --git a/vendor/github.com/containers/image/v5/signature/json.go b/vendor/github.com/containers/image/v5/signature/internal/json.go
similarity index 64%
rename from vendor/github.com/containers/image/v5/signature/json.go
rename to vendor/github.com/containers/image/v5/signature/internal/json.go
index 9e592863dae..0f39fe0ad29 100644
--- a/vendor/github.com/containers/image/v5/signature/json.go
+++ b/vendor/github.com/containers/image/v5/signature/internal/json.go
@@ -1,4 +1,4 @@
-package signature
+package internal
import (
"bytes"
@@ -7,34 +7,34 @@ import (
"io"
)
-// jsonFormatError is returned when JSON does not match expected format.
-type jsonFormatError string
+// JSONFormatError is returned when JSON does not match expected format.
+type JSONFormatError string
-func (err jsonFormatError) Error() string {
+func (err JSONFormatError) Error() string {
return string(err)
}
-// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
+// ParanoidUnmarshalJSONObject unmarshals data as a JSON object, failing on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
//
// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
// we could use reflection to automate this. Later?
-func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
+func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
seenKeys := map[string]struct{}{}
dec := json.NewDecoder(bytes.NewReader(data))
t, err := dec.Token()
if err != nil {
- return jsonFormatError(err.Error())
+ return JSONFormatError(err.Error())
}
if t != json.Delim('{') {
- return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+ return JSONFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
}
for {
t, err := dec.Token()
if err != nil {
- return jsonFormatError(err.Error())
+ return JSONFormatError(err.Error())
}
if t == json.Delim('}') {
break
@@ -43,34 +43,34 @@ func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa
key, ok := t.(string)
if !ok {
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
- return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+ return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
}
if _, ok := seenKeys[key]; ok {
- return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+ return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
}
seenKeys[key] = struct{}{}
valuePtr := fieldResolver(key)
if valuePtr == nil {
- return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+ return JSONFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
}
// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
if err := dec.Decode(valuePtr); err != nil {
- return jsonFormatError(err.Error())
+ return JSONFormatError(err.Error())
}
}
if _, err := dec.Token(); err != io.EOF {
- return jsonFormatError("Unexpected data after JSON object")
+ return JSONFormatError("Unexpected data after JSON object")
}
return nil
}
-// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
+// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
// must be present exactly once, and no other fields are accepted.
-func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
+func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
seenKeys := map[string]struct{}{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if valuePtr, ok := exactFields[key]; ok {
seenKeys[key] = struct{}{}
return valuePtr
@@ -81,7 +81,7 @@ func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]
}
for key := range exactFields {
if _, ok := seenKeys[key]; !ok {
- return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
+ return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
}
}
return nil
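// Usage sketch (outside the patch) of the strict helper above: every listed
// key must appear exactly once, and no others are allowed; the payload here
// is made up.
func exampleExactFields() error {
	var name string
	var count int64
	return ParanoidUnmarshalJSONObjectExactFields(
		[]byte(`{"count":3,"name":"x"}`),
		map[string]interface{}{
			"name":  &name,
			"count": &count,
		})
}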
diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
new file mode 100644
index 00000000000..bb5e9139d76
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
@@ -0,0 +1,201 @@
+package internal
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/containers/image/v5/version"
+ digest "github.com/opencontainers/go-digest"
+ sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+const (
+ sigstoreSignatureType = "cosign container image signature"
+ sigstoreHarcodedHashAlgorithm = crypto.SHA256
+)
+
+// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature)
+type UntrustedSigstorePayload struct {
+ UntrustedDockerManifestDigest digest.Digest
+ UntrustedDockerReference string // FIXME: more precise type?
+ UntrustedCreatorID *string
+ // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
+ // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+ // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+ // we would add another field, UntrustedTimestampNS int64.
+ UntrustedTimestamp *int64
+}
+
+// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with
+// the specified primary contents and appropriate metadata.
+func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerReference string) UntrustedSigstorePayload {
+ // Use intermediate variables for these values so that we can take their addresses.
+// Go guarantees that they will have a new address on every execution.
+ creatorID := "containers/image " + version.Version
+ timestamp := time.Now().Unix()
+ return UntrustedSigstorePayload{
+ UntrustedDockerManifestDigest: dockerManifestDigest,
+ UntrustedDockerReference: dockerReference,
+ UntrustedCreatorID: &creatorID,
+ UntrustedTimestamp: ×tamp,
+ }
+}
+
+// Compile-time check that UntrustedSigstorePayload implements json.Marshaler
+var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
+ if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
+ return nil, errors.New("Unexpected empty signature content")
+ }
+ critical := map[string]interface{}{
+ "type": sigstoreSignatureType,
+ "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
+ "identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
+ }
+ optional := map[string]interface{}{}
+ if s.UntrustedCreatorID != nil {
+ optional["creator"] = *s.UntrustedCreatorID
+ }
+ if s.UntrustedTimestamp != nil {
+ optional["timestamp"] = *s.UntrustedTimestamp
+ }
+ signature := map[string]interface{}{
+ "critical": critical,
+ "optional": optional,
+ }
+ return json.Marshal(signature)
+}
+
+// Compile-time check that UntrustedSigstorePayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
+ err := s.strictUnmarshalJSON(data)
+ if err != nil {
+ if formatErr, ok := err.(JSONFormatError); ok {
+ err = NewInvalidSignatureError(formatErr.Error())
+ }
+ }
+ return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
+ var critical, optional json.RawMessage
+ if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "critical": &critical,
+ "optional": &optional,
+ }); err != nil {
+ return err
+ }
+
+ var creatorID string
+ var timestamp float64
+ var gotCreatorID, gotTimestamp = false, false
+ // /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
+ if !bytes.Equal(optional, []byte("null")) {
+ if err := ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
+ switch key {
+ case "creator":
+ gotCreatorID = true
+ return &creatorID
+ case "timestamp":
+ gotTimestamp = true
+ return &timestamp
+ default:
+ var ignore interface{}
+ return &ignore
+ }
+ }); err != nil {
+ return err
+ }
+ }
+ if gotCreatorID {
+ s.UntrustedCreatorID = &creatorID
+ }
+ if gotTimestamp {
+ intTimestamp := int64(timestamp)
+ if float64(intTimestamp) != timestamp {
+ return NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
+ }
+ s.UntrustedTimestamp = &intTimestamp
+ }
+
+ var t string
+ var image, identity json.RawMessage
+ if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+ "type": &t,
+ "image": &image,
+ "identity": &identity,
+ }); err != nil {
+ return err
+ }
+ if t != sigstoreSignatureType {
+ return NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+ }
+
+ var digestString string
+ if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+ "docker-manifest-digest": &digestString,
+ }); err != nil {
+ return err
+ }
+ s.UntrustedDockerManifestDigest = digest.Digest(digestString)
+
+ return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+ "docker-reference": &s.UntrustedDockerReference,
+ })
+}
+
+// SigstorePayloadAcceptanceRules specifies how to decide whether an untrusted payload is acceptable.
+// We centralize the actual parsing and data extraction in VerifySigstorePayload; this supplies
+// the policy. We use an object instead of supplying func parameters to VerifySigstorePayload
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type SigstorePayloadAcceptanceRules struct {
+ ValidateSignedDockerReference func(string) error
+ ValidateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// VerifySigstorePayload verifies that unverifiedBase64Signature of unverifiedPayload was correctly created by publicKey, and that its principal components
+// match expected values, both as specified by rules, and returns it.
+// We return an *UntrustedSigstorePayload, although nothing actually uses it,
+// just to double-check against stupid typos.
+func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
+ verifier, err := sigstoreSignature.LoadVerifier(publicKey, sigstoreHardcodedHashAlgorithm)
+ if err != nil {
+ return nil, fmt.Errorf("creating verifier: %w", err)
+ }
+
+ unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+ if err != nil {
+ return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err))
+ }
+ // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
+// which seems not to be used by anything, so we don’t bother.
+ if err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)); err != nil {
+ return nil, NewInvalidSignatureError(fmt.Sprintf("cryptographic signature verification failed: %v", err))
+ }
+
+ var unmatchedPayload UntrustedSigstorePayload
+ if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil {
+ return nil, NewInvalidSignatureError(err.Error())
+ }
+ if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.UntrustedDockerManifestDigest); err != nil {
+ return nil, err
+ }
+ if err := rules.ValidateSignedDockerReference(unmatchedPayload.UntrustedDockerReference); err != nil {
+ return nil, err
+ }
+ // SigstorePayloadAcceptanceRules have accepted this value.
+ return &unmatchedPayload, nil
+}
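
Editorial aside (not part of the diff): a sketch of the JSON that the MarshalJSON above emits, assuming it lives in this same package. The digest and reference values are placeholders; the field layout follows directly from the maps built in MarshalJSON (encoding/json sorts map keys, so it is stable).

	// examplePayloadJSON marshals a payload built with placeholder values.
	func examplePayloadJSON() ([]byte, error) {
		payload := NewUntrustedSigstorePayload("sha256:<manifest-digest>", "example.com/app:latest")
		// The result looks roughly like:
		// {"critical":{"identity":{"docker-reference":"example.com/app:latest"},
		//              "image":{"docker-manifest-digest":"sha256:<manifest-digest>"},
		//              "type":"cosign container image signature"},
		//  "optional":{"creator":"containers/image <version>","timestamp":<unix seconds>}}
		return json.Marshal(payload)
	}
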
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go
index 2c08c231ea8..1d3fe0fdc99 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism.go
@@ -6,16 +6,19 @@ import (
"bytes"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"strings"
- "golang.org/x/crypto/openpgp"
+ // This code is used only to parse the data in an explicitly-untrusted
+ // code path, where cryptography is not relevant. For now, continue to
+ // use this frozen deprecated implementation. When mechanism_openpgp.go
+ // migrates to another implementation, this should migrate as well.
+ //lint:ignore SA1019 See above
+ "golang.org/x/crypto/openpgp" //nolint:staticcheck
)
// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// Each mechanism should eventually be closed by calling Close().
-// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
-// eliminate ambiguities, support CA signatures and perhaps other key properties)
type SigningMechanism interface {
// Close removes resources associated with the mechanism, if any.
Close() error
@@ -34,6 +37,15 @@ type SigningMechanism interface {
UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
}
+// signingMechanismWithPassphrase is an internal extension of SigningMechanism.
+type signingMechanismWithPassphrase interface {
+ SigningMechanism
+
+ // Sign creates a (non-detached) signature of input using keyIdentity and passphrase.
+ // Fails with a SigningNotSupportedError if the mechanism does not support signing.
+ SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error)
+}
+
// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
type SigningNotSupportedError string
@@ -53,7 +65,7 @@ func NewGPGSigningMechanism() (SigningMechanism, error) {
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
- return newEphemeralGPGSigningMechanism(blob)
+ return newEphemeralGPGSigningMechanism([][]byte{blob})
}
// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
@@ -70,7 +82,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents
if !md.IsSigned {
return nil, "", errors.New("The input is not a signature")
}
- content, err := ioutil.ReadAll(md.UnverifiedBody)
+ content, err := io.ReadAll(md.UnverifiedBody)
if err != nil {
// Coverage: An error during reading the body can happen only if
// 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
index 6ae74d430d6..2b2a7ad866b 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
@@ -5,11 +5,12 @@ package signature
import (
"bytes"
+ "errors"
"fmt"
- "io/ioutil"
"os"
- "github.com/mtrmac/gpgme"
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/proglottis/gpgme"
)
// A GPG/OpenPGP signing mechanism, implemented using gpgme.
@@ -20,7 +21,7 @@ type gpgmeSigningMechanism struct {
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
// The caller must call .Close() on the returned SigningMechanism.
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
ctx, err := newGPGMEContext(optionalDir)
if err != nil {
return nil, err
@@ -32,11 +33,11 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
}
// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
-// recognizes _only_ public keys from the supplied blob, and returns the identities
+// recognizes _only_ public keys from the supplied blobs, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
- dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
+func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) {
+ dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-")
if err != nil {
return nil, nil, err
}
@@ -54,9 +55,13 @@ func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, e
ctx: ctx,
ephemeralDir: dir,
}
- keyIdentities, err := mech.importKeysFromBytes(blob)
- if err != nil {
- return nil, nil, err
+ keyIdentities := []string{}
+ for _, blob := range blobs {
+ ki, err := mech.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyIdentities = append(keyIdentities, ki...)
}
removeDir = false
@@ -117,9 +122,9 @@ func (m *gpgmeSigningMechanism) SupportsSigning() error {
return nil
}
-// Sign creates a (non-detached) signature of input using keyIdentity.
+// Sign creates a (non-detached) signature of input using keyIdentity and passphrase.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
-func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
key, err := m.ctx.GetKey(keyIdentity, true)
if err != nil {
return nil, err
@@ -133,12 +138,38 @@ func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte,
if err != nil {
return nil, err
}
+
+ if passphrase != "" {
+ // Callback to write the passphrase to the specified file descriptor.
+ callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error {
+ if prevWasBad {
+ return errors.New("bad passphrase")
+ }
+ _, err := gpgmeFD.WriteString(passphrase + "\n")
+ return err
+ }
+ if err := m.ctx.SetCallback(callback); err != nil {
+ return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err)
+ }
+
+ // Loopback mode will use the callback instead of prompting the user.
+ if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
+ return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err)
+ }
+ }
+
if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
return nil, err
}
return sigBuffer.Bytes(), nil
}
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
// Verify parses unverifiedSignature and returns the content and the signer's identity
func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
signedBuffer := bytes.Buffer{}
@@ -155,13 +186,13 @@ func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []b
return nil, "", err
}
if len(sigs) != 1 {
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected GPG signature count %d", len(sigs)))
}
sig := sigs[0]
// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
// FIXME: Better error reporting eventually
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", sig))
}
return signedBuffer.Bytes(), sig.Fingerprint, nil
}
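
Editorial aside (not part of the diff): a condensed sketch of the loopback-pinentry pattern introduced in SignWithPassphrase above, using only the github.com/proglottis/gpgme calls already shown in the hunk. The wrapper function is invented for illustration.

	// signingContextWithLoopback returns a gpgme context that answers
	// passphrase prompts from memory instead of an interactive pinentry.
	func signingContextWithLoopback(passphrase string) (*gpgme.Context, error) {
		ctx, err := gpgme.New()
		if err != nil {
			return nil, err
		}
		if err := ctx.SetCallback(func(uidHint string, prevWasBad bool, fd *os.File) error {
			if prevWasBad {
				return errors.New("bad passphrase") // don’t retry the same value
			}
			_, err := fd.WriteString(passphrase + "\n") // gpgme expects a trailing newline
			return err
		}); err != nil {
			return nil, err
		}
		// Loopback mode makes gpgme consult the callback instead of prompting.
		if err := ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
			return nil, err
		}
		return ctx, nil
	}
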
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
index 0a09788f989..5d6c1ac8397 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
@@ -7,14 +7,22 @@ import (
"bytes"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"os"
"path"
"strings"
"time"
+ "github.com/containers/image/v5/signature/internal"
"github.com/containers/storage/pkg/homedir"
- "golang.org/x/crypto/openpgp"
+ // This is a fallback code; the primary recommendation is to use the gpgme mechanism
+ // implementation, which is out-of-process and more appropriate for handling long-term private key material
+ // than any Go implementation.
+ // For this verify-only fallback, we haven't reviewed any of the
+ // existing alternatives to choose; so, for now, continue to
+ // use this frozen deprecated implementation.
+ //lint:ignore SA1019 See above
+ "golang.org/x/crypto/openpgp" //nolint:staticcheck
)
// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
@@ -24,7 +32,7 @@ type openpgpSigningMechanism struct {
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
// The caller must call .Close() on the returned SigningMechanism.
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
m := &openpgpSigningMechanism{
keyring: openpgp.EntityList{},
}
@@ -37,7 +45,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
}
}
- pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg"))
+ pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg"))
if err != nil {
if !os.IsNotExist(err) {
return nil, err
@@ -55,14 +63,19 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
// recognizes _only_ public keys from the supplied blob, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) {
m := &openpgpSigningMechanism{
keyring: openpgp.EntityList{},
}
- keyIdentities, err := m.importKeysFromBytes(blob)
- if err != nil {
- return nil, nil, err
+ keyIdentities := []string{}
+ for _, blob := range blobs {
+ ki, err := m.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyIdentities = append(keyIdentities, ki...)
}
+
return m, keyIdentities, nil
}
@@ -104,10 +117,16 @@ func (m *openpgpSigningMechanism) SupportsSigning() error {
// Sign creates a (non-detached) signature of input using keyIdentity.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
-func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
}
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
// Verify parses unverifiedSignature and returns the content and the signer's identity
func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
@@ -117,7 +136,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
if !md.IsSigned {
return nil, "", errors.New("not signed")
}
- content, err := ioutil.ReadAll(md.UnverifiedBody)
+ content, err := io.ReadAll(md.UnverifiedBody)
if err != nil {
// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
// (and possibly also signed, but it _must_ be encrypted) and the signing
@@ -131,19 +150,19 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
}
if md.SignedBy == nil {
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", md.Signature))
}
if md.Signature != nil {
if md.Signature.SigLifetimeSecs != nil {
expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
if time.Now().After(expiry) {
- return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Signature expired on %s", expiry))
}
}
} else if md.SignatureV3 == nil {
// Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
// or sets md.SignatureError.
- return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
+ return nil, "", internal.NewInvalidSignatureError("Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set")
}
// Uppercase the fingerprint to be compatible with gpgme
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
index 82fbb68cb14..f8fdce2da5f 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_config.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -15,17 +15,17 @@ package signature
import (
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"regexp"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/signature/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
- "github.com/pkg/errors"
)
// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
@@ -33,10 +33,6 @@ import (
// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path'
var systemDefaultPolicyPath = builtinDefaultPolicyPath
-// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
-// DO NOT change this, instead see systemDefaultPolicyPath above.
-const builtinDefaultPolicyPath = "/etc/containers/policy.json"
-
// userPolicyFile is the path to the per user policy path.
var userPolicyFile = filepath.FromSlash(".config/containers/policy.json")
@@ -80,13 +76,13 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
// NewPolicyFromFile returns a policy configured in the specified file.
func NewPolicyFromFile(fileName string) (*Policy, error) {
- contents, err := ioutil.ReadFile(fileName)
+ contents, err := os.ReadFile(fileName)
if err != nil {
return nil, err
}
policy, err := NewPolicyFromBytes(contents)
if err != nil {
- return nil, errors.Wrapf(err, "invalid policy in %q", fileName)
+ return nil, fmt.Errorf("invalid policy in %q: %w", fileName, err)
}
return policy, nil
}
@@ -108,7 +104,7 @@ var _ json.Unmarshaler = (*Policy)(nil)
func (p *Policy) UnmarshalJSON(data []byte) error {
*p = Policy{}
transports := policyTransportsMap{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "default":
return &p.Default
@@ -139,10 +135,10 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyTransportScopes{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
// transport can be nil
transport := transports.Get(key)
- // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
}
@@ -185,8 +181,8 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyRequirements{}
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
- // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
+ // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
}
@@ -247,6 +243,8 @@ func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
res = &prSignedBy{}
case prTypeSignedBaseLayer:
res = &prSignedBaseLayer{}
+ case prTypeSigstoreSigned:
+ res = &prSigstoreSigned{}
default:
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
}
@@ -273,7 +271,7 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
*pr = prInsecureAcceptAnything{}
var tmp prInsecureAcceptAnything
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@@ -303,7 +301,7 @@ var _ json.Unmarshaler = (*prReject)(nil)
func (pr *prReject) UnmarshalJSON(data []byte) error {
*pr = prReject{}
var tmp prReject
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@@ -317,12 +315,22 @@ func (pr *prReject) UnmarshalJSON(data []byte) error {
}
// newPRSignedBy returns a new prSignedBy if parameters are valid.
-func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
if !keyType.IsValid() {
return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
}
- if len(keyPath) > 0 && len(keyData) > 0 {
- return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously")
+ keySources := 0
+ if keyPath != "" {
+ keySources++
+ }
+ if keyPaths != nil {
+ keySources++
+ }
+ if keyData != nil {
+ keySources++
+ }
+ if keySources != 1 {
+ return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths and keyData must be specified")
}
if signedIdentity == nil {
return nil, InvalidPolicyFormatError("signedIdentity not specified")
@@ -331,6 +339,7 @@ func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIden
prCommon: prCommon{Type: prTypeSignedBy},
KeyType: keyType,
KeyPath: keyPath,
+ KeyPaths: keyPaths,
KeyData: keyData,
SignedIdentity: signedIdentity,
}, nil
@@ -338,7 +347,7 @@ func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIden
// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
- return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
+ return newPRSignedBy(keyType, keyPath, nil, nil, signedIdentity)
}
// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
@@ -346,9 +355,19 @@ func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity Poli
return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
}
+// newPRSignedByKeyPaths is NewPRSignedByKeyPaths, except it returns the private type.
+func newPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, "", keyPaths, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPaths returns a new "signedBy" PolicyRequirement using KeyPaths
+func NewPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyPaths(keyType, keyPaths, signedIdentity)
+}
+
// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
- return newPRSignedBy(keyType, "", keyData, signedIdentity)
+ return newPRSignedBy(keyType, "", nil, keyData, signedIdentity)
}
// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
@@ -363,9 +382,9 @@ var _ json.Unmarshaler = (*prSignedBy)(nil)
func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
*pr = prSignedBy{}
var tmp prSignedBy
- var gotKeyPath, gotKeyData = false, false
+ var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false
var signedIdentity json.RawMessage
- if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
@@ -374,6 +393,9 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
case "keyPath":
gotKeyPath = true
return &tmp.KeyPath
+ case "keyPaths":
+ gotKeyPaths = true
+ return &tmp.KeyPaths
case "keyData":
gotKeyData = true
return &tmp.KeyData
@@ -402,16 +424,16 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
var res *prSignedBy
var err error
switch {
- case gotKeyPath && gotKeyData:
- return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
- case gotKeyPath && !gotKeyData:
+ case gotKeyPath && !gotKeyPaths && !gotKeyData:
res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
- case !gotKeyPath && gotKeyData:
+ case !gotKeyPath && gotKeyPaths && !gotKeyData:
+ res, err = newPRSignedByKeyPaths(tmp.KeyType, tmp.KeyPaths, tmp.SignedIdentity)
+ case !gotKeyPath && !gotKeyPaths && gotKeyData:
res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
- case !gotKeyPath && !gotKeyData:
- return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified")
- default: // Coverage: This should never happen
- return errors.Errorf("Impossible keyPath/keyData presence combination!?")
+ case !gotKeyPath && !gotKeyPaths && !gotKeyData:
+ return InvalidPolicyFormatError("Exactly one of keyPath, keyPaths and keyData must be specified, none of them present")
+ default:
+ return fmt.Errorf("Exactly one of keyPath, keyPaths and keyData must be specified, more than one present")
}
if err != nil {
return err
@@ -473,7 +495,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
*pr = prSignedBaseLayer{}
var tmp prSignedBaseLayer
var baseLayerIdentity json.RawMessage
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"baseLayerIdentity": &baseLayerIdentity,
}); err != nil {
@@ -496,6 +518,107 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
return nil
}
+// newPRSigstoreSigned returns a new prSigstoreSigned if parameters are valid.
+func newPRSigstoreSigned(keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
+ if len(keyPath) > 0 && len(keyData) > 0 {
+ return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously")
+ }
+ if signedIdentity == nil {
+ return nil, InvalidPolicyFormatError("signedIdentity not specified")
+ }
+ return &prSigstoreSigned{
+ prCommon: prCommon{Type: prTypeSigstoreSigned},
+ KeyPath: keyPath,
+ KeyData: keyData,
+ SignedIdentity: signedIdentity,
+ }, nil
+}
+
+// newPRSigstoreSignedKeyPath is NewPRSigstoreSignedKeyPath, except it returns the private type.
+func newPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
+ return newPRSigstoreSigned(keyPath, nil, signedIdentity)
+}
+
+// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath
+func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSigstoreSignedKeyPath(keyPath, signedIdentity)
+}
+
+// newPRSigstoreSignedKeyData is NewPRSigstoreSignedKeyData, except it returns the private type.
+func newPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
+ return newPRSigstoreSigned("", keyData, signedIdentity)
+}
+
+// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData
+func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSigstoreSignedKeyData(keyData, signedIdentity)
+}
+
+// Compile-time check that prSigstoreSigned implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
+ *pr = prSigstoreSigned{}
+ var tmp prSigstoreSigned
+ var gotKeyPath, gotKeyData = false, false
+ var signedIdentity json.RawMessage
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
+ switch key {
+ case "type":
+ return &tmp.Type
+ case "keyPath":
+ gotKeyPath = true
+ return &tmp.KeyPath
+ case "keyData":
+ gotKeyData = true
+ return &tmp.KeyData
+ case "signedIdentity":
+ return &signedIdentity
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSigstoreSigned {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ if signedIdentity == nil {
+ tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+ } else {
+ si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+ if err != nil {
+ return err
+ }
+ tmp.SignedIdentity = si
+ }
+
+ var res *prSigstoreSigned
+ var err error
+ switch {
+ case gotKeyPath && gotKeyData:
+ return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+ case gotKeyPath && !gotKeyData:
+ res, err = newPRSigstoreSignedKeyPath(tmp.KeyPath, tmp.SignedIdentity)
+ case !gotKeyPath && gotKeyData:
+ res, err = newPRSigstoreSignedKeyData(tmp.KeyData, tmp.SignedIdentity)
+ case !gotKeyPath && !gotKeyData:
+ return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
+ default: // Coverage: This should never happen
+ return fmt.Errorf("Impossible keyPath/keyData presence combination!?")
+ }
+ if err != nil {
+ // Coverage: This cannot currently happen, creating a prSigstoreSigned only fails
+ // if signedIdentity is nil, which we replace with a default above.
+ return err
+ }
+ *pr = *res
+
+ return nil
+}
+
// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
var typeField prmCommon
@@ -542,7 +665,7 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil)
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchExact{}
var tmp prmMatchExact
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@@ -572,7 +695,7 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepoDigestOrExact{}
var tmp prmMatchRepoDigestOrExact
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@@ -602,7 +725,7 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil)
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepository{}
var tmp prmMatchRepository
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@@ -642,7 +765,7 @@ var _ json.Unmarshaler = (*prmExactReference)(nil)
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
*prm = prmExactReference{}
var tmp prmExactReference
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"dockerReference": &tmp.DockerReference,
}); err != nil {
@@ -684,7 +807,7 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil)
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
*prm = prmExactRepository{}
var tmp prmExactRepository
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"dockerRepository": &tmp.DockerRepository,
}); err != nil {
@@ -756,7 +879,7 @@ var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
*prm = prmRemapIdentity{}
var tmp prmRemapIdentity
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"prefix": &tmp.Prefix,
"signedPrefix": &tmp.SignedPrefix,
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
index edcbf52f4da..533a997b1cb 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -7,9 +7,11 @@ package signature
import (
"context"
+ "fmt"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/unparsedimage"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -55,14 +57,14 @@ type PolicyRequirement interface {
// a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
// signature are authorized to run code as root, or to affect system or cluster configuration.
- isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+ isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
// isRunningImageAllowed returns true if the requirement allows running an image.
// If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
- isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
+ isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error)
}
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
@@ -71,7 +73,7 @@ type PolicyReferenceMatch interface {
// matchesDockerReference decides whether a specific image identity is accepted for an image
// (or, usually, for the image's Reference().DockerReference()). Note that
// image.Reference().DockerReference() may be nil.
- matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+ matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool
}
// PolicyContext encapsulates a policy and possible cached state
@@ -95,7 +97,7 @@ const (
// changeContextState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
if pc.state != expected {
- return errors.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+ return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
}
pc.state = new
return nil
@@ -170,11 +172,11 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
// but it does not necessarily mean that the contents of the signature are
// consistent with local policy.
// For example:
-// - Do not use a an existence of an accepted signature to determine whether to run
-// a container based on this image; use IsRunningImageAllowed instead.
-// - Just because a signature is accepted does not automatically mean the contents of the
-// signature are authorized to run code as root, or to affect system or cluster configuration.
-func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) {
+// - Do not use the existence of an accepted signature to determine whether to run
+// a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+// signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return nil, err
}
@@ -185,11 +187,12 @@ func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, im
}
}()
+ image := unparsedimage.FromPublic(publicImage)
+
logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
reqs := pc.requirementsForImageRef(image.Reference())
- // FIXME: rename Signatures to UnverifiedSignatures
- // FIXME: pass context.Context
+ // FIXME: Use image.UntrustedSignatures, use that to improve error messages (needs tests!)
unverifiedSignatures, err := image.Signatures(ctx)
if err != nil {
return nil, err
@@ -255,7 +258,7 @@ func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, im
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
-func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) {
+func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, publicImage types.UnparsedImage) (res bool, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return false, err
}
@@ -266,6 +269,8 @@ func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.
}
}()
+ image := unparsedimage.FromPublic(publicImage)
+
logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
reqs := pc.requirementsForImageRef(image.Reference())
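
Editorial aside (not part of the diff): the exported entry points keep their types.UnparsedImage signatures and wrap the argument via unparsedimage.FromPublic internally, so existing callers are unaffected. A minimal usage sketch, assuming the existing public API (DefaultPolicy, NewPolicyContext, Destroy) and the usual context/types/signature imports:

	// runningAllowed evaluates the system default policy against img.
	func runningAllowed(ctx context.Context, img types.UnparsedImage) (bool, error) {
		policy, err := signature.DefaultPolicy(nil)
		if err != nil {
			return false, err
		}
		pc, err := signature.NewPolicyContext(policy)
		if err != nil {
			return false, err
		}
		defer pc.Destroy() // error ignored in this sketch
		return pc.IsRunningImageAllowed(ctx, img)
	}
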
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
index 55cdd3054f1..a8bc0130107 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
@@ -5,15 +5,15 @@ package signature
import (
"context"
- "github.com/containers/image/v5/types"
+ "github.com/containers/image/v5/internal/private"
"github.com/sirupsen/logrus"
)
-func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
return sarUnknown, nil, nil
}
-func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
// FIXME? Reject this at policy parsing time already?
logrus.Errorf("signedBaseLayer not implemented yet!")
return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
index 26cca4759e0..ef98b8b83f9 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
@@ -4,44 +4,59 @@ package signature
import (
"context"
+ "errors"
"fmt"
- "io/ioutil"
+ "os"
"strings"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
-func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
switch pr.KeyType {
case SBKeyTypeGPGKeys:
case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
// FIXME? Reject this at policy parsing time already?
- return sarRejected, nil, errors.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType))
+ return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
default:
// This should never happen, newPRSignedBy ensures KeyType.IsValid()
- return sarRejected, nil, errors.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType))
+ return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
}
- if pr.KeyPath != "" && pr.KeyData != nil {
- return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
- }
// FIXME: move this to per-context initialization
- var data []byte
- if pr.KeyData != nil {
- data = pr.KeyData
- } else {
- d, err := ioutil.ReadFile(pr.KeyPath)
+ var data [][]byte
+ keySources := 0
+ if pr.KeyPath != "" {
+ keySources++
+ d, err := os.ReadFile(pr.KeyPath)
if err != nil {
return sarRejected, nil, err
}
- data = d
+ data = [][]byte{d}
+ }
+ if pr.KeyPaths != nil {
+ keySources++
+ data = [][]byte{}
+ for _, path := range pr.KeyPaths {
+ d, err := os.ReadFile(path)
+ if err != nil {
+ return sarRejected, nil, err
+ }
+ data = append(data, d)
+ }
+ }
+ if pr.KeyData != nil {
+ keySources++
+ data = [][]byte{pr.KeyData}
+ }
+ if keySources != 1 {
+ return sarRejected, nil, errors.New(`Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`)
}
// FIXME: move this to per-context initialization
- mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
+ mech, trustedIdentities, err := newEphemeralGPGSigningMechanism(data)
if err != nil {
return sarRejected, nil, err
}
@@ -89,8 +104,9 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types
return sarAccepted, signature, nil
}
-func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
- // FIXME: pass context.Context
+func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ // FIXME: Use image.UntrustedSignatures, use that to improve error messages
+ // (needs tests!)
sigs, err := image.Signatures(ctx)
if err != nil {
return false, err
@@ -108,7 +124,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.Unp
// Huh?! This should not happen at all; treat it as any other invalid value.
fallthrough
default:
- reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+ reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
}
rejections = append(rejections, reason)
}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
new file mode 100644
index 00000000000..ccf1d80ac8d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
@@ -0,0 +1,140 @@
+// Policy evaluation for prSigstoreSigned.
+
+package signature
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ // We don’t know of a single user of this API, and we might return unexpected values in Signature.
+ // For now, just punt.
+ return sarRejected, nil, errors.New("isSignatureAuthorAccepted is not implemented for sigstore")
+}
+
+func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) {
+ if pr.KeyPath != "" && pr.KeyData != nil {
+ return sarRejected, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
+ }
+ // FIXME: move this to per-context initialization
+ var publicKeyPEM []byte
+ if pr.KeyData != nil {
+ publicKeyPEM = pr.KeyData
+ } else {
+ d, err := os.ReadFile(pr.KeyPath)
+ if err != nil {
+ return sarRejected, err
+ }
+ publicKeyPEM = d
+ }
+
+ publicKey, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
+ if err != nil {
+ return sarRejected, fmt.Errorf("parsing public key: %w", err)
+ }
+
+ untrustedAnnotations := sig.UntrustedAnnotations()
+ untrustedBase64Signature, ok := untrustedAnnotations[signature.SigstoreSignatureAnnotationKey]
+ if !ok {
+ return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey)
+ }
+
+ signature, err := internal.VerifySigstorePayload(publicKey, sig.UntrustedPayload(), untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
+ ValidateSignedDockerReference: func(ref string) error {
+ if !pr.SignedIdentity.matchesDockerReference(image, ref) {
+ return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+ }
+ return nil
+ },
+ ValidateSignedDockerManifestDigest: func(digest digest.Digest) error {
+ m, _, err := image.Manifest(ctx)
+ if err != nil {
+ return err
+ }
+ digestMatches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return err
+ }
+ if !digestMatches {
+ return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
+ }
+ return nil
+ },
+ })
+ if err != nil {
+ return sarRejected, err
+ }
+ if signature == nil { // A paranoid sanity check that VerifySigstorePayload has returned consistent values
+ return sarRejected, errors.New("internal error: VerifySigstorePayload succeeded but returned no data") // Coverage: This should never happen.
+ }
+
+ return sarAccepted, nil
+}
+
+func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ sigs, err := image.UntrustedSignatures(ctx)
+ if err != nil {
+ return false, err
+ }
+ var rejections []error
+ foundNonSigstoreSignatures := 0
+ foundSigstoreNonAttachments := 0
+ for _, s := range sigs {
+ sigstoreSig, ok := s.(signature.Sigstore)
+ if !ok {
+ foundNonSigstoreSignatures++
+ continue
+ }
+ if sigstoreSig.UntrustedMIMEType() != signature.SigstoreSignatureMIMEType {
+ foundSigstoreNonAttachments++
+ continue
+ }
+
+ var reason error
+ switch res, err := pr.isSignatureAccepted(ctx, image, sigstoreSig); res {
+ case sarAccepted:
+ // One accepted signature is enough.
+ return true, nil
+ case sarRejected:
+ reason = err
+ case sarUnknown:
+ // Huh?! This should not happen at all; treat it as any other invalid value.
+ fallthrough
+ default:
+ reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+ }
+ rejections = append(rejections, reason)
+ }
+ var summary error
+ switch len(rejections) {
+ case 0:
+ if foundNonSigstoreSignatures == 0 && foundSigstoreNonAttachments == 0 {
+ // A nice message for the most common case.
+ summary = PolicyRequirementError("A signature was required, but no signature exists")
+ } else {
+ summary = PolicyRequirementError(fmt.Sprintf("A signature was required, but no signature exists (%d non-sigstore signatures, %d sigstore non-signature attachments)",
+ foundNonSigstoreSignatures, foundSigstoreNonAttachments))
+ }
+ case 1:
+ summary = rejections[0]
+ default:
+ var msgs []string
+ for _, e := range rejections {
+ msgs = append(msgs, e.Error())
+ }
+ summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
+ strings.Join(msgs, "; ")))
+ }
+ return false, summary
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
index f949088b5f6..031866f0dc7 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
@@ -6,24 +6,24 @@ import (
"context"
"fmt"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/transports"
- "github.com/containers/image/v5/types"
)
-func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
// prInsecureAcceptAnything semantics: Every image is allowed to run,
// but this does not consider the signature as verified.
return sarUnknown, nil, nil
}
-func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
return true, nil
}
-func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
}
-func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+func (pr *prReject) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_common.go b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go
new file mode 100644
index 00000000000..290fc24599a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go
@@ -0,0 +1,8 @@
+//go:build !freebsd
+// +build !freebsd
+
+package signature
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath in policy_config.go.
+const builtinDefaultPolicyPath = "/etc/containers/policy.json"
diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go
new file mode 100644
index 00000000000..702b7171fb6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go
@@ -0,0 +1,8 @@
+//go:build freebsd
+// +build freebsd
+
+package signature
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath in policy_config.go.
+const builtinDefaultPolicyPath = "/usr/local/etc/containers/policy.json"
diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
index 064866cf6c3..4e70c0f2e39 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go
@@ -7,12 +7,12 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/transports"
- "github.com/containers/image/v5/types"
)
// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
-func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+func parseImageAndDockerReference(image private.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
r1 := image.Reference().DockerReference()
if r1 == nil {
return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
@@ -25,7 +25,7 @@ func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (referen
return r1, r2, nil
}
-func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmMatchExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
@@ -56,7 +56,7 @@ func matchRepoDigestOrExactReferenceValues(intended, signature reference.Named)
return false
}
}
-func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
@@ -64,7 +64,7 @@ func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.Unparse
return matchRepoDigestOrExactReferenceValues(intended, signature)
}
-func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmMatchRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
@@ -85,7 +85,7 @@ func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, err
return r1, r2, nil
}
-func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmExactReference) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
if err != nil {
return false
@@ -97,7 +97,7 @@ func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage,
return signature.String() == intended.String()
}
-func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmExactRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
if err != nil {
return false
@@ -141,7 +141,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc
return newParsedRef, nil
}
-func (prm *prmRemapIdentity) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+func (prm *prmRemapIdentity) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go
index c6819929bd2..9e837452a7c 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_types.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
@@ -46,6 +46,7 @@ const (
prTypeReject prTypeIdentifier = "reject"
prTypeSignedBy prTypeIdentifier = "signedBy"
prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer"
+ prTypeSigstoreSigned prTypeIdentifier = "sigstoreSigned"
)
// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
@@ -66,18 +67,20 @@ type prReject struct {
type prSignedBy struct {
prCommon
- // KeyType specifies what kind of key reference KeyPath/KeyData is.
+ // KeyType specifies what kind of key reference KeyPath/KeyPaths/KeyData is.
// Acceptable values are “GPGKeys” | “signedByGPGKeys” “X.509Certificates” | “signedByX.509CAs”
// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
KeyType sbKeyType `json:"keyType"`
- // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+ // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyPath string `json:"keyPath,omitempty"`
- // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+ // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+ KeyPaths []string `json:"keyPaths,omitempty"`
+ // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
KeyData []byte `json:"keyData,omitempty"`
// SignedIdentity specifies what image identity the signature must be claiming about the image.
- // Defaults to "match-exact" if not specified.
+ // Defaults to "matchRepoDigestOrExact" if not specified.
SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
}
@@ -104,6 +107,24 @@ type prSignedBaseLayer struct {
BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
}
+// prSigstoreSigned is a PolicyRequirement with type = prTypeSigstoreSigned: the image is signed by trusted keys for a specified identity
+type prSigstoreSigned struct {
+ prCommon
+
+ // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath and KeyData must be specified.
+ KeyPath string `json:"keyPath,omitempty"`
+ // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+ KeyData []byte `json:"keyData,omitempty"`
+ // FIXME: Multiple public keys?
+
+ // FIXME: Support fulcio+rekor as an alternative.
+
+ // SignedIdentity specifies what image identity the signature must be claiming about the image.
+ // Defaults to "matchRepoDigestOrExact" if not specified.
+ // Note that /usr/bin/cosign interoperability might require using repo-only matching.
+ SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
// The type is public, but its implementation is private.
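
The json struct tags on prSigstoreSigned above define the on-disk policy syntax for the new requirement type. A minimal sketch of a policy using it, parsed through the existing NewPolicyFromBytes; the scope and key path are placeholders, and signedIdentity is omitted so it takes the matchRepoDigestOrExact default:

package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	policyJSON := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "example.com/app": [
	                {"type": "sigstoreSigned", "keyPath": "/etc/pki/cosign.pub"}
	            ]
	        }
	    }
	}`)
	policy, err := signature.NewPolicyFromBytes(policyJSON)
	if err != nil {
		panic(err)
	}
	fmt.Println("parsed", len(policy.Transports), "transport scope(s)")
}
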
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
new file mode 100644
index 00000000000..dbc03ec0a05
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
@@ -0,0 +1,70 @@
+package sigstore
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+
+ "github.com/sigstore/sigstore/pkg/signature"
+ "github.com/theupdateframework/go-tuf/encrypted"
+)
+
+// The following code was copied from github.com/sigstore.
+// FIXME: Eliminate that duplication.
+
+// Copyright 2021 The Sigstore Authors.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const (
+ // from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType
+ sigstorePrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
+)
+
+// from sigstore/cosign/pkg/cosign.loadPrivateKey
+// FIXME: Do we need all of these key formats, and all of those
+func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
+ // Decrypt first
+ p, _ := pem.Decode(key)
+ if p == nil {
+ return nil, errors.New("invalid pem block")
+ }
+ if p.Type != sigstorePrivateKeyPemType {
+ return nil, fmt.Errorf("unsupported pem type: %s", p.Type)
+ }
+
+ x509Encoded, err := encrypted.Decrypt(p.Bytes, pass)
+ if err != nil {
+ return nil, fmt.Errorf("decrypt: %w", err)
+ }
+
+ pk, err := x509.ParsePKCS8PrivateKey(x509Encoded)
+ if err != nil {
+ return nil, fmt.Errorf("parsing private key: %w", err)
+ }
+ switch pk := pk.(type) {
+ case *rsa.PrivateKey:
+ return signature.LoadRSAPKCS1v15SignerVerifier(pk, crypto.SHA256)
+ case *ecdsa.PrivateKey:
+ return signature.LoadECDSASignerVerifier(pk, crypto.SHA256)
+ case ed25519.PrivateKey:
+ return signature.LoadED25519SignerVerifier(pk)
+ default:
+ return nil, errors.New("unsupported key type")
+ }
+}
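
loadPrivateKey only accepts cosign-style encrypted keys, and the PEM type check is the gate. A minimal standalone sketch of that first step, assuming a cosign-generated key file at a placeholder path:

package main

import (
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	keyPEM, err := os.ReadFile("cosign.key") // placeholder path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(keyPEM)
	if block == nil {
		panic("invalid pem block")
	}
	// loadPrivateKey rejects anything other than this type before even
	// attempting decryption.
	fmt.Println("PEM type:", block.Type) // expected: ENCRYPTED COSIGN PRIVATE KEY
}
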
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/sign.go b/vendor/github.com/containers/image/v5/signature/sigstore/sign.go
new file mode 100644
index 00000000000..daa6ab387cf
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/sign.go
@@ -0,0 +1,65 @@
+package sigstore
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
+ sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+// SignDockerManifestWithPrivateKeyFileUnstable returns a signature for manifest as the specified dockerReference,
+// using a private key and an optional passphrase.
+//
+// Yes, this returns an internal type, and should currently not be used outside of c/image.
+// There is NO COMMITMENT TO STABLE API.
+func SignDockerManifestWithPrivateKeyFileUnstable(m []byte, dockerReference reference.Named, privateKeyFile string, passphrase []byte) (signature.Sigstore, error) {
+ privateKeyPEM, err := os.ReadFile(privateKeyFile)
+ if err != nil {
+ return signature.Sigstore{}, fmt.Errorf("reading private key from %s: %w", privateKeyFile, err)
+ }
+ signer, err := loadPrivateKey(privateKeyPEM, passphrase)
+ if err != nil {
+ return signature.Sigstore{}, fmt.Errorf("initializing private key: %w", err)
+ }
+
+ return signDockerManifest(m, dockerReference, signer)
+}
+
+func signDockerManifest(m []byte, dockerReference reference.Named, signer sigstoreSignature.Signer) (signature.Sigstore, error) {
+ if reference.IsNameOnly(dockerReference) {
+ return signature.Sigstore{}, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
+ }
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return signature.Sigstore{}, err
+ }
+ // sigstore/cosign completely ignores dockerReference for actual policy decisions.
+ // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks.
+ // So, just do what simple signing does, and cosign won’t mind.
+ payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String())
+ payloadBytes, err := json.Marshal(payloadData)
+ if err != nil {
+ return signature.Sigstore{}, err
+ }
+
+ // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(),
+// which seems not to be used by anything, so we don’t bother.
+ signatureBytes, err := signer.SignMessage(bytes.NewReader(payloadBytes))
+ if err != nil {
+ return signature.Sigstore{}, fmt.Errorf("creating signature: %w", err)
+ }
+ base64Signature := base64.StdEncoding.EncodeToString(signatureBytes)
+
+ return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType,
+ payloadBytes,
+ map[string]string{
+ signature.SigstoreSignatureAnnotationKey: base64Signature,
+ }), nil
+}
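
A minimal sketch of calling the new signing entry point; per the comment above this is not meant for use outside c/image, and the manifest path, reference, key file, and passphrase are all placeholders:

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/signature/sigstore"
)

func main() {
	manifestBytes, err := os.ReadFile("manifest.json") // placeholder manifest blob
	if err != nil {
		panic(err)
	}
	// Must carry a tag or digest, or signDockerManifest rejects it.
	ref, err := reference.ParseNormalizedNamed("example.com/app:v1")
	if err != nil {
		panic(err)
	}
	sig, err := sigstore.SignDockerManifestWithPrivateKeyFileUnstable(
		manifestBytes, ref, "cosign.key", []byte("passphrase"))
	if err != nil {
		panic(err)
	}
	// sig is an internal signature.Sigstore: the payload plus the base64
	// signature annotation built in signDockerManifest above.
	fmt.Println("signed payload bytes:", len(sig.UntrustedPayload()))
}
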
diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/simple.go
similarity index 86%
rename from vendor/github.com/containers/image/v5/signature/signature.go
rename to vendor/github.com/containers/image/v5/signature/simple.go
index 09f4f85e0a8..1ca571e5aa5 100644
--- a/vendor/github.com/containers/image/v5/signature/signature.go
+++ b/vendor/github.com/containers/image/v5/signature/simple.go
@@ -6,12 +6,13 @@ package signature
import (
"encoding/json"
+ "errors"
"fmt"
"time"
+ "github.com/containers/image/v5/signature/internal"
"github.com/containers/image/v5/version"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
const (
@@ -19,13 +20,7 @@ const (
)
// InvalidSignatureError is returned when parsing an invalid signature.
-type InvalidSignatureError struct {
- msg string
-}
-
-func (err InvalidSignatureError) Error() string {
- return err.msg
-}
+type InvalidSignatureError = internal.InvalidSignatureError
// Signature is a parsed content of a signature.
// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
@@ -111,18 +106,18 @@ var _ json.Unmarshaler = (*untrustedSignature)(nil)
func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
err := s.strictUnmarshalJSON(data)
if err != nil {
- if formatErr, ok := err.(jsonFormatError); ok {
- err = InvalidSignatureError{msg: formatErr.Error()}
+ if formatErr, ok := err.(internal.JSONFormatError); ok {
+ err = internal.NewInvalidSignatureError(formatErr.Error())
}
}
return err
}
-// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
-// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type.
+// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var critical, optional json.RawMessage
- if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"critical": &critical,
"optional": &optional,
}); err != nil {
@@ -132,7 +127,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var creatorID string
var timestamp float64
var gotCreatorID, gotTimestamp = false, false
- if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
+ if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
switch key {
case "creator":
gotCreatorID = true
@@ -153,14 +148,14 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
- return InvalidSignatureError{msg: "Field optional.timestamp is not is not an integer"}
+ return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer")
}
s.UntrustedTimestamp = &intTimestamp
}
var t string
var image, identity json.RawMessage
- if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
"type": &t,
"image": &image,
"identity": &identity,
@@ -168,18 +163,18 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
return err
}
if t != signatureType {
- return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
}
var digestString string
- if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
"docker-manifest-digest": &digestString,
}); err != nil {
return err
}
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
- return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+ return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
"docker-reference": &s.UntrustedDockerReference,
})
}
@@ -190,12 +185,20 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
// of the system just because it is a private key — actually the presence of a private key
// on the system increases the likelihood of an a successful attack on that private key
// on that particular system.)
-func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) {
json, err := json.Marshal(s)
if err != nil {
return nil, err
}
+ if newMech, ok := mech.(signingMechanismWithPassphrase); ok {
+ return newMech.SignWithPassphrase(json, keyIdentity, passphrase)
+ }
+
+ if passphrase != "" {
+ return nil, errors.New("signing mechanism does not support passphrases")
+ }
+
return mech.Sign(json, keyIdentity)
}
@@ -223,7 +226,7 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
var unmatchedSignature untrustedSignature
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
- return nil, InvalidSignatureError{msg: err.Error()}
+ return nil, internal.NewInvalidSignatureError(err.Error())
}
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
return nil, err
@@ -261,7 +264,7 @@ func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []
}
var untrustedDecodedContents untrustedSignature
if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
- return nil, InvalidSignatureError{msg: err.Error()}
+ return nil, internal.NewInvalidSignatureError(err.Error())
}
var timestamp *time.Time // = nil
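
The passphrase plumbing in sign() above relies on a common Go pattern: an optional capability is expressed as a second, wider interface and discovered with a type assertion, so existing SigningMechanism implementations keep working unchanged. A self-contained sketch of the pattern with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

type Signer interface {
	Sign(data []byte) ([]byte, error)
}

// passphraseSigner is the "upgraded" interface, analogous to
// signingMechanismWithPassphrase in the code above.
type passphraseSigner interface {
	Signer
	SignWithPassphrase(data []byte, passphrase string) ([]byte, error)
}

func sign(m Signer, data []byte, passphrase string) ([]byte, error) {
	if ps, ok := m.(passphraseSigner); ok {
		return ps.SignWithPassphrase(data, passphrase)
	}
	if passphrase != "" {
		return nil, errors.New("signing mechanism does not support passphrases")
	}
	return m.Sign(data)
}

type plainSigner struct{}

func (plainSigner) Sign(data []byte) ([]byte, error) {
	return append([]byte("sig:"), data...), nil
}

func main() {
	out, err := sign(plainSigner{}, []byte("payload"), "")
	fmt.Println(string(out), err)
}
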
diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go
new file mode 100644
index 00000000000..ae3bfa8fa4c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go
@@ -0,0 +1,924 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ graphdriver "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chunked"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob
+ // with a digest-based name that doesn't match its contents.
+ // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value),
+ // and there is no known user of this error.
+ ErrBlobDigestMismatch = errors.New("blob digest mismatch")
+ // ErrBlobSizeMismatch is returned when PutBlob() is given a blob
+ // with an expected size that doesn't match the reader.
+ ErrBlobSizeMismatch = errors.New("blob size mismatch")
+)
+
+type storageImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.ImplementsPutBlobPartial
+ stubs.AlwaysSupportsSignatures
+
+ imageRef storageReference
+ directory string // Temporary directory where we store blobs until Commit() time
+ nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
+ manifest []byte // Manifest contents, temporary
+ manifestDigest digest.Digest // Valid if len(manifest) != 0
+ signatures []byte // Signature contents, temporary
+ signatureses map[digest.Digest][]byte // Instance signature contents, temporary
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
+
+ // A storage destination may be used concurrently. Accesses are
+ // serialized via a mutex. Please refer to the individual comments
+ // below for details.
+ lock sync.Mutex
+ // Mapping from layer (by index) to the associated ID in the storage.
+ // It's protected *implicitly* since `commitLayer()`, at any given
+ // time, can only be executed by *one* goroutine. Please refer to
+ // `queueOrCommit()` for further details on how the single-caller
+ // guarantee is implemented.
+ indexToStorageID map[int]*string
+ // All accesses to below data are protected by `lock` which is made
+ // *explicit* in the code.
+ blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
+ fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
+ filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
+ currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
+ indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob
+ blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
+ diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image
+func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
+ directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
+ if err != nil {
+ return nil, fmt.Errorf("creating a temporary directory: %w", err)
+ }
+ dest := &storageImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ imgspecv1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ },
+ // We ultimately have to decompress layers to populate trees on disk
+ // and need to explicitly ask for it here, so that the layers' MIME
+ // types can be set accordingly.
+ DesiredLayerCompression: types.PreserveOriginal,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: true,
+ IgnoresEmbeddedDockerReference: true, // Yes, we want the unmodified manifest
+ HasThreadSafePutBlob: true,
+ }),
+
+ imageRef: imageRef,
+ directory: directory,
+ signatureses: make(map[digest.Digest][]byte),
+ blobDiffIDs: make(map[digest.Digest]digest.Digest),
+ blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
+ fileSizes: make(map[digest.Digest]int64),
+ filenames: make(map[digest.Digest]string),
+ SignatureSizes: []int{},
+ SignaturesSizes: make(map[digest.Digest][]int),
+ indexToStorageID: make(map[int]*string),
+ indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
+ diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
+ }
+ dest.Compat = impl.AddCompat(dest)
+ return dest, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (s *storageImageDestination) Reference() types.ImageReference {
+ return s.imageRef
+}
+
+// Close cleans up the temporary directory and additional layer store handlers.
+func (s *storageImageDestination) Close() error {
+ for _, al := range s.blobAdditionalLayer {
+ al.Release()
+ }
+ for _, v := range s.diffOutputs {
+ if v.Target != "" {
+ _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
+ }
+ }
+ return os.RemoveAll(s.directory)
+}
+
+func (s *storageImageDestination) computeNextBlobCacheFile() string {
+ return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options)
+ if err != nil {
+ return info, err
+ }
+
+ if options.IsConfig || options.LayerIndex == nil {
+ return info, nil
+ }
+
+ return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+}
+
+// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
+ // Stores a layer or data blob in our temporary directory, checking that any information
+ // in the blobinfo matches the incoming data.
+ errorBlobInfo := types.BlobInfo{
+ Digest: "",
+ Size: -1,
+ }
+ if blobinfo.Digest != "" {
+ if err := blobinfo.Digest.Validate(); err != nil {
+ return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
+ }
+ }
+
+ // Set up to digest the blob if necessary, and count its size while saving it to a file.
+ filename := s.computeNextBlobCacheFile()
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+ if err != nil {
+ return errorBlobInfo, fmt.Errorf("creating temporary file %q: %w", filename, err)
+ }
+ defer file.Close()
+ counter := ioutils.NewWriteCounter(file)
+ stream = io.TeeReader(stream, counter)
+ digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
+ decompressed, err := archive.DecompressStream(stream)
+ if err != nil {
+ return errorBlobInfo, fmt.Errorf("setting up to decompress blob: %w", err)
+ }
+
+ diffID := digest.Canonical.Digester()
+ // Copy the data to the file.
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ _, err = io.Copy(diffID.Hash(), decompressed)
+ decompressed.Close()
+ if err != nil {
+ return errorBlobInfo, fmt.Errorf("storing blob to file %q: %w", filename, err)
+ }
+
+ // Determine blob properties, and fail if information that we were given about the blob
+ // is known to be incorrect.
+ blobDigest := digester.Digest()
+ blobSize := blobinfo.Size
+ if blobSize < 0 {
+ blobSize = counter.Count
+ } else if blobinfo.Size != counter.Count {
+ return errorBlobInfo, ErrBlobSizeMismatch
+ }
+
+ // Record information about the blob.
+ s.lock.Lock()
+ s.blobDiffIDs[blobDigest] = diffID.Digest()
+ s.fileSizes[blobDigest] = counter.Count
+ s.filenames[blobDigest] = filename
+ s.lock.Unlock()
+ // This is safe because we have just computed diffID, and blobDigest was either computed
+ // by us, or validated by the caller (usually copy.digestingReader).
+ options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
+ return types.BlobInfo{
+ Digest: blobDigest,
+ Size: blobSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+}
+
+type zstdFetcher struct {
+ chunkAccessor private.BlobChunkAccessor
+ ctx context.Context
+ blobInfo types.BlobInfo
+}
+
+// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
+func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ var newChunks []private.ImageSourceChunk
+ for _, v := range chunks {
+ i := private.ImageSourceChunk{
+ Offset: v.Offset,
+ Length: v.Length,
+ }
+ newChunks = append(newChunks, i)
+ }
+ rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks)
+ if _, ok := err.(private.BadPartialRequestError); ok {
+ err = chunked.ErrBadRequest{}
+ }
+ return rc, errs, err
+
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+ fetcher := zstdFetcher{
+ chunkAccessor: chunkAccessor,
+ ctx: ctx,
+ blobInfo: srcInfo,
+ }
+
+ differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
+ if err != nil {
+ return srcInfo, err
+ }
+
+ out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
+ if err != nil {
+ return srcInfo, err
+ }
+
+ blobDigest := srcInfo.Digest
+
+ s.lock.Lock()
+ s.blobDiffIDs[blobDigest] = blobDigest
+ s.fileSizes[blobDigest] = 0
+ s.filenames[blobDigest] = ""
+ s.diffOutputs[blobDigest] = out
+ s.lock.Unlock()
+
+ return srcInfo, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options)
+ if err != nil || !reused || options.LayerIndex == nil {
+ return reused, info, err
+ }
+
+ return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+}
+
+// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ // lock the entire method as it executes fairly quickly
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if options.SrcRef != nil {
+ // Check if we have the layer in the underlying additional layer store.
+ aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String())
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobinfo.Digest, err)
+ } else if err == nil {
+ // Record the uncompressed value so that we can use it to calculate layer IDs.
+ s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
+ s.blobAdditionalLayer[blobinfo.Digest] = aLayer
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: aLayer.CompressedSize(),
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+ }
+
+ if blobinfo.Digest == "" {
+ return false, types.BlobInfo{}, errors.New(`Can not check for a blob with unknown digest`)
+ }
+ if err := blobinfo.Digest.Validate(); err != nil {
+ return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
+ }
+
+ // Check if we've already cached it in a file.
+ if size, ok := s.fileSizes[blobinfo.Digest]; ok {
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: size,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+
+ // Check if we have a wasn't-compressed layer in storage that's based on that blob.
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobinfo.Digest, err)
+ }
+ if len(layers) > 0 {
+ // Save this for completeness.
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+
+ // Check if we have a was-compressed layer in storage that's based on that blob.
+ layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobinfo.Digest, err)
+ }
+ if len(layers) > 0 {
+ // Record the uncompressed value so that we can use it to calculate layer IDs.
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].CompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+
+ // Does the blob correspond to a known DiffID which we already have available?
+ // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+ // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
+ if options.CanSubstitute || blobinfo.Size != -1 {
+ if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
+ }
+ if len(layers) > 0 {
+ if blobinfo.Size != -1 {
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, blobinfo, nil
+ }
+ if !options.CanSubstitute {
+ return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo)
+ }
+ s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
+ return true, types.BlobInfo{
+ Digest: uncompressedDigest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+ }
+ }
+
+ // Nope, we don't have it.
+ return false, types.BlobInfo{}, nil
+}
+
+// computeID computes a recommended image ID based on information we have so far. If
+// the manifest is not of a type that we recognize, we return an empty value, indicating
+// that since we don't have a recommendation, a random ID should be used if one needs
+// to be allocated.
+func (s *storageImageDestination) computeID(m manifest.Manifest) string {
+ // Build the diffID list. We need the decompressed sums that we've been calculating to
+ // fill in the DiffIDs. It's expected (but not enforced by us) that the number of
+ // diffIDs corresponds to the number of non-EmptyLayer entries in the history.
+ var diffIDs []digest.Digest
+ switch m := m.(type) {
+ case *manifest.Schema1:
+ // Build a list of the diffIDs we've generated for the non-throwaway FS layers,
+ // in reverse of the order in which they were originally listed.
+ for i, compat := range m.ExtractedV1Compatibility {
+ if compat.ThrowAway {
+ continue
+ }
+ blobSum := m.FSLayers[i].BlobSum
+ diffID, ok := s.blobDiffIDs[blobSum]
+ if !ok {
+ logrus.Infof("error looking up diffID for layer %q", blobSum.String())
+ return ""
+ }
+ diffIDs = append([]digest.Digest{diffID}, diffIDs...)
+ }
+ case *manifest.Schema2, *manifest.OCI1:
+ // We know the ID calculation for these formats doesn't actually use the diffIDs,
+ // so we don't need to populate the diffID list.
+ default:
+ return ""
+ }
+ id, err := m.ImageID(diffIDs)
+ if err != nil {
+ return ""
+ }
+ return id
+}
+
+// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
+// information out of it for Inspect().
+func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
+ if info.Digest == "" {
+ return nil, errors.New(`no digest supplied when reading blob`)
+ }
+ if err := info.Digest.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err)
+ }
+ // Assume it's a file, since we're only calling this from a place that expects to read files.
+ if filename, ok := s.filenames[info.Digest]; ok {
+ contents, err2 := os.ReadFile(filename)
+ if err2 != nil {
+ return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2)
+ }
+ return contents, nil
+ }
+ // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
+ return nil, errors.New("blob not found")
+}
+
+// queueOrCommit queues in the specified blob to be committed to the storage.
+// If no other goroutine is already committing layers, the layer and all
+// subsequent layers (if already queued) will be committed to the storage.
+func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error {
+ // NOTE: whenever the code below is touched, make sure that all code
+ // paths unlock the lock and to unlock it exactly once.
+ //
+ // Conceptually, the code is divided in two stages:
+ //
+ // 1) Queue in work by marking the layer as ready to be committed.
+ // If at least one previous/parent layer with a lower index has
+ // not yet been committed, return early.
+ //
+ // 2) Process the queued-in work by committing the "ready" layers
+ // in sequence. Make sure that more items can be queued-in
+ // during the comparatively I/O expensive task of committing a
+ // layer.
+ //
+ // The conceptual benefit of this design is that caller can continue
+ // pulling layers after an early return. At any given time, only one
+ // caller is the "worker" routine committing layers. All other routines
+ // can continue pulling and queuing in layers.
+ s.lock.Lock()
+ s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{
+ BlobInfo: blob,
+ EmptyLayer: emptyLayer,
+ }
+
+ // We're still waiting for at least one previous/parent layer to be
+ // committed, so there's nothing to do.
+ if index != s.currentIndex {
+ s.lock.Unlock()
+ return nil
+ }
+
+ for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] {
+ s.lock.Unlock()
+ // Note: commitLayer locks on-demand.
+ if err := s.commitLayer(ctx, *info, index); err != nil {
+ return err
+ }
+ s.lock.Lock()
+ index++
+ }
+
+ // Set the index at the very end to make sure that only one routine
+ // enters stage 2).
+ s.currentIndex = index
+ s.lock.Unlock()
+ return nil
+}
+
+// commitLayer commits the specified blob with the given index to the storage.
+// Note that the previous layer is expected to already be committed.
+//
+// Caution: this function must be called without holding `s.lock`. Callers
+// must guarantee that, at any given time, at most one goroutine may execute
+// `commitLayer()`.
+func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
+ // Already committed? Return early.
+ if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
+ return nil
+ }
+
+ // Start with an empty string or the previous layer ID. Note that
+ // `s.indexToStorageID` can only be accessed by *one* goroutine at any
+ // given time. Hence, we don't need to lock accesses.
+ var lastLayer string
+ if prev := s.indexToStorageID[index-1]; prev != nil {
+ lastLayer = *prev
+ }
+
+ // Carry over the previous ID for empty non-base layers.
+ if blob.EmptyLayer {
+ s.indexToStorageID[index] = &lastLayer
+ return nil
+ }
+
+ // Check if there's already a layer with the ID that we'd give to the result of applying
+ // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
+ s.lock.Lock()
+ diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
+ s.lock.Unlock()
+ if !haveDiffID {
+ // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
+ // or to even check if we had it.
+ // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+ // that relies on using a blob digest that has never been seen by the store had better call
+ // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+ // so far we are going to accommodate that (if we should be doing that at all).
+ logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
+ // NOTE: use `TryReusingBlob` to prevent recursion.
+ has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
+ if err != nil {
+ return fmt.Errorf("checking for a layer based on blob %q: %w", blob.Digest.String(), err)
+ }
+ if !has {
+ return fmt.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
+ }
+ diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
+ if !haveDiffID {
+ return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
+ }
+ }
+ id := diffID.Hex()
+ if lastLayer != "" {
+ id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
+ }
+ if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
+ // There's already a layer that should have the right contents, just reuse it.
+ lastLayer = layer.ID
+ s.indexToStorageID[index] = &lastLayer
+ return nil
+ }
+
+ s.lock.Lock()
+ diffOutput, ok := s.diffOutputs[blob.Digest]
+ s.lock.Unlock()
+ if ok {
+ layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
+ if err != nil {
+ return err
+ }
+
+ // FIXME: what to do with the uncompressed digest?
+ diffOutput.UncompressedDigest = blob.Digest
+
+ if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
+ _ = s.imageRef.transport.store.Delete(layer.ID)
+ return err
+ }
+
+ s.indexToStorageID[index] = &layer.ID
+ return nil
+ }
+
+ s.lock.Lock()
+ al, ok := s.blobAdditionalLayer[blob.Digest]
+ s.lock.Unlock()
+ if ok {
+ layer, err := al.PutAs(id, lastLayer, nil)
+ if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+ return fmt.Errorf("failed to put layer from digest and labels: %w", err)
+ }
+ lastLayer = layer.ID
+ s.indexToStorageID[index] = &lastLayer
+ return nil
+ }
+
+ // Check if we previously cached a file with that blob's contents. If we didn't,
+ // then we need to read the desired contents from a layer.
+ s.lock.Lock()
+ filename, ok := s.filenames[blob.Digest]
+ s.lock.Unlock()
+ if !ok {
+ // Try to find the layer with contents matching that blobsum.
+ layer := ""
+ layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
+ if err2 == nil && len(layers) > 0 {
+ layer = layers[0].ID
+ } else {
+ layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
+ if err2 == nil && len(layers) > 0 {
+ layer = layers[0].ID
+ }
+ }
+ if layer == "" {
+ return fmt.Errorf("locating layer for blob %q: %w", blob.Digest, err2)
+ }
+ // Read the layer's contents.
+ noCompression := archive.Uncompressed
+ diffOptions := &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
+ if err2 != nil {
+ return fmt.Errorf("reading layer %q for blob %q: %w", layer, blob.Digest, err2)
+ }
+ // Copy the layer diff to a file. Diff() takes a lock that it holds
+ // until the ReadCloser that it returns is closed, and PutLayer() wants
+ // the same lock, so the diff can't just be directly streamed from one
+ // to the other.
+ filename = s.computeNextBlobCacheFile()
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+ if err != nil {
+ diff.Close()
+ return fmt.Errorf("creating temporary file %q: %w", filename, err)
+ }
+ // Copy the data to the file.
+ // TODO: This can take quite some time, and should ideally be cancellable using
+ // ctx.Done().
+ _, err = io.Copy(file, diff)
+ diff.Close()
+ file.Close()
+ if err != nil {
+ return fmt.Errorf("storing blob to file %q: %w", filename, err)
+ }
+ // Make sure that we can find this file later, should we need the layer's
+ // contents again.
+ s.lock.Lock()
+ s.filenames[blob.Digest] = filename
+ s.lock.Unlock()
+ }
+ // Read the cached blob and use it as a diff.
+ file, err := os.Open(filename)
+ if err != nil {
+ return fmt.Errorf("opening file %q: %w", filename, err)
+ }
+ defer file.Close()
+ // Build the new layer using the diff, regardless of where it came from.
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
+ OriginalDigest: blob.Digest,
+ UncompressedDigest: diffID,
+ }, file)
+ if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+ return fmt.Errorf("adding layer with blob %q: %w", blob.Digest, err)
+ }
+
+ s.indexToStorageID[index] = &layer.ID
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ if len(s.manifest) == 0 {
+ return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+ }
+ toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return fmt.Errorf("retrieving top-level manifest: %w", err)
+ }
+ // If the name we're saving to includes a digest, then check that the
+ // manifests that we're about to save all either match the one from the
+ // unparsedToplevel, or match the digest in the name that we're using.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ if !matches {
+ matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ }
+ if !matches {
+ return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
+ }
+ }
+ }
+ // Find the list of layer blobs.
+ man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+ if err != nil {
+ return fmt.Errorf("parsing manifest: %w", err)
+ }
+ layerBlobs := man.LayerInfos()
+
+ // Extract, commit, or find the layers.
+ for i, blob := range layerBlobs {
+ if err := s.commitLayer(ctx, blob, i); err != nil {
+ return err
+ }
+ }
+ var lastLayer string
+ if len(layerBlobs) > 0 { // Can happen when using caches
+ prev := s.indexToStorageID[len(layerBlobs)-1]
+ if prev == nil {
+ return fmt.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
+ }
+ lastLayer = *prev
+ }
+
+ // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
+ // was originally created, in case we're just copying it. If not, no harm done.
+ options := &storage.ImageOptions{}
+ if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
+ logrus.Debugf("setting image creation date to %s", inspect.Created)
+ options.CreationDate = *inspect.Created
+ }
+ // Create the image record, pointing to the most-recently added layer.
+ intendedID := s.imageRef.id
+ if intendedID == "" {
+ intendedID = s.computeID(man)
+ }
+ oldNames := []string{}
+ img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
+ if err != nil {
+ if !errors.Is(err, storage.ErrDuplicateID) {
+ logrus.Debugf("error creating image: %q", err)
+ return fmt.Errorf("creating image %q: %w", intendedID, err)
+ }
+ img, err = s.imageRef.transport.store.Image(intendedID)
+ if err != nil {
+ return fmt.Errorf("reading image %q: %w", intendedID, err)
+ }
+ if img.TopLayer != lastLayer {
+ logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
+ return fmt.Errorf("image with ID %q already exists, but uses a different top layer: %w", intendedID, storage.ErrDuplicateID)
+ }
+ logrus.Debugf("reusing image ID %q", img.ID)
+ oldNames = append(oldNames, img.Names...)
+ } else {
+ logrus.Debugf("created new image ID %q", img.ID)
+ }
+
+ // Clean up the unfinished image on any error.
+ // (Is this the right thing to do if the image has existed before?)
+ commitSucceeded := false
+ defer func() {
+ if !commitSucceeded {
+ logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames)
+ if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil {
+ logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err)
+ }
+ }
+ }()
+
+ // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
+ // we just need to screen out the ones that are actually layers to get the list of non-layers.
+ dataBlobs := make(map[digest.Digest]struct{})
+ for blob := range s.filenames {
+ dataBlobs[blob] = struct{}{}
+ }
+ for _, layerBlob := range layerBlobs {
+ delete(dataBlobs, layerBlob.Digest)
+ }
+ for blob := range dataBlobs {
+ v, err := os.ReadFile(s.filenames[blob])
+ if err != nil {
+ return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
+ }
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
+ logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
+ return fmt.Errorf("saving big data %q for image %q: %w", blob.String(), img.ID, err)
+ }
+ }
+ // Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below.
+ if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
+ manifestDigest, err := manifest.Digest(toplevelManifest)
+ if err != nil {
+ return fmt.Errorf("digesting top-level manifest: %w", err)
+ }
+ key := manifestBigDataKey(manifestDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
+ logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving top-level manifest for image %q: %w", img.ID, err)
+ }
+ }
+ // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
+ // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
+ // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
+ key := manifestBigDataKey(s.manifestDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
+ logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving manifest for image %q: %w", img.ID, err)
+ }
+ key = storage.ImageDigestBigDataKey
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
+ logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving manifest for image %q: %w", img.ID, err)
+ }
+ // Save the signatures, if we have any.
+ if len(s.signatures) > 0 {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
+ logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving signatures for image %q: %w", img.ID, err)
+ }
+ }
+ for instanceDigest, signatures := range s.signatureses {
+ key := signatureBigDataKey(instanceDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
+ logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving signatures for image %q: %w", img.ID, err)
+ }
+ }
+ // Save our metadata.
+ metadata, err := json.Marshal(s)
+ if err != nil {
+ logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
+ return fmt.Errorf("encoding metadata for image %q: %w", img.ID, err)
+ }
+ if len(metadata) != 0 {
+ if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
+ logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving metadata for image %q: %w", img.ID, err)
+ }
+ logrus.Debugf("saved image metadata %q", string(metadata))
+ }
+ // Add the reference's name to the image. We don't need to worry about avoiding duplicate
+ // values because AddNames() will deduplicate the list that we pass to it.
+ if name := s.imageRef.DockerReference(); name != nil {
+ if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil {
+ return fmt.Errorf("adding names %v to image %q: %w", name, img.ID, err)
+ }
+ logrus.Debugf("added name %q to image %q", name, img.ID)
+ }
+
+ commitSucceeded = true
+ return nil
+}
+
+// PutManifest writes the manifest to the destination.
+func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+ digest, err := manifest.Digest(manifestBlob)
+ if err != nil {
+ return err
+ }
+ newBlob := make([]byte, len(manifestBlob))
+ copy(newBlob, manifestBlob)
+ s.manifest = newBlob
+ s.manifestDigest = digest
+ return nil
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ sizes := []int{}
+ sigblob := []byte{}
+ for _, sigWithFormat := range signatures {
+ sig, err := signature.Blob(sigWithFormat)
+ if err != nil {
+ return err
+ }
+ sizes = append(sizes, len(sig))
+ newblob := make([]byte, len(sigblob)+len(sig))
+ copy(newblob, sigblob)
+ copy(newblob[len(sigblob):], sig)
+ sigblob = newblob
+ }
+ if instanceDigest == nil {
+ s.signatures = sigblob
+ s.SignatureSizes = sizes
+ if len(s.manifest) > 0 {
+ manifestDigest := s.manifestDigest
+ instanceDigest = &manifestDigest
+ }
+ }
+ if instanceDigest != nil {
+ s.signatureses[*instanceDigest] = sigblob
+ s.SignaturesSizes[*instanceDigest] = sizes
+ }
+ return nil
+}
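
One detail of storage_dest.go worth calling out: commitLayer() derives deterministic layer IDs by chaining the parent layer's ID with the layer's DiffID (the digest of "parent+diffID"), so re-pulling the same sequence of layers reuses the same storage records. A self-contained sketch with placeholder digests:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// chainID mirrors the ID computation in commitLayer(): a base layer's ID is
// its DiffID hex; each child ID hashes the parent ID joined to the child's
// DiffID hex with "+".
func chainID(lastLayer string, diffID digest.Digest) string {
	if lastLayer == "" {
		return diffID.Hex()
	}
	return digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
}

func main() {
	base := digest.Canonical.FromString("layer 0 uncompressed contents")
	child := digest.Canonical.FromString("layer 1 uncompressed contents")

	id0 := chainID("", base)
	id1 := chainID(id0, child)
	fmt.Println(id0)
	fmt.Println(id1) // same inputs always yield the same storage layer ID
}
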
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
index 7329ef6eee0..9f16dd334d9 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_image.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -4,94 +4,20 @@
package storage
import (
- "bytes"
"context"
- "encoding/json"
- stderrors "errors"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
- "sync/atomic"
- "github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
- "github.com/containers/image/v5/internal/putblobdigest"
- "github.com/containers/image/v5/internal/tmpdir"
- internalTypes "github.com/containers/image/v5/internal/types"
- "github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
- graphdriver "github.com/containers/storage/drivers"
- "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/chunked"
- "github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
var (
- // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob
- // with a digest-based name that doesn't match its contents.
- // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value),
- // and there is no known user of this error.
- ErrBlobDigestMismatch = stderrors.New("blob digest mismatch")
- // ErrBlobSizeMismatch is returned when PutBlob() is given a blob
- // with an expected size that doesn't match the reader.
- ErrBlobSizeMismatch = stderrors.New("blob size mismatch")
// ErrNoSuchImage is returned when we attempt to access an image which
// doesn't exist in the storage area.
ErrNoSuchImage = storage.ErrNotAnImage
)
-type storageImageSource struct {
- imageRef storageReference
- image *storage.Image
- systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
- layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
- cachedManifest []byte // A cached copy of the manifest, if already known, or nil
- getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
- SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
- SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
-}
-
-type storageImageDestination struct {
- imageRef storageReference
- directory string // Temporary directory where we store blobs until Commit() time
- nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
- manifest []byte // Manifest contents, temporary
- manifestDigest digest.Digest // Valid if len(manifest) != 0
- signatures []byte // Signature contents, temporary
- signatureses map[digest.Digest][]byte // Instance signature contents, temporary
- SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
- SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
-
- // A storage destination may be used concurrently. Accesses are
- // serialized via a mutex. Please refer to the individual comments
- // below for details.
- lock sync.Mutex
- // Mapping from layer (by index) to the associated ID in the storage.
- // It's protected *implicitly* since `commitLayer()`, at any given
- // time, can only be executed by *one* goroutine. Please refer to
- // `queueOrCommit()` for further details on how the single-caller
- // guarantee is implemented.
- indexToStorageID map[int]*string
- // All accesses to below data are protected by `lock` which is made
- // *explicit* in the code.
- blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
- fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
- filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
- currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
- indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob
- blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
- diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
-}
-
type storageImageCloser struct {
types.ImageCloser
size int64
@@ -110,1228 +36,6 @@ func signatureBigDataKey(digest digest.Digest) string {
return "signature-" + digest.Encoded()
}
-// newImageSource sets up an image for reading.
-func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
- // First, locate the image.
- img, err := imageRef.resolveImage(sys)
- if err != nil {
- return nil, err
- }
-
- // Build the reader object.
- image := &storageImageSource{
- imageRef: imageRef,
- systemContext: sys,
- image: img,
- layerPosition: make(map[digest.Digest]int),
- SignatureSizes: []int{},
- SignaturesSizes: make(map[digest.Digest][]int),
- }
- if img.Metadata != "" {
- if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
- return nil, errors.Wrap(err, "decoding metadata for source image")
- }
- }
- return image, nil
-}
-
-// Reference returns the image reference that we used to find this image.
-func (s *storageImageSource) Reference() types.ImageReference {
- return s.imageRef
-}
-
-// Close cleans up any resources we tied up while reading the image.
-func (s *storageImageSource) Close() error {
- return nil
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (s *storageImageSource) HasThreadSafeGetBlob() bool {
- return true
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
- if info.Digest == image.GzippedEmptyLayerDigest {
- return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
- }
-
- // NOTE: the blob is first written to a temporary file and subsequently
- // closed. The intention is to keep the time we own the storage lock
- // as short as possible to allow other processes to access the storage.
- rc, n, _, err = s.getBlobAndLayerID(info)
- if err != nil {
- return nil, 0, err
- }
- defer rc.Close()
-
- tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
- if err != nil {
- return nil, 0, err
- }
-
- if _, err := io.Copy(tmpFile, rc); err != nil {
- return nil, 0, err
- }
-
- if _, err := tmpFile.Seek(0, 0); err != nil {
- return nil, 0, err
- }
-
- wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error {
- defer os.Remove(tmpFile.Name())
- return tmpFile.Close()
- })
-
- return wrapper, n, err
-}
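
GetBlob above spools the layer diff into a temporary file so the storage lock is held only while copying, then returns a ReadCloser whose Close() both closes and removes the file. A self-contained sketch of the same pattern, without the storage and ioutils dependencies (names are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// selfDeletingFile closes the underlying file and removes it from disk when
// the consumer is done reading, so no separate cleanup step is needed.
type selfDeletingFile struct {
	*os.File
}

func (f *selfDeletingFile) Close() error {
	defer os.Remove(f.Name())
	return f.File.Close()
}

// spoolToTempFile drains src into a temporary file and returns a ReadCloser
// positioned at the start; src can be closed (releasing any lock it holds)
// before the returned reader is consumed.
func spoolToTempFile(src io.Reader) (io.ReadCloser, int64, error) {
	tmp, err := os.CreateTemp("", "blob-")
	if err != nil {
		return nil, 0, err
	}
	n, err := io.Copy(tmp, src)
	if err != nil {
		tmp.Close()
		os.Remove(tmp.Name())
		return nil, 0, err
	}
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		tmp.Close()
		os.Remove(tmp.Name())
		return nil, 0, err
	}
	return &selfDeletingFile{tmp}, n, nil
}

func main() {
	rc, n, err := spoolToTempFile(strings.NewReader("layer bytes"))
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	data, _ := io.ReadAll(rc)
	fmt.Println(n, string(data)) // 11 layer bytes
}
```
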
-
- // getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
- var layer storage.Layer
- var diffOptions *storage.DiffOptions
- // We need a valid digest value.
- err = info.Digest.Validate()
- if err != nil {
- return nil, -1, "", err
- }
- // Check if the blob corresponds to a diff that was used to initialize any layers. Our
- // callers should try to retrieve layers using their uncompressed digests, so no need to
- // check if they're using one of the compressed digests, which we can't reproduce anyway.
- layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
-
- // If it's not a layer, then it must be a data item.
- if len(layers) == 0 {
- b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
- if err != nil {
- return nil, -1, "", err
- }
- r := bytes.NewReader(b)
- logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
- return ioutil.NopCloser(r), int64(r.Len()), "", nil
- }
- // Step through the list of matching layers. Tests may want to verify that if we have multiple layers
- // which claim to have the same contents, that we actually do have multiple layers, otherwise we could
- // just go ahead and use the first one every time.
- s.getBlobMutex.Lock()
- i := s.layerPosition[info.Digest]
- s.layerPosition[info.Digest] = i + 1
- s.getBlobMutex.Unlock()
- if len(layers) > 0 {
- layer = layers[i%len(layers)]
- }
- // Force the storage layer to not try to match any compression that was used when the layer was first
- // handed to it.
- noCompression := archive.Uncompressed
- diffOptions = &storage.DiffOptions{
- Compression: &noCompression,
- }
- if layer.UncompressedSize < 0 {
- n = -1
- } else {
- n = layer.UncompressedSize
- }
- logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
- rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
- if err != nil {
- return nil, -1, "", err
- }
- return rc, n, layer.ID, err
-}
-
-// GetManifest() reads the image's manifest.
-func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
- if instanceDigest != nil {
- key := manifestBigDataKey(*instanceDigest)
- blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
- if err != nil {
- return nil, "", errors.Wrapf(err, "reading manifest for image instance %q", *instanceDigest)
- }
- return blob, manifest.GuessMIMEType(blob), err
- }
- if len(s.cachedManifest) == 0 {
- // The manifest is stored as a big data item.
- // Prefer the manifest corresponding to the user-specified digest, if available.
- if s.imageRef.named != nil {
- if digested, ok := s.imageRef.named.(reference.Digested); ok {
- key := manifestBigDataKey(digested.Digest())
- blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
- if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
- return nil, "", err
- }
- if err == nil {
- s.cachedManifest = blob
- }
- }
- }
- // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
- // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
- if len(s.cachedManifest) == 0 {
- cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
- if err != nil {
- return nil, "", err
- }
- s.cachedManifest = cachedBlob
- }
- }
- return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
-}
-
-// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
-// the image, after they've been decompressed.
-func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
- manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
- if err != nil {
- return nil, errors.Wrapf(err, "reading image manifest for %q", s.image.ID)
- }
- if manifest.MIMETypeIsMultiImage(manifestType) {
- return nil, errors.Errorf("can't copy layers for a manifest list (shouldn't be attempted)")
- }
- man, err := manifest.FromBlob(manifestBlob, manifestType)
- if err != nil {
- return nil, errors.Wrapf(err, "parsing image manifest for %q", s.image.ID)
- }
-
- uncompressedLayerType := ""
- switch manifestType {
- case imgspecv1.MediaTypeImageManifest:
- uncompressedLayerType = imgspecv1.MediaTypeImageLayer
- case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
- uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
- }
-
- physicalBlobInfos := []types.BlobInfo{}
- layerID := s.image.TopLayer
- for layerID != "" {
- layer, err := s.imageRef.transport.store.Layer(layerID)
- if err != nil {
- return nil, errors.Wrapf(err, "reading layer %q in image %q", layerID, s.image.ID)
- }
- if layer.UncompressedDigest == "" {
- return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID)
- }
- if layer.UncompressedSize < 0 {
- return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID)
- }
- blobInfo := types.BlobInfo{
- Digest: layer.UncompressedDigest,
- Size: layer.UncompressedSize,
- MediaType: uncompressedLayerType,
- }
- physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
- layerID = layer.Parent
- }
-
- res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
- if err != nil {
- return nil, errors.Wrapf(err, "creating LayerInfosForCopy of image %q", s.image.ID)
- }
- return res, nil
-}
-
-// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
-// but using layer data which we can actually produce — physicalInfos for non-empty layers,
-// and image.GzippedEmptyLayer for empty ones.
-// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
-func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
- nextPhysical := 0
- res := make([]types.BlobInfo, len(manifestInfos))
- for i, mi := range manifestInfos {
- if mi.EmptyLayer {
- res[i] = types.BlobInfo{
- Digest: image.GzippedEmptyLayerDigest,
- Size: int64(len(image.GzippedEmptyLayer)),
- MediaType: mi.MediaType,
- }
- } else {
- if nextPhysical >= len(physicalInfos) {
- return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
- }
- res[i] = physicalInfos[nextPhysical]
- nextPhysical++
- }
- }
- if nextPhysical != len(physicalInfos) {
- return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
- }
- return res, nil
-}
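
buildLayerInfosForCopy consumes the physical layers in order for the non-empty manifest entries and substitutes a fixed gzipped-empty-layer entry for the empty ones, failing if the counts don't line up. A toy version of the same pairing logic (placeholder digest strings, illustrative names):

```go
package main

import "fmt"

type layerInfo struct {
	Digest     string
	EmptyLayer bool
}

// pairLayers walks the manifest's layer entries, consuming physical layers
// one by one for the non-empty ones; empty entries get a fixed placeholder.
func pairLayers(manifestInfos []layerInfo, physical []string) ([]string, error) {
	const emptyLayerDigest = "sha256:gzipped-empty" // placeholder, not the real digest
	next := 0
	res := make([]string, len(manifestInfos))
	for i, mi := range manifestInfos {
		if mi.EmptyLayer {
			res[i] = emptyLayerDigest
			continue
		}
		if next >= len(physical) {
			return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physical))
		}
		res[i] = physical[next]
		next++
	}
	if next != len(physical) {
		return nil, fmt.Errorf("used only %d out of %d physical layers", next, len(physical))
	}
	return res, nil
}

func main() {
	res, err := pairLayers(
		[]layerInfo{{Digest: "a"}, {EmptyLayer: true}, {Digest: "b"}},
		[]string{"sha256:aaa", "sha256:bbb"},
	)
	fmt.Println(res, err) // [sha256:aaa sha256:gzipped-empty sha256:bbb] <nil>
}
```
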
-
-// GetSignatures() parses the image's signatures blob into a slice of byte slices.
-func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
- var offset int
- sigslice := [][]byte{}
- signature := []byte{}
- signatureSizes := s.SignatureSizes
- key := "signatures"
- instance := "default instance"
- if instanceDigest != nil {
- signatureSizes = s.SignaturesSizes[*instanceDigest]
- key = signatureBigDataKey(*instanceDigest)
- instance = instanceDigest.Encoded()
- }
- if len(signatureSizes) > 0 {
- signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
- if err != nil {
- return nil, errors.Wrapf(err, "looking up signatures data for image %q (%s)", s.image.ID, instance)
- }
- signature = signatureBlob
- }
- for _, length := range signatureSizes {
- if offset+length > len(signature) {
- return nil, errors.Wrapf(err, "looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signature), offset+length)
- }
- sigslice = append(sigslice, signature[offset:offset+length])
- offset += length
- }
- if offset != len(signature) {
- return nil, errors.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatures)-offset)
- }
- return sigslice, nil
-}
-
-// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
-// it's time to Commit() the image
-func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
- directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
- if err != nil {
- return nil, errors.Wrapf(err, "creating a temporary directory")
- }
- image := &storageImageDestination{
- imageRef: imageRef,
- directory: directory,
- signatureses: make(map[digest.Digest][]byte),
- blobDiffIDs: make(map[digest.Digest]digest.Digest),
- blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
- fileSizes: make(map[digest.Digest]int64),
- filenames: make(map[digest.Digest]string),
- SignatureSizes: []int{},
- SignaturesSizes: make(map[digest.Digest][]int),
- indexToStorageID: make(map[int]*string),
- indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
- diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
- }
- return image, nil
-}
-
-// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (s *storageImageDestination) Reference() types.ImageReference {
- return s.imageRef
-}
-
-// Close cleans up the temporary directory and additional layer store handlers.
-func (s *storageImageDestination) Close() error {
- for _, al := range s.blobAdditionalLayer {
- al.Release()
- }
- for _, v := range s.diffOutputs {
- if v.Target != "" {
- _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
- }
- }
- return os.RemoveAll(s.directory)
-}
-
-func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
- // We ultimately have to decompress layers to populate trees on disk
- // and need to explicitly ask for it here, so that the layers' MIME
- // types can be set accordingly.
- return types.PreserveOriginal
-}
-
-func (s *storageImageDestination) computeNextBlobCacheFile() string {
- return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
-}
-
- // PutBlobWithOptions is a wrapper around PutBlob. If options.LayerIndex is
- // set, the blob will be committed directly, either by the calling goroutine
- // or by another goroutine already committing layers.
- //
- // Please note that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
- // used together. Mixing the two with the non-"WithOptions" functions is not
- // supported.
-func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options internalTypes.PutBlobOptions) (types.BlobInfo, error) {
- info, err := s.PutBlob(ctx, stream, blobinfo, options.Cache, options.IsConfig)
- if err != nil {
- return info, err
- }
-
- if options.IsConfig || options.LayerIndex == nil {
- return info, nil
- }
-
- return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
-}
-
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (s *storageImageDestination) HasThreadSafePutBlob() bool {
- return true
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
-// inputInfo.Size is the expected length of stream, if known.
-// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- // Stores a layer or data blob in our temporary directory, checking that any information
- // in the blobinfo matches the incoming data.
- errorBlobInfo := types.BlobInfo{
- Digest: "",
- Size: -1,
- }
- if blobinfo.Digest != "" {
- if err := blobinfo.Digest.Validate(); err != nil {
- return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
- }
- }
-
- // Set up to digest the blob if necessary, and count its size while saving it to a file.
- filename := s.computeNextBlobCacheFile()
- file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
- if err != nil {
- return errorBlobInfo, errors.Wrapf(err, "creating temporary file %q", filename)
- }
- defer file.Close()
- counter := ioutils.NewWriteCounter(file)
- stream = io.TeeReader(stream, counter)
- digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
- decompressed, err := archive.DecompressStream(stream)
- if err != nil {
- return errorBlobInfo, errors.Wrap(err, "setting up to decompress blob")
- }
-
- diffID := digest.Canonical.Digester()
- // Copy the data to the file.
- // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- _, err = io.Copy(diffID.Hash(), decompressed)
- decompressed.Close()
- if err != nil {
- return errorBlobInfo, errors.Wrapf(err, "storing blob to file %q", filename)
- }
-
- // Determine blob properties, and fail if information that we were given about the blob
- // is known to be incorrect.
- blobDigest := digester.Digest()
- blobSize := blobinfo.Size
- if blobSize < 0 {
- blobSize = counter.Count
- } else if blobinfo.Size != counter.Count {
- return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch)
- }
-
- // Record information about the blob.
- s.lock.Lock()
- s.blobDiffIDs[blobDigest] = diffID.Digest()
- s.fileSizes[blobDigest] = counter.Count
- s.filenames[blobDigest] = filename
- s.lock.Unlock()
- // This is safe because we have just computed diffID, and blobDigest was either computed
- // by us, or validated by the caller (usually copy.digestingReader).
- cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
- return types.BlobInfo{
- Digest: blobDigest,
- Size: blobSize,
- MediaType: blobinfo.MediaType,
- }, nil
-}
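
PutBlob makes a single pass over the stream while feeding three consumers at once: a byte counter in front of the cache file, a digester for the blob as transferred, and a second digester over the decompressed data to obtain the DiffID. A reduced sketch of that plumbing, assuming a gzip-compressed input in place of the general archive.DecompressStream:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	digest "github.com/opencontainers/go-digest"
)

type countingWriter struct{ n int64 }

func (c *countingWriter) Write(p []byte) (int, error) { c.n += int64(len(p)); return len(p), nil }

// digestBlobAndDiffID reads a gzip-compressed blob exactly once, returning
// the blob digest, the blob size, and the digest of the decompressed
// contents (the DiffID), mirroring PutBlob's single-pass layout.
func digestBlobAndDiffID(stream io.Reader) (blob digest.Digest, size int64, diffID digest.Digest, err error) {
	counter := &countingWriter{}
	blobDigester := digest.Canonical.Digester()
	// Everything read from tee is also counted and fed to the blob digester.
	tee := io.TeeReader(stream, io.MultiWriter(counter, blobDigester.Hash()))
	decompressed, err := gzip.NewReader(tee)
	if err != nil {
		return "", -1, "", err
	}
	diffDigester := digest.Canonical.Digester()
	if _, err := io.Copy(diffDigester.Hash(), decompressed); err != nil {
		return "", -1, "", err
	}
	decompressed.Close()
	return blobDigester.Digest(), counter.n, diffDigester.Digest(), nil
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("uncompressed layer contents"))
	zw.Close()
	blob, size, diffID, err := digestBlobAndDiffID(&buf)
	fmt.Println(blob, size, diffID, err)
}
```
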
-
- // TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
- // options.LayerIndex is set, the reused blob will be recorded as already
- // pulled.
- //
- // Please note that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
- // used together. Mixing the two with the non-"WithOptions" functions
- // is not supported.
-func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options internalTypes.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
- reused, info, err := s.tryReusingBlobWithSrcRef(ctx, blobinfo, options.Cache, options.CanSubstitute, options.SrcRef)
- if err != nil || !reused || options.LayerIndex == nil {
- return reused, info, err
- }
-
- return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
-}
-
-// tryReusingBlobWithSrcRef is a wrapper around TryReusingBlob.
-// If ref is provided, this function first tries to get layer from Additional Layer Store.
-func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool, ref reference.Named) (bool, types.BlobInfo, error) {
- // lock the entire method as it executes fairly quickly
- s.lock.Lock()
- defer s.lock.Unlock()
-
- if ref != nil {
- // Check if we have the layer in the underlying additional layer store.
- aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, ref.String())
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest)
- } else if err == nil {
- // Record the uncompressed value so that we can use it to calculate layer IDs.
- s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
- s.blobAdditionalLayer[blobinfo.Digest] = aLayer
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: aLayer.CompressedSize(),
- MediaType: blobinfo.MediaType,
- }, nil
- }
- }
-
- return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute)
-}
-
-type zstdFetcher struct {
- stream internalTypes.ImageSourceSeekable
- ctx context.Context
- blobInfo types.BlobInfo
-}
-
-// GetBlobAt converts from chunked.GetBlobAt to ImageSourceSeekable.GetBlobAt.
-func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
- var newChunks []internalTypes.ImageSourceChunk
- for _, v := range chunks {
- i := internalTypes.ImageSourceChunk{
- Offset: v.Offset,
- Length: v.Length,
- }
- newChunks = append(newChunks, i)
- }
- rc, errs, err := f.stream.GetBlobAt(f.ctx, f.blobInfo, newChunks)
- if _, ok := err.(internalTypes.BadPartialRequestError); ok {
- err = chunked.ErrBadRequest{}
- }
- return rc, errs, err
-
-}
-
-// PutBlobPartial attempts to create a blob using the data that is already present at the destination storage. stream is accessed
-// in a non-sequential way to retrieve the missing chunks.
-func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream internalTypes.ImageSourceSeekable, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
- fetcher := zstdFetcher{
- stream: stream,
- ctx: ctx,
- blobInfo: srcInfo,
- }
-
- differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
- if err != nil {
- return srcInfo, err
- }
-
- out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
- if err != nil {
- return srcInfo, err
- }
-
- blobDigest := srcInfo.Digest
-
- s.lock.Lock()
- s.blobDiffIDs[blobDigest] = blobDigest
- s.fileSizes[blobDigest] = 0
- s.filenames[blobDigest] = ""
- s.diffOutputs[blobDigest] = out
- s.lock.Unlock()
-
- return srcInfo, nil
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
- // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- // lock the entire method as it executes fairly quickly
- s.lock.Lock()
- defer s.lock.Unlock()
-
- return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute)
-}
-
- // tryReusingBlobLocked implements the core functionality of TryReusingBlob.
- // It must be called while holding the lock on storageImageDestination.
-func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
- if blobinfo.Digest == "" {
- return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
- }
- if err := blobinfo.Digest.Validate(); err != nil {
- return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
- }
-
- // Check if we've already cached it in a file.
- if size, ok := s.fileSizes[blobinfo.Digest]; ok {
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: size,
- MediaType: blobinfo.MediaType,
- }, nil
- }
-
- // Check if we have an uncompressed layer in storage that's based on that blob.
- layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, blobinfo.Digest)
- }
- if len(layers) > 0 {
- // Save this for completeness.
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: layers[0].UncompressedSize,
- MediaType: blobinfo.MediaType,
- }, nil
- }
-
- // Check if we have a compressed layer in storage that's based on that blob.
- layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q`, blobinfo.Digest)
- }
- if len(layers) > 0 {
- // Record the uncompressed value so that we can use it to calculate layer IDs.
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: layers[0].CompressedSize,
- MediaType: blobinfo.MediaType,
- }, nil
- }
-
- // Does the blob correspond to a known DiffID which we already have available?
- // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
- // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size.
- if canSubstitute || blobinfo.Size != -1 {
- if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
- layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest)
- }
- if len(layers) > 0 {
- if blobinfo.Size != -1 {
- s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, blobinfo, nil
- }
- if !canSubstitute {
- return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo)
- }
- s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
- return true, types.BlobInfo{
- Digest: uncompressedDigest,
- Size: layers[0].UncompressedSize,
- MediaType: blobinfo.MediaType,
- }, nil
- }
- }
- }
-
- // Nope, we don't have it.
- return false, types.BlobInfo{}, nil
-}
-
-// computeID computes a recommended image ID based on information we have so far. If
-// the manifest is not of a type that we recognize, we return an empty value, indicating
-// that since we don't have a recommendation, a random ID should be used if one needs
-// to be allocated.
-func (s *storageImageDestination) computeID(m manifest.Manifest) string {
- // Build the diffID list. We need the decompressed sums that we've been calculating to
- // fill in the DiffIDs. It's expected (but not enforced by us) that the number of
- // diffIDs corresponds to the number of non-EmptyLayer entries in the history.
- var diffIDs []digest.Digest
- switch m := m.(type) {
- case *manifest.Schema1:
- // Build a list of the diffIDs we've generated for the non-throwaway FS layers,
- // in reverse of the order in which they were originally listed.
- for i, compat := range m.ExtractedV1Compatibility {
- if compat.ThrowAway {
- continue
- }
- blobSum := m.FSLayers[i].BlobSum
- diffID, ok := s.blobDiffIDs[blobSum]
- if !ok {
- logrus.Infof("error looking up diffID for layer %q", blobSum.String())
- return ""
- }
- diffIDs = append([]digest.Digest{diffID}, diffIDs...)
- }
- case *manifest.Schema2, *manifest.OCI1:
- // We know the ID calculation for these formats doesn't actually use the diffIDs,
- // so we don't need to populate the diffID list.
- default:
- return ""
- }
- id, err := m.ImageID(diffIDs)
- if err != nil {
- return ""
- }
- return id
-}
-
-// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
-// information out of it for Inspect().
-func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
- if info.Digest == "" {
- return nil, errors.Errorf(`no digest supplied when reading blob`)
- }
- if err := info.Digest.Validate(); err != nil {
- return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`)
- }
- // Assume it's a file, since we're only calling this from a place that expects to read files.
- if filename, ok := s.filenames[info.Digest]; ok {
- contents, err2 := ioutil.ReadFile(filename)
- if err2 != nil {
- return nil, errors.Wrapf(err2, `reading blob from file %q`, filename)
- }
- return contents, nil
- }
- // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
- return nil, errors.New("blob not found")
-}
-
-// queueOrCommit queues in the specified blob to be committed to the storage.
-// If no other goroutine is already committing layers, the layer and all
-// subsequent layers (if already queued) will be committed to the storage.
-func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error {
- // NOTE: whenever the code below is touched, make sure that all code
- // paths unlock the lock and to unlock it exactly once.
- //
- // Conceptually, the code is divided in two stages:
- //
- // 1) Queue in work by marking the layer as ready to be committed.
- // If at least one previous/parent layer with a lower index has
- // not yet been committed, return early.
- //
- // 2) Process the queued-in work by committing the "ready" layers
- // in sequence. Make sure that more items can be queued-in
- // during the comparatively I/O expensive task of committing a
- // layer.
- //
- // The conceptual benefit of this design is that callers can continue
- // pulling layers after an early return. At any given time, only one
- // caller is the "worker" routine committing layers. All other routines
- // can continue pulling and queuing in layers.
- s.lock.Lock()
- s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{
- BlobInfo: blob,
- EmptyLayer: emptyLayer,
- }
-
- // We're still waiting for at least one previous/parent layer to be
- // committed, so there's nothing to do.
- if index != s.currentIndex {
- s.lock.Unlock()
- return nil
- }
-
- for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] {
- s.lock.Unlock()
- // Note: commitLayer locks on-demand.
- if err := s.commitLayer(ctx, *info, index); err != nil {
- return err
- }
- s.lock.Lock()
- index++
- }
-
- // Set the index at the very end to make sure that only one routine
- // enters stage 2).
- s.currentIndex = index
- s.lock.Unlock()
- return nil
-}
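
queueOrCommit is a single-worker queue: every goroutine records its layer under its index, but only the goroutine whose index matches the next-to-commit position drains the queue, committing consecutive indices while the lock is released for the expensive work. The pattern reduced to its essentials (illustrative types, not the c/image API):

```go
package main

import (
	"fmt"
	"sync"
)

// sequentialCommitter lets many goroutines hand in items by index while
// guaranteeing that items are processed strictly in index order, by exactly
// one goroutine at a time.
type sequentialCommitter struct {
	mu      sync.Mutex
	pending map[int]string
	next    int // index of the next item to process
}

func (c *sequentialCommitter) queueOrCommit(index int, item string, commit func(string)) {
	c.mu.Lock()
	c.pending[index] = item
	if index != c.next {
		// An earlier index is still missing; its goroutine will drain us later.
		c.mu.Unlock()
		return
	}
	for it, ok := c.pending[index]; ok; it, ok = c.pending[index] {
		c.mu.Unlock()
		commit(it) // the expensive work happens without holding the lock
		c.mu.Lock()
		index++
	}
	c.next = index
	c.mu.Unlock()
}

func main() {
	c := &sequentialCommitter{pending: map[int]string{}}
	var wg sync.WaitGroup
	for i, item := range []string{"layer-0", "layer-1", "layer-2"} {
		wg.Add(1)
		go func(i int, item string) {
			defer wg.Done()
			c.queueOrCommit(i, item, func(s string) { fmt.Println("committed", s) })
		}(i, item)
	}
	wg.Wait() // always prints layer-0, layer-1, layer-2 in order
}
```
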
-
-// commitLayer commits the specified blob with the given index to the storage.
-// Note that the previous layer is expected to already be committed.
-//
-// Caution: this function must be called without holding `s.lock`. Callers
-// must guarantee that, at any given time, at most one goroutine may execute
-// `commitLayer()`.
-func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
- // Already committed? Return early.
- if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
- return nil
- }
-
- // Start with an empty string or the previous layer ID. Note that
- // `s.indexToStorageID` can only be accessed by *one* goroutine at any
- // given time. Hence, we don't need to lock accesses.
- var lastLayer string
- if prev := s.indexToStorageID[index-1]; prev != nil {
- lastLayer = *prev
- }
-
- // Carry over the previous ID for empty non-base layers.
- if blob.EmptyLayer {
- s.indexToStorageID[index] = &lastLayer
- return nil
- }
-
- // Check if there's already a layer with the ID that we'd give to the result of applying
- // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
- s.lock.Lock()
- diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
- s.lock.Unlock()
- if !haveDiffID {
- // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
- // or to even check if we had it.
- // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
- // that relies on using a blob digest that has never been seen by the store had better call
- // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
- // so far we are going to accommodate that (if we should be doing that at all).
- logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
- // NOTE: use `TryReusingBlob` to prevent recursion.
- has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
- if err != nil {
- return errors.Wrapf(err, "checking for a layer based on blob %q", blob.Digest.String())
- }
- if !has {
- return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
- }
- diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
- if !haveDiffID {
- return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
- }
- }
- id := diffID.Hex()
- if lastLayer != "" {
- id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
- }
- if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
- // There's already a layer that should have the right contents, just reuse it.
- lastLayer = layer.ID
- s.indexToStorageID[index] = &lastLayer
- return nil
- }
-
- s.lock.Lock()
- diffOutput, ok := s.diffOutputs[blob.Digest]
- s.lock.Unlock()
- if ok {
- layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
- if err != nil {
- return err
- }
-
- // FIXME: what to do with the uncompressed digest?
- diffOutput.UncompressedDigest = blob.Digest
-
- if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
- _ = s.imageRef.transport.store.Delete(layer.ID)
- return err
- }
-
- s.indexToStorageID[index] = &layer.ID
- return nil
- }
-
- s.lock.Lock()
- al, ok := s.blobAdditionalLayer[blob.Digest]
- s.lock.Unlock()
- if ok {
- layer, err := al.PutAs(id, lastLayer, nil)
- if err != nil {
- return errors.Wrapf(err, "failed to put layer from digest and labels")
- }
- lastLayer = layer.ID
- s.indexToStorageID[index] = &lastLayer
- return nil
- }
-
- // Check if we previously cached a file with that blob's contents. If we didn't,
- // then we need to read the desired contents from a layer.
- s.lock.Lock()
- filename, ok := s.filenames[blob.Digest]
- s.lock.Unlock()
- if !ok {
- // Try to find the layer with contents matching that blobsum.
- layer := ""
- layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
- if err2 == nil && len(layers) > 0 {
- layer = layers[0].ID
- } else {
- layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
- if err2 == nil && len(layers) > 0 {
- layer = layers[0].ID
- }
- }
- if layer == "" {
- return errors.Wrapf(err2, "locating layer for blob %q", blob.Digest)
- }
- // Read the layer's contents.
- noCompression := archive.Uncompressed
- diffOptions := &storage.DiffOptions{
- Compression: &noCompression,
- }
- diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
- if err2 != nil {
- return errors.Wrapf(err2, "reading layer %q for blob %q", layer, blob.Digest)
- }
- // Copy the layer diff to a file. Diff() takes a lock that it holds
- // until the ReadCloser that it returns is closed, and PutLayer() wants
- // the same lock, so the diff can't just be directly streamed from one
- // to the other.
- filename = s.computeNextBlobCacheFile()
- file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
- if err != nil {
- diff.Close()
- return errors.Wrapf(err, "creating temporary file %q", filename)
- }
- // Copy the data to the file.
- // TODO: This can take quite some time, and should ideally be cancellable using
- // ctx.Done().
- _, err = io.Copy(file, diff)
- diff.Close()
- file.Close()
- if err != nil {
- return errors.Wrapf(err, "storing blob to file %q", filename)
- }
- // Make sure that we can find this file later, should we need the layer's
- // contents again.
- s.lock.Lock()
- s.filenames[blob.Digest] = filename
- s.lock.Unlock()
- }
- // Read the cached blob and use it as a diff.
- file, err := os.Open(filename)
- if err != nil {
- return errors.Wrapf(err, "opening file %q", filename)
- }
- defer file.Close()
- // Build the new layer using the diff, regardless of where it came from.
- // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
- OriginalDigest: blob.Digest,
- UncompressedDigest: diffID,
- }, file)
- if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
- return errors.Wrapf(err, "adding layer with blob %q", blob.Digest)
- }
-
- s.indexToStorageID[index] = &layer.ID
- return nil
-}
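
commitLayer derives deterministic layer IDs by chaining: a base layer uses the hex of its DiffID, and each child layer hashes "parentID+diffID", so any process that applies the same diffs in the same order computes the same IDs and can reuse layers that are already committed. For example:

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// chainedLayerID reproduces the ID scheme used above: the hex of the DiffID
// for a base layer, otherwise the canonical digest of "parent+diffID".
func chainedLayerID(parent string, diffID digest.Digest) string {
	if parent == "" {
		return diffID.Hex()
	}
	return digest.Canonical.FromBytes([]byte(parent + "+" + diffID.Hex())).Hex()
}

func main() {
	diffIDs := []digest.Digest{
		digest.Canonical.FromString("layer 0 contents"),
		digest.Canonical.FromString("layer 1 contents"),
	}
	id := ""
	for _, d := range diffIDs {
		id = chainedLayerID(id, d)
		fmt.Println(id) // each ID depends on the whole chain so far
	}
}
```
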
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
- if len(s.manifest) == 0 {
- return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
- }
- toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
- if err != nil {
- return errors.Wrapf(err, "retrieving top-level manifest")
- }
- // If the name we're saving to includes a digest, then check that the
- // manifests that we're about to save all either match the one from the
- // unparsedToplevel, or match the digest in the name that we're using.
- if s.imageRef.named != nil {
- if digested, ok := s.imageRef.named.(reference.Digested); ok {
- matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
- if err != nil {
- return err
- }
- if !matches {
- matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
- if err != nil {
- return err
- }
- }
- if !matches {
- return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
- }
- }
- }
- // Find the list of layer blobs.
- man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
- if err != nil {
- return errors.Wrapf(err, "parsing manifest")
- }
- layerBlobs := man.LayerInfos()
-
- // Extract, commit, or find the layers.
- for i, blob := range layerBlobs {
- if err := s.commitLayer(ctx, blob, i); err != nil {
- return err
- }
- }
- var lastLayer string
- if len(layerBlobs) > 0 { // layerBlobs can be empty when using caches
- prev := s.indexToStorageID[len(layerBlobs)-1]
- if prev == nil {
- return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
- }
- lastLayer = *prev
- }
-
- // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
- // was originally created, in case we're just copying it. If not, no harm done.
- options := &storage.ImageOptions{}
- if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
- logrus.Debugf("setting image creation date to %s", inspect.Created)
- options.CreationDate = *inspect.Created
- }
- // Create the image record, pointing to the most-recently added layer.
- intendedID := s.imageRef.id
- if intendedID == "" {
- intendedID = s.computeID(man)
- }
- oldNames := []string{}
- img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
- if err != nil {
- if errors.Cause(err) != storage.ErrDuplicateID {
- logrus.Debugf("error creating image: %q", err)
- return errors.Wrapf(err, "creating image %q", intendedID)
- }
- img, err = s.imageRef.transport.store.Image(intendedID)
- if err != nil {
- return errors.Wrapf(err, "reading image %q", intendedID)
- }
- if img.TopLayer != lastLayer {
- logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
- return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID)
- }
- logrus.Debugf("reusing image ID %q", img.ID)
- oldNames = append(oldNames, img.Names...)
- } else {
- logrus.Debugf("created new image ID %q", img.ID)
- }
-
- // Clean up the unfinished image on any error.
- // (Is this the right thing to do if the image has existed before?)
- commitSucceeded := false
- defer func() {
- if !commitSucceeded {
- logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames)
- if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil {
- logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err)
- }
- }
- }()
-
- // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
- // we just need to screen out the ones that are actually layers to get the list of non-layers.
- dataBlobs := make(map[digest.Digest]struct{})
- for blob := range s.filenames {
- dataBlobs[blob] = struct{}{}
- }
- for _, layerBlob := range layerBlobs {
- delete(dataBlobs, layerBlob.Digest)
- }
- for blob := range dataBlobs {
- v, err := ioutil.ReadFile(s.filenames[blob])
- if err != nil {
- return errors.Wrapf(err, "copying non-layer blob %q to image", blob)
- }
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
- logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
- return errors.Wrapf(err, "saving big data %q for image %q", blob.String(), img.ID)
- }
- }
- // Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below.
- if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
- manifestDigest, err := manifest.Digest(toplevelManifest)
- if err != nil {
- return errors.Wrapf(err, "digesting top-level manifest")
- }
- key := manifestBigDataKey(manifestDigest)
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
- logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving top-level manifest for image %q", img.ID)
- }
- }
- // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
- // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
- // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
- key := manifestBigDataKey(s.manifestDigest)
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
- logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving manifest for image %q", img.ID)
- }
- key = storage.ImageDigestBigDataKey
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
- logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving manifest for image %q", img.ID)
- }
- // Save the signatures, if we have any.
- if len(s.signatures) > 0 {
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
- logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving signatures for image %q", img.ID)
- }
- }
- for instanceDigest, signatures := range s.signatureses {
- key := signatureBigDataKey(instanceDigest)
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
- logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving signatures for image %q", img.ID)
- }
- }
- // Save our metadata.
- metadata, err := json.Marshal(s)
- if err != nil {
- logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "encoding metadata for image %q", img.ID)
- }
- if len(metadata) != 0 {
- if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
- logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
- return errors.Wrapf(err, "saving metadata for image %q", img.ID)
- }
- logrus.Debugf("saved image metadata %q", string(metadata))
- }
- // Set the reference's name on the image. We don't need to worry about avoiding duplicate
- // values because SetNames() will deduplicate the list that we pass to it.
- if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil {
- names := []string{}
- if name != nil {
- names = append(names, name.String())
- }
- if len(oldNames) > 0 {
- names = append(names, oldNames...)
- }
- if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil {
- logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err)
- return errors.Wrapf(err, "setting names %v on image %q", names, img.ID)
- }
- logrus.Debugf("set names of image %q to %v", img.ID, names)
- }
-
- commitSucceeded = true
- return nil
-}
-
-var manifestMIMETypes = []string{
- imgspecv1.MediaTypeImageManifest,
- manifest.DockerV2Schema2MediaType,
- manifest.DockerV2Schema1SignedMediaType,
- manifest.DockerV2Schema1MediaType,
-}
-
-func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
- return manifestMIMETypes
-}
-
-// PutManifest writes the manifest to the destination.
-func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
- digest, err := manifest.Digest(manifestBlob)
- if err != nil {
- return err
- }
- newBlob := make([]byte, len(manifestBlob))
- copy(newBlob, manifestBlob)
- s.manifest = newBlob
- s.manifestDigest = digest
- return nil
-}
-
-// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was
-// previously supplied to PutSignatures().
-func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error {
- return nil
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
-// uploaded to the image destination, true otherwise.
-func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
- return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-func (s *storageImageDestination) MustMatchRuntimeOS() bool {
- return true
-}
-
-// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-// Does not make a difference if Reference().DockerReference() is nil.
-func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool {
- return true // Yes, we want the unmodified manifest
-}
-
-// PutSignatures records the image's signatures for committing as a single data blob.
-func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
- sizes := []int{}
- sigblob := []byte{}
- for _, sig := range signatures {
- sizes = append(sizes, len(sig))
- newblob := make([]byte, len(sigblob)+len(sig))
- copy(newblob, sigblob)
- copy(newblob[len(sigblob):], sig)
- sigblob = newblob
- }
- if instanceDigest == nil {
- s.signatures = sigblob
- s.SignatureSizes = sizes
- if len(s.manifest) > 0 {
- manifestDigest := s.manifestDigest
- instanceDigest = &manifestDigest
- }
- }
- if instanceDigest != nil {
- s.signatureses[*instanceDigest] = sigblob
- s.SignaturesSizes[*instanceDigest] = sizes
- }
- return nil
-}
-
-// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
-// signatures, and the uncompressed sizes of all of the image's layers.
-func (s *storageImageSource) getSize() (int64, error) {
- var sum int64
- // Size up the data blobs.
- dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID)
- if err != nil {
- return -1, errors.Wrapf(err, "reading image %q", s.image.ID)
- }
- for _, dataName := range dataNames {
- bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName)
- if err != nil {
- return -1, errors.Wrapf(err, "reading data blob size %q for %q", dataName, s.image.ID)
- }
- sum += bigSize
- }
- // Add the signature sizes.
- for _, sigSize := range s.SignatureSizes {
- sum += int64(sigSize)
- }
- // Walk the layer list.
- layerID := s.image.TopLayer
- for layerID != "" {
- layer, err := s.imageRef.transport.store.Layer(layerID)
- if err != nil {
- return -1, err
- }
- if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
- return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
- }
- sum += layer.UncompressedSize
- if layer.Parent == "" {
- break
- }
- layerID = layer.Parent
- }
- return sum, nil
-}
-
-// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the
-// signatures, and the uncompressed sizes of all of the image's layers.
-func (s *storageImageSource) Size() (int64, error) {
- return s.getSize()
-}
-
// Size() returns the previously-computed size of the image, with no error.
func (s *storageImageCloser) Size() (int64, error) {
return s.size, nil
diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go
index 7c6da112c74..dbb9804a6bd 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go
@@ -5,6 +5,7 @@ package storage
import (
"context"
+ "fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
@@ -12,7 +13,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -31,11 +31,11 @@ func newReference(transport storageTransport, named reference.Named, id string)
return nil, ErrInvalidReference
}
if named != nil && reference.IsNameOnly(named) {
- return nil, errors.Wrapf(ErrInvalidReference, "reference %s has neither a tag nor a digest", named.String())
+ return nil, fmt.Errorf("reference %s has neither a tag nor a digest: %w", named.String(), ErrInvalidReference)
}
if id != "" {
if err := validateImageID(id); err != nil {
- return nil, errors.Wrapf(ErrInvalidReference, "invalid ID value %q: %v", id, err)
+ return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference)
}
}
// We take a copy of the transport, which contains a pointer to the
@@ -145,12 +145,12 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
}
if s.id == "" {
logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
- return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport())
+ return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
}
if loadedImage == nil {
img, err := s.transport.store.Image(s.id)
if err != nil {
- return nil, errors.Wrapf(err, "reading image %q", s.id)
+ return nil, fmt.Errorf("reading image %q: %w", s.id, err)
}
loadedImage = img
}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go
new file mode 100644
index 00000000000..d4288dade59
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_src.go
@@ -0,0 +1,380 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+type storageImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoGetBlobAtInitialize
+
+ imageRef storageReference
+ image *storage.Image
+ systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
+ layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
+ cachedManifest []byte // A cached copy of the manifest, if already known, or nil
+ getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
+}
+
+// newImageSource sets up an image for reading.
+func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
+ // First, locate the image.
+ img, err := imageRef.resolveImage(sys)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build the reader object.
+ image := &storageImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef),
+
+ imageRef: imageRef,
+ systemContext: sys,
+ image: img,
+ layerPosition: make(map[digest.Digest]int),
+ SignatureSizes: []int{},
+ SignaturesSizes: make(map[digest.Digest][]int),
+ }
+ image.Compat = impl.AddCompat(image)
+ if img.Metadata != "" {
+ if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+ return nil, fmt.Errorf("decoding metadata for source image: %w", err)
+ }
+ }
+ return image, nil
+}
+
+// Reference returns the image reference that we used to find this image.
+func (s *storageImageSource) Reference() types.ImageReference {
+ return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s *storageImageSource) Close() error {
+ return nil
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+ if info.Digest == image.GzippedEmptyLayerDigest {
+ return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+ }
+
+ // NOTE: the blob is first written to a temporary file and subsequently
+ // closed. The intention is to keep the time we own the storage lock
+ // as short as possible to allow other processes to access the storage.
+ rc, n, _, err = s.getBlobAndLayerID(info)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer rc.Close()
+
+ tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if _, err := io.Copy(tmpFile, rc); err != nil {
+ return nil, 0, err
+ }
+
+ if _, err := tmpFile.Seek(0, 0); err != nil {
+ return nil, 0, err
+ }
+
+ wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error {
+ defer os.Remove(tmpFile.Name())
+ return tmpFile.Close()
+ })
+
+ return wrapper, n, err
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+ var layer storage.Layer
+ var diffOptions *storage.DiffOptions
+ // We need a valid digest value.
+ err = info.Digest.Validate()
+ if err != nil {
+ return nil, -1, "", err
+ }
+ // Check if the blob corresponds to a diff that was used to initialize any layers. Our
+ // callers should try to retrieve layers using their uncompressed digests, so no need to
+ // check if they're using one of the compressed digests, which we can't reproduce anyway.
+ layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
+
+ // If it's not a layer, then it must be a data item.
+ if len(layers) == 0 {
+ b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
+ if err != nil {
+ return nil, -1, "", err
+ }
+ r := bytes.NewReader(b)
+ logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
+ return io.NopCloser(r), int64(r.Len()), "", nil
+ }
+ // Step through the list of matching layers. Tests may want to verify that, if we have multiple layers
+ // claiming the same contents, we actually do have multiple layers; otherwise we could
+ // just go ahead and use the first one every time.
+ s.getBlobMutex.Lock()
+ i := s.layerPosition[info.Digest]
+ s.layerPosition[info.Digest] = i + 1
+ s.getBlobMutex.Unlock()
+ if len(layers) > 0 {
+ layer = layers[i%len(layers)]
+ }
+ // Force the storage layer to not try to match any compression that was used when the layer was first
+ // handed to it.
+ noCompression := archive.Uncompressed
+ diffOptions = &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ if layer.UncompressedSize < 0 {
+ n = -1
+ } else {
+ n = layer.UncompressedSize
+ }
+ logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
+ rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+ if err != nil {
+ return nil, -1, "", err
+ }
+ return rc, n, layer.ID, err
+}
+
+// GetManifest() reads the image's manifest.
+func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+ if instanceDigest != nil {
+ key := manifestBigDataKey(*instanceDigest)
+ blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil {
+ return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err)
+ }
+ return blob, manifest.GuessMIMEType(blob), err
+ }
+ if len(s.cachedManifest) == 0 {
+ // The manifest is stored as a big data item.
+ // Prefer the manifest corresponding to the user-specified digest, if available.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ key := manifestBigDataKey(digested.Digest())
+ blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
+ return nil, "", err
+ }
+ if err == nil {
+ s.cachedManifest = blob
+ }
+ }
+ }
+ // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
+ // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
+ if len(s.cachedManifest) == 0 {
+ cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
+ if err != nil {
+ return nil, "", err
+ }
+ s.cachedManifest = cachedBlob
+ }
+ }
+ return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
+}
+
+// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
+// the image, after they've been decompressed.
+func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("reading image manifest for %q: %w", s.image.ID, err)
+ }
+ if manifest.MIMETypeIsMultiImage(manifestType) {
+ return nil, errors.New("can't copy layers for a manifest list (shouldn't be attempted)")
+ }
+ man, err := manifest.FromBlob(manifestBlob, manifestType)
+ if err != nil {
+ return nil, fmt.Errorf("parsing image manifest for %q: %w", s.image.ID, err)
+ }
+
+ uncompressedLayerType := ""
+ switch manifestType {
+ case imgspecv1.MediaTypeImageManifest:
+ uncompressedLayerType = imgspecv1.MediaTypeImageLayer
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
+ uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+ }
+
+ physicalBlobInfos := []types.BlobInfo{}
+ layerID := s.image.TopLayer
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
+ }
+ if layer.UncompressedDigest == "" {
+ return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID)
+ }
+ if layer.UncompressedSize < 0 {
+ return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
+ }
+ blobInfo := types.BlobInfo{
+ Digest: layer.UncompressedDigest,
+ Size: layer.UncompressedSize,
+ MediaType: uncompressedLayerType,
+ }
+ physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
+ layerID = layer.Parent
+ }
+
+ res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
+ if err != nil {
+ return nil, fmt.Errorf("creating LayerInfosForCopy of image %q: %w", s.image.ID, err)
+ }
+ return res, nil
+}
+
+// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
+// but using layer data which we can actually produce — physicalInfos for non-empty layers,
+// and image.GzippedEmptyLayer for empty ones.
+// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
+func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
+ nextPhysical := 0
+ res := make([]types.BlobInfo, len(manifestInfos))
+ for i, mi := range manifestInfos {
+ if mi.EmptyLayer {
+ res[i] = types.BlobInfo{
+ Digest: image.GzippedEmptyLayerDigest,
+ Size: int64(len(image.GzippedEmptyLayer)),
+ MediaType: mi.MediaType,
+ }
+ } else {
+ if nextPhysical >= len(physicalInfos) {
+ return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
+ }
+ res[i] = physicalInfos[nextPhysical]
+ nextPhysical++
+ }
+ }
+ if nextPhysical != len(physicalInfos) {
+ return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
+ }
+ return res, nil
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ var offset int
+ signatureBlobs := []byte{}
+ signatureSizes := s.SignatureSizes
+ key := "signatures"
+ instance := "default instance"
+ if instanceDigest != nil {
+ signatureSizes = s.SignaturesSizes[*instanceDigest]
+ key = signatureBigDataKey(*instanceDigest)
+ instance = instanceDigest.Encoded()
+ }
+ if len(signatureSizes) > 0 {
+ data, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil {
+ return nil, fmt.Errorf("looking up signatures data for image %q (%s): %w", s.image.ID, instance, err)
+ }
+ signatureBlobs = data
+ }
+ res := []signature.Signature{}
+ for _, length := range signatureSizes {
+ if offset+length > len(signatureBlobs) {
+ return nil, fmt.Errorf("looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signatureBlobs), offset+length)
+ }
+ sig, err := signature.FromBlob(signatureBlobs[offset : offset+length])
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature at (%d, %d): %w", offset, length, err)
+ }
+ res = append(res, sig)
+ offset += length
+ }
+ if offset != len(signatureBlobs) {
+ return nil, fmt.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatureBlobs)-offset)
+ }
+ return res, nil
+}
+
+// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) getSize() (int64, error) {
+ var sum int64
+ // Size up the data blobs.
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID)
+ if err != nil {
+ return -1, fmt.Errorf("reading image %q: %w", s.image.ID, err)
+ }
+ for _, dataName := range dataNames {
+ bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName)
+ if err != nil {
+ return -1, fmt.Errorf("reading data blob size %q for %q: %w", dataName, s.image.ID, err)
+ }
+ sum += bigSize
+ }
+ // Add the signature sizes.
+ for _, sigSize := range s.SignatureSizes {
+ sum += int64(sigSize)
+ }
+ // Walk the layer list.
+ layerID := s.image.TopLayer
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return -1, err
+ }
+ if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
+ return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+ }
+ sum += layer.UncompressedSize
+ if layer.Parent == "" {
+ break
+ }
+ layerID = layer.Parent
+ }
+ return sum, nil
+}
+
+// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) Size() (int64, error) {
+ return s.getSize()
+}
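The new GetBlob above buffers the blob into a temporary file before returning, so the storage lock is held only for the duration of the copy, and it hands back a ReadCloser that deletes the file on Close. A rough sketch of that pattern, with a local wrapper type standing in for ioutils.NewReadCloserWrapper:

    package main

    import (
        "fmt"
        "io"
        "os"
        "strings"
    )

    // readCloserWrapper is a ReadCloser whose Close runs a custom hook,
    // the same shape as ioutils.NewReadCloserWrapper from containers/storage.
    type readCloserWrapper struct {
        io.Reader
        closer func() error
    }

    func (w *readCloserWrapper) Close() error { return w.closer() }

    // bufferToTemp drains rc into a temporary file and returns a ReadCloser
    // that removes the file when closed.
    func bufferToTemp(rc io.ReadCloser) (io.ReadCloser, error) {
        defer rc.Close()
        tmp, err := os.CreateTemp("", "blob-")
        if err != nil {
            return nil, err
        }
        if _, err := io.Copy(tmp, rc); err != nil {
            tmp.Close()
            os.Remove(tmp.Name())
            return nil, err
        }
        if _, err := tmp.Seek(0, io.SeekStart); err != nil {
            tmp.Close()
            os.Remove(tmp.Name())
            return nil, err
        }
        return &readCloserWrapper{Reader: tmp, closer: func() error {
            defer os.Remove(tmp.Name())
            return tmp.Close()
        }}, nil
    }

    func main() {
        rc, err := bufferToTemp(io.NopCloser(strings.NewReader("layer bytes")))
        if err != nil {
            panic(err)
        }
        defer rc.Close()
        b, _ := io.ReadAll(rc)
        fmt.Println(string(b)) // layer bytes
    }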
diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go
index ab59c8a290e..104e9d8ccab 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go
@@ -4,6 +4,7 @@
package storage
import (
+ "errors"
"fmt"
"path/filepath"
"strings"
@@ -14,7 +15,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -117,13 +117,13 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
if ref == "" {
- return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref)
+ return nil, fmt.Errorf("%q is an empty reference: %w", ref, ErrInvalidReference)
}
if ref[0] == '[' {
// Ignore the store specifier.
closeIndex := strings.IndexRune(ref, ']')
if closeIndex < 1 {
- return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref)
+ return nil, fmt.Errorf("store specifier in %q did not end: %w", ref, ErrInvalidReference)
}
ref = ref[closeIndex+1:]
}
@@ -135,7 +135,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
if split != -1 {
possibleID := ref[split+1:]
if possibleID == "" {
- return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref)
+ return nil, fmt.Errorf("empty trailing digest or ID in %q: %w", ref, ErrInvalidReference)
}
// If it looks like a digest, leave it alone for now.
if _, err := digest.Parse(possibleID); err != nil {
@@ -147,7 +147,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
// so we might as well use the expanded value.
id = img.ID
} else {
- return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID)
+ return nil, fmt.Errorf("%q does not look like an image ID or digest: %w", possibleID, ErrInvalidReference)
}
// We have recognized an image ID; peel it off.
ref = ref[:split]
@@ -173,7 +173,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
var err error
named, err = reference.ParseNormalizedNamed(ref)
if err != nil {
- return nil, errors.Wrapf(err, "parsing named reference %q", ref)
+ return nil, fmt.Errorf("parsing named reference %q: %w", ref, err)
}
named = reference.TagNameOnly(named)
}
@@ -225,7 +225,7 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
// needs to match a store that was previously initialized using
// storage.GetStore(), or be enough to let the storage library fill out
// the rest using knowledge that it has from elsewhere.
- if reference[0] == '[' {
+ if len(reference) > 0 && reference[0] == '[' {
closeIndex := strings.IndexRune(reference, ']')
if closeIndex < 1 {
return nil, ErrInvalidReference
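The ParseReference hunk above adds a length guard before indexing the first byte of the reference. A tiny sketch of why it matters: indexing an empty string panics with "index out of range", while the guarded form fails gracefully (parse is a hypothetical stand-in, not the real function):

    package main

    import "fmt"

    // parse mimics the guard added to ParseReference.
    func parse(reference string) (string, error) {
        if len(reference) > 0 && reference[0] == '[' {
            return "", fmt.Errorf("store specifiers are not handled in this sketch")
        }
        return reference, nil
    }

    func main() {
        fmt.Println(parse(""))               // safe: no panic on the empty string
        fmt.Println(parse("busybox:latest")) // passes through unchanged
    }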
diff --git a/vendor/github.com/containers/image/v5/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go
index e9d321b8f87..064c78b1776 100644
--- a/vendor/github.com/containers/image/v5/tarball/doc.go
+++ b/vendor/github.com/containers/image/v5/tarball/doc.go
@@ -2,6 +2,7 @@
// tarballs and an optional template configuration.
//
// An example:
+//
// package main
//
// import (
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
index 23f67c49e62..690067ec3da 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
@@ -7,7 +7,7 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
- "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -67,16 +67,7 @@ func (r *tarballReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := r.NewImageSource(ctx, sys)
- if err != nil {
- return nil, err
- }
- img, err := image.FromSource(ctx, sys, src)
- if err != nil {
- src.Close()
- return nil, err
- }
- return img, nil
+ return image.FromReference(ctx, sys, r)
}
func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
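NewImage now delegates to image.FromReference instead of hand-rolling the open-then-wrap sequence. A generic sketch of the pattern being consolidated, with stand-in types rather than the real c/image API, showing the close-on-error bookkeeping the helper removes from each transport:

    package main

    import (
        "errors"
        "fmt"
    )

    type source struct{ closed bool }

    func (s *source) Close() error { s.closed = true; return nil }

    func newSource() (*source, error) { return &source{}, nil }

    // wrap stands in for image.FromSource; it can fail after the source is open.
    func wrap(s *source) (string, error) {
        if s.closed {
            return "", errors.New("source already closed")
        }
        return "image", nil
    }

    // newImage shows the open/wrap/close-on-failure dance every transport
    // used to hand-write, and that a helper like image.FromReference centralizes.
    func newImage() (string, error) {
        s, err := newSource()
        if err != nil {
            return "", err
        }
        img, err := wrap(s)
        if err != nil {
            s.Close() // don't leak the source when wrapping fails
            return "", err
        }
        return img, nil
    }

    func main() {
        fmt.Println(newImage())
    }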
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
index 694ad17bd1a..1dc4c3ad94b 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
@@ -6,12 +6,13 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"runtime"
"strings"
"time"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/types"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
@@ -20,6 +21,12 @@ import (
)
type tarballImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
reference tarballReference
filenames []string
diffIDs []digest.Digest
@@ -87,7 +94,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
uncompressed = nil
}
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- n, err := io.Copy(ioutil.Discard, reader)
+ n, err := io.Copy(io.Discard, reader)
if err != nil {
return nil, fmt.Errorf("error reading %q: %v", filename, err)
}
@@ -186,6 +193,11 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
// Return the image.
src := &tarballImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(r),
+
reference: *r,
filenames: filenames,
diffIDs: diffIDs,
@@ -198,6 +210,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
configSize: configSize,
manifest: manifestBytes,
}
+ src.Compat = impl.AddCompat(src)
return src, nil
}
@@ -206,25 +219,20 @@ func (is *tarballImageSource) Close() error {
return nil
}
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (is *tarballImageSource) HasThreadSafeGetBlob() bool {
- return false
-}
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
// We should only be asked about things in the manifest. Maybe the configuration blob.
if blobinfo.Digest == is.configID {
- return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
+ return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
}
// Maybe one of the layer blobs.
for i := range is.blobIDs {
if blobinfo.Digest == is.blobIDs[i] {
// We want to read that layer: open the file or memory block and hand it back.
if is.filenames[i] == "-" {
- return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
+ return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
}
reader, err := os.Open(is.filenames[i])
if err != nil {
@@ -247,28 +255,6 @@ func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *d
return is.manifest, imgspecv1.MediaTypeImageManifest, nil
}
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
-// as there can be no secondary manifests.
-func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName)
- }
- return nil, nil
-}
-
func (is *tarballImageSource) Reference() types.ImageReference {
return &is.reference
}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (*tarballImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
- return nil, nil
-}
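The tarball source now embeds impl/stubs helpers instead of hand-writing HasThreadSafeGetBlob, GetSignatures, and LayerInfosForCopy, which is why those methods are deleted above. The mechanism is plain Go struct embedding; a minimal sketch with invented names:

    package main

    import "fmt"

    // noSignatures supplies a default method by embedding, the way the
    // impl/stubs helpers supply defaults to tarballImageSource.
    type noSignatures struct{}

    func (noSignatures) GetSignatures() ([][]byte, error) { return nil, nil }

    type imageSource struct {
        noSignatures // the stub satisfies the interface; no hand-written method
        name         string
    }

    type withSignatures interface {
        GetSignatures() ([][]byte, error)
    }

    func main() {
        var src withSignatures = imageSource{name: "tarball"}
        sigs, err := src.GetSignatures()
        fmt.Println(sigs, err) // [] <nil>
    }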
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
index d407c657fae..63d835530b5 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
@@ -3,7 +3,7 @@ package tarball
import (
"errors"
"fmt"
- "io/ioutil"
+ "io"
"os"
"strings"
@@ -36,7 +36,7 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc
filenames := strings.Split(reference, separator)
for _, filename := range filenames {
if filename == "-" {
- stdin, err = ioutil.ReadAll(os.Stdin)
+ stdin, err = io.ReadAll(os.Stdin)
if err != nil {
return nil, fmt.Errorf("error buffering stdin: %v", err)
}
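Another recurring mechanical change in this diff: io/ioutil helpers are replaced by their Go 1.16 stdlib homes (io.ReadAll, io.Discard, io.NopCloser, os.ReadFile, os.CreateTemp). The replacements are drop-in; a one-liner demonstrating the new call:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    func main() {
        // io.ReadAll behaves exactly like the deprecated ioutil.ReadAll;
        // only the package moved.
        data, err := io.ReadAll(strings.NewReader("tarball from stdin"))
        fmt.Println(string(data), err)
    }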
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
index 2110a091d28..62d767b5833 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
@@ -1,6 +1,7 @@
package alltransports
import (
+ "fmt"
"strings"
// register all known transports
@@ -12,12 +13,13 @@ import (
_ "github.com/containers/image/v5/oci/archive"
_ "github.com/containers/image/v5/oci/layout"
_ "github.com/containers/image/v5/openshift"
+ _ "github.com/containers/image/v5/sif"
_ "github.com/containers/image/v5/tarball"
+
// The ostree transport is registered by ostree*.go
// The storage transport is registered by storage*.go
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
- "github.com/pkg/errors"
)
// ParseImageName converts a URL-like image name to a types.ImageReference.
@@ -25,11 +27,11 @@ func ParseImageName(imgName string) (types.ImageReference, error) {
// Keep this in sync with TransportFromImageName!
parts := strings.SplitN(imgName, ":", 2)
if len(parts) != 2 {
- return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
+ return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
}
transport := transports.Get(parts[0])
if transport == nil {
- return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
+ return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
}
return transport.ParseReference(parts[1])
}
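ParseImageName splits on the first colon only, so colons inside the reference part (tags, registry ports) survive intact. A sketch of that parsing step; splitImageName is a hypothetical helper, not the c/image API:

    package main

    import (
        "fmt"
        "strings"
    )

    // splitImageName mirrors the SplitN call in ParseImageName: limit 2 keeps
    // everything after the first colon together as the reference.
    func splitImageName(imgName string) (transport, ref string, err error) {
        parts := strings.SplitN(imgName, ":", 2)
        if len(parts) != 2 {
            return "", "", fmt.Errorf(`invalid image name %q, expected colon-separated transport:reference`, imgName)
        }
        return parts[0], parts[1], nil
    }

    func main() {
        fmt.Println(splitImageName("docker://quay.io/podman/stable:latest"))
        fmt.Println(splitImageName("no-colon"))
    }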
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
index 354b3f6631b..7075b09e093 100644
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -177,24 +177,25 @@ type BICReplacementCandidate struct {
// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
//
// It records two kinds of data:
-// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
-// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
-// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
-// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/
//
-// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
-// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
//
-// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
-// compress/decompress blobs for their own purposes.
+// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
//
-// - Known blob locations, managed by individual transports:
-// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
-// recording transport-specific information that allows the transport to reuse the blob in the future;
-// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+// compress/decompress blobs for their own purposes.
//
-// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in, the docker/distribution case, blobs
-// can be directly reused within a registry, or mounted across registries within a registry server.)
+// - Known blob locations, managed by individual transports:
+// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+// recording transport-specific information that allows the transport to reuse the blob in the future;
+// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
+// can be directly reused within a registry, or mounted across registries within a registry server.)
//
// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
// users of the cache should just fall back to copying the blobs the usual way.
@@ -465,7 +466,17 @@ type ImageInspectInfo struct {
Variant string
Os string
Layers []string
+ LayersData []ImageInspectLayer
Env []string
+ Author string
+}
+
+// ImageInspectLayer is a set of metadata describing an image layer's details
+type ImageInspectLayer struct {
+ MIMEType string // "" if unknown.
+ Digest digest.Digest
+ Size int64 // -1 if unknown.
+ Annotations map[string]string
}
// DockerAuthConfig contains authorization information for connecting to a registry.
@@ -561,6 +572,11 @@ type SystemContext struct {
UserShortNameAliasConfPath string
// If set, short-name resolution in pkg/shortnames must follow the specified mode
ShortNameMode *ShortNameMode
+ // If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and
+ // short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce
+ // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
+ // specific context.
+ PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
// If not "", overrides the default path for the authentication file, but only new format files
AuthFilePath string
// if not "", overrides the default path for the authentication file, but with the legacy format;
@@ -622,6 +638,10 @@ type SystemContext struct {
DockerLogMirrorChoice bool
// Directory to use for OSTree temporary files
OSTreeTmpDirPath string
+ // If true, all blobs will have precomputed digests, ensuring that layers which already exist on the registry are not uploaded again.
+ // Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
+ // when the digest for a blob is unknown.
+ DockerRegistryPushPrecomputeDigests bool
// === docker/daemon.Transport overrides ===
// A directory containing a CA certificate (ending with ".crt"),
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index b9f8c3e9ffe..b7734837c3d 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 16
+ VersionMinor = 23
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 0
+ VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
new file mode 100644
index 00000000000..83b061c70b2
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The libtrust Project Community Code of Conduct
+
+The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md
new file mode 100644
index 00000000000..fab2c41e89b
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the libtrust Project
+
+The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml
index e4dd4a4021c..1036c8d3f4f 100644
--- a/vendor/github.com/containers/ocicrypt/.travis.yml
+++ b/vendor/github.com/containers/ocicrypt/.travis.yml
@@ -21,7 +21,7 @@ addons:
go_import_path: github.com/containers/ocicrypt
install:
- - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0
+ - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.46.2
script:
- make
diff --git a/vendor/github.com/containers/ocicrypt/MAINTAINERS b/vendor/github.com/containers/ocicrypt/MAINTAINERS
index e6a7d1f0a7d..af38d03bffb 100644
--- a/vendor/github.com/containers/ocicrypt/MAINTAINERS
+++ b/vendor/github.com/containers/ocicrypt/MAINTAINERS
@@ -3,3 +3,4 @@
# Github ID, Name, Email Address
lumjjb, Brandon Lum, lumjjb@gmail.com
stefanberger, Stefan Berger, stefanb@linux.ibm.com
+arronwy, Arron Wang, arron.wang@intel.com
diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go
index a789d052dc5..c537a20a0a1 100644
--- a/vendor/github.com/containers/ocicrypt/config/constructors.go
+++ b/vendor/github.com/containers/ocicrypt/config/constructors.go
@@ -21,7 +21,7 @@ import (
"strings"
"github.com/pkg/errors"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)
// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys
diff --git a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
index 76be3413822..5a8bc022cb7 100644
--- a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
+++ b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
@@ -24,7 +24,7 @@ import (
"github.com/containers/ocicrypt/crypto/pkcs11"
"github.com/pkg/errors"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)
// OcicryptConfig represents the format of an imgcrypt.conf config file
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
index 7fcd2e3af68..c6d47e8300a 100644
--- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
@@ -17,7 +17,7 @@ import (
"fmt"
"github.com/pkg/errors"
pkcs11uri "github.com/stefanberger/go-pkcs11uri"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)
// Pkcs11KeyFile describes the format of the pkcs11 (private) key file.
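The three ocicrypt config files above switch from gopkg.in/yaml.v2 to gopkg.in/yaml.v3; the Unmarshal call shape is unchanged. A small sketch with an illustrative struct loosely following the pkcs11 key-file layout (field names here are assumptions, not the exact ocicrypt definition):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    // keyFile loosely mirrors the Pkcs11KeyFile shape.
    type keyFile struct {
        Pkcs11 struct {
            URI string `yaml:"uri"`
        } `yaml:"pkcs11"`
    }

    func main() {
        doc := []byte("pkcs11:\n  uri: pkcs11:token=mytoken;object=mykey\n")
        var k keyFile
        if err := yaml.Unmarshal(doc, &k); err != nil {
            panic(err)
        }
        fmt.Println(k.Pkcs11.URI)
    }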
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
index 448e88c7cd9..7d80f5f844b 100644
--- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
@@ -40,8 +40,6 @@ import (
var (
// OAEPLabel defines the label we use for OAEP encryption; this cannot be changed
OAEPLabel = []byte("")
- // OAEPDefaultHash defines the default hash used for OAEP encryption; this cannot be changed
- OAEPDefaultHash = "sha1"
// OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM
OAEPSha1Params = &pkcs11.OAEPParams{
@@ -69,12 +67,12 @@ func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, stri
)
oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
- // The default is 'sha1'
+ // The default is sha256 (previously was sha1)
switch strings.ToLower(oaephash) {
- case "sha1", "":
+ case "sha1":
hashfunc = sha1.New()
hashalg = "sha1"
- case "sha256":
+ case "sha256", "":
hashfunc = sha256.New()
hashalg = "sha256"
default:
@@ -283,12 +281,12 @@ func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, s
var oaep *pkcs11.OAEPParams
oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
- // the default is sha1
+ // The default is sha256 (previously was sha1)
switch strings.ToLower(oaephash) {
- case "sha1", "":
+ case "sha1":
oaep = OAEPSha1Params
hashalg = "sha1"
- case "sha256":
+ case "sha256", "":
oaep = OAEPSha256Params
hashalg = "sha256"
default:
@@ -333,7 +331,7 @@ func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hash
var oaep *pkcs11.OAEPParams
- // the default is sha1
+ // An empty string from the Hash in the JSON historically defaults to sha1.
switch hashalg {
case "sha1", "":
oaep = OAEPSha1Params
@@ -410,9 +408,6 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
return nil, err
}
- if hashalg == OAEPDefaultHash {
- hashalg = ""
- }
recipient := Pkcs11Recipient{
Version: 0,
Blob: base64.StdEncoding.EncodeToString(ciphertext),
@@ -431,15 +426,18 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
// {
//    "version": 0,
//    "blob": <base64 encoded pkcs11 blob>,
-//    "hash": <hash used for OAEP other than 'sha1'>
+//    "hash": <hash used for OAEP other than 'sha256'>
// } ,
// {
//    "version": 0,
//    "blob": <base64 encoded pkcs11 blob>,
-//    "hash": <hash used for OAEP other than 'sha1'>
+//    "hash": <hash used for OAEP other than 'sha256'>
// } ,
// [...]
// }
+// Note: More recent versions of this code explicitly write 'sha1'
+// while older versions left it empty in case of 'sha1'.
+//
func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
pkcs11blob := Pkcs11Blob{}
err := json.Unmarshal(pkcs11blobstr, &pkcs11blob)
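The two switch statements changed above flip the default OAEP hash from sha1 to sha256 whenever OCICRYPT_OAEP_HASHALG is unset; sha1 must now be requested explicitly. A condensed sketch of the selection logic:

    package main

    import (
        "crypto/sha1"
        "crypto/sha256"
        "fmt"
        "hash"
        "os"
        "strings"
    )

    // pickOAEPHash mirrors the updated switch: the empty string (variable
    // unset) now selects sha256 instead of sha1.
    func pickOAEPHash() (hash.Hash, string, error) {
        switch strings.ToLower(os.Getenv("OCICRYPT_OAEP_HASHALG")) {
        case "sha1":
            return sha1.New(), "sha1", nil
        case "sha256", "":
            return sha256.New(), "sha256", nil
        default:
            return nil, "", fmt.Errorf("unsupported OAEP hash algorithm")
        }
    }

    func main() {
        _, alg, err := pickOAEPHash()
        fmt.Println(alg, err) // "sha256 <nil>" when the variable is unset
    }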
diff --git a/vendor/github.com/containers/ocicrypt/go.mod b/vendor/github.com/containers/ocicrypt/go.mod
index 02be185915f..46ee2a28994 100644
--- a/vendor/github.com/containers/ocicrypt/go.mod
+++ b/vendor/github.com/containers/ocicrypt/go.mod
@@ -5,9 +5,9 @@ go 1.12
require (
github.com/golang/protobuf v1.4.3
github.com/google/go-cmp v0.5.2 // indirect
- github.com/miekg/pkcs11 v1.0.3
+ github.com/miekg/pkcs11 v1.1.1
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/image-spec v1.0.1
+ github.com/opencontainers/image-spec v1.0.2
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.7.0
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
@@ -17,5 +17,5 @@ require (
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
google.golang.org/grpc v1.33.2
gopkg.in/square/go-jose.v2 v2.5.1
- gopkg.in/yaml.v2 v2.4.0
+ gopkg.in/yaml.v3 v3.0.0
)
diff --git a/vendor/github.com/containers/ocicrypt/go.sum b/vendor/github.com/containers/ocicrypt/go.sum
index 7153900dafc..86e36e76801 100644
--- a/vendor/github.com/containers/ocicrypt/go.sum
+++ b/vendor/github.com/containers/ocicrypt/go.sum
@@ -30,12 +30,12 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -111,7 +111,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index 20bede452dd..e3e2b58f9e9 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -17,19 +17,17 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
###
- FEDORA_NAME: "fedora-34"
- PRIOR_FEDORA_NAME: "fedora-33"
- UBUNTU_NAME: "ubuntu-2104"
- PRIOR_UBUNTU_NAME: "ubuntu-2010"
+ FEDORA_NAME: "fedora-36"
+ PRIOR_FEDORA_NAME: "fedora-35"
+ UBUNTU_NAME: "ubuntu-2204"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
- _BUILT_IMAGE_SUFFIX: "c6248193773010944"
- FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}"
- PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}"
- UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}"
- PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${_BUILT_IMAGE_SUFFIX}"
+ IMAGE_SUFFIX: "c5878804328480768"
+ FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
+ PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
+ UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
####
#### Command variables to help avoid duplication
@@ -115,20 +113,11 @@ ubuntu_testing_task: &ubuntu_testing
TEST_DRIVER: "overlay"
-prior_ubuntu_testing_task:
- <<: *ubuntu_testing
- alias: prior_ubuntu_testing
- name: *std_test_name
- env:
- OS_NAME: "${PRIOR_UBUNTU_NAME}"
- VM_IMAGE: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
-
-
lint_task:
env:
CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
container:
- image: golang:1.15
+ image: golang:1.16
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
@@ -143,7 +132,7 @@ lint_task:
meta_task:
container:
- image: "quay.io/libpod/imgts:${_BUILT_IMAGE_SUFFIX}"
+ image: "quay.io/libpod/imgts:latest"
cpu: 1
memory: 1
@@ -153,7 +142,6 @@ meta_task:
${FEDORA_CACHE_IMAGE_NAME}
${PRIOR_FEDORA_CACHE_IMAGE_NAME}
${UBUNTU_CACHE_IMAGE_NAME}
- ${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826]
@@ -166,7 +154,7 @@ meta_task:
vendor_task:
container:
- image: golang:1.15
+ image: golang:1.16
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
@@ -181,10 +169,9 @@ success_task:
- fedora_testing
- prior_fedora_testing
- ubuntu_testing
- - prior_ubuntu_testing
- meta
- vendor
container:
- image: golang:1.15
+ image: golang:1.16
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
script: /bin/true
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index dbc1f7c9987..244576d546a 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -51,13 +51,16 @@ sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.
containers-storage: $(sources) ## build using gc on the host
$(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
+codespell:
+ codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L flate,uint,iff,od,ERRO -w
+
binary local-binary: containers-storage
local-gccgo: ## build using gccgo on the host
GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
-local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd
- @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 ; do \
+local-cross: ## cross build the binaries for arm, darwin, and freebsd
+ @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
os=`echo $${target} | cut -f1 -d/` ; \
arch=`echo $${target} | cut -f2 -d/` ; \
suffix=$${os}.$${arch} ; \
@@ -66,44 +69,44 @@ local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd
done
cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs
- $(RUNINVM) make local-$@
+ $(RUNINVM) $(MAKE) local-$@
docs: install.tools ## build the docs on the host
$(MAKE) -C docs docs
gccgo: ## build using gccgo using VMs
- $(RUNINVM) make local-$@
+ $(RUNINVM) $(MAKE) local-$@
test: local-binary ## build the binaries and run the tests using VMs
- $(RUNINVM) make local-binary local-cross local-test-unit local-test-integration
+ $(RUNINVM) $(MAKE) local-binary local-cross local-test-unit local-test-integration
local-test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges)
@$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
test-unit: local-binary ## run the unit tests using VMs
- $(RUNINVM) make local-$@
+ $(RUNINVM) $(MAKE) local-$@
local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges)
@cd tests; ./test_runner.bash
test-integration: local-binary ## run the integration tests using VMs
- $(RUNINVM) make local-$@
+ $(RUNINVM) $(MAKE) local-$@
local-validate: ## validate DCO and gofmt on the host
@./hack/git-validation.sh
@./hack/gofmt.sh
validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs
- $(RUNINVM) make local-$@
+ $(RUNINVM) $(MAKE) local-$@
install.tools:
- make -C tests/tools
+ $(MAKE) -C tests/tools
$(FFJSON):
- make -C tests/tools
+ $(MAKE) -C tests/tools
install.docs: docs
- make -C docs install
+ $(MAKE) -C docs install
install: install.docs
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 39fc130ef85..b978278f05f 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.36.0
+1.43.0
diff --git a/vendor/github.com/containers/storage/Vagrantfile b/vendor/github.com/containers/storage/Vagrantfile
deleted file mode 100644
index c82c1f81bd3..00000000000
--- a/vendor/github.com/containers/storage/Vagrantfile
+++ /dev/null
@@ -1,25 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-# The fedora/28-cloud-base and debian/jessie64 boxes are also available for
-# the "virtualbox" provider. Set the VAGRANT_PROVIDER environment variable to
-# "virtualbox" to use them instead.
-#
-Vagrant.configure("2") do |config|
- config.vm.define "fedora" do |c|
- c.vm.box = "fedora/28-cloud-base"
- c.vm.synced_folder ".", "/vagrant", type: "rsync",
- rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"]
- c.vm.provision "shell", inline: <<-SHELL
- sudo /vagrant/vagrant/provision.sh
- SHELL
- end
- config.vm.define "debian" do |c|
- c.vm.box = "debian/jessie64"
- c.vm.synced_folder ".", "/vagrant", type: "rsync",
- rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"]
- c.vm.provision "shell", inline: <<-SHELL
- sudo /vagrant/vagrant/provision.sh
- SHELL
- end
-end
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index b4f773f2b73..4f2b61f52cd 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -1,8 +1,8 @@
package storage
import (
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"sync"
@@ -13,7 +13,6 @@ import (
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)
// A Container is a reference to a read-write layer with metadata.
@@ -84,8 +83,17 @@ type ContainerStore interface {
// SetNames updates the list of names associated with the container
// with the specified ID.
+ // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
SetNames(id string, names []string) error
+ // AddNames adds the supplied values to the list of names associated with the container with
+ // the specified id.
+ AddNames(id string, names []string) error
+
+ // RemoveNames removes the supplied values from the list of names associated with the container with
+ // the specified id.
+ RemoveNames(id string, names []string) error
+
// Get retrieves information about a container given an ID or name.
Get(id string) (*Container, error)
@@ -135,26 +143,26 @@ func copyContainer(c *Container) *Container {
}
func (c *Container) MountLabel() string {
- if label, ok := c.Flags["MountLabel"].(string); ok {
+ if label, ok := c.Flags[mountLabelFlag].(string); ok {
return label
}
return ""
}
func (c *Container) ProcessLabel() string {
- if label, ok := c.Flags["ProcessLabel"].(string); ok {
+ if label, ok := c.Flags[processLabelFlag].(string); ok {
return label
}
return ""
}
func (c *Container) MountOpts() []string {
- switch c.Flags["MountOpts"].(type) {
+ switch c.Flags[mountOptsFlag].(type) {
case []string:
- return c.Flags["MountOpts"].([]string)
+ return c.Flags[mountOptsFlag].([]string)
case []interface{}:
var mountOpts []string
- for _, v := range c.Flags["MountOpts"].([]interface{}) {
+ for _, v := range c.Flags[mountOptsFlag].([]interface{}) {
if flag, ok := v.(string); ok {
mountOpts = append(mountOpts, flag)
}
@@ -188,7 +196,7 @@ func (r *containerStore) datapath(id, key string) string {
func (r *containerStore) Load() error {
needSave := false
rpath := r.containerspath()
- data, err := ioutil.ReadFile(rpath)
+ data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -312,18 +320,23 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
return nil, ErrDuplicateID
}
if options.MountOpts != nil {
- options.Flags["MountOpts"] = append([]string{}, options.MountOpts...)
+ options.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
}
if options.Volatile {
- options.Flags["Volatile"] = true
+ options.Flags[volatileFlag] = true
}
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
- return nil, errors.Wrapf(ErrDuplicateName,
- fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". You have to remove that container to be able to reuse that name.", name, r.byname[name].ID))
+ return nil, fmt.Errorf("the container name %q is already in use by %s. You have to remove that container to be able to reuse that name: %w", name, r.byname[name].ID, ErrDuplicateName)
}
}
+ if err := hasOverlappingRanges(options.UIDMap); err != nil {
+ return nil, err
+ }
+ if err := hasOverlappingRanges(options.GIDMap); err != nil {
+ return nil, err
+ }
if err == nil {
container = &Container{
ID: id,
@@ -371,22 +384,40 @@ func (r *containerStore) removeName(container *Container, name string) {
container.Names = stringSliceWithoutValue(container.Names, name)
}
+// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
func (r *containerStore) SetNames(id string, names []string) error {
- names = dedupeNames(names)
- if container, ok := r.lookup(id); ok {
- for _, name := range container.Names {
- delete(r.byname, name)
- }
- for _, name := range names {
- if otherContainer, ok := r.byname[name]; ok {
- r.removeName(otherContainer, name)
- }
- r.byname[name] = container
+ return r.updateNames(id, names, setNames)
+}
+
+func (r *containerStore) AddNames(id string, names []string) error {
+ return r.updateNames(id, names, addNames)
+}
+
+func (r *containerStore) RemoveNames(id string, names []string) error {
+ return r.updateNames(id, names, removeNames)
+}
+
+func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error {
+ container, ok := r.lookup(id)
+ if !ok {
+ return ErrContainerUnknown
+ }
+ oldNames := container.Names
+ names, err := applyNameOperation(oldNames, names, op)
+ if err != nil {
+ return err
+ }
+ for _, name := range oldNames {
+ delete(r.byname, name)
+ }
+ for _, name := range names {
+ if otherContainer, ok := r.byname[name]; ok {
+ r.removeName(otherContainer, name)
}
- container.Names = names
- return r.Save()
+ r.byname[name] = container
}
- return ErrContainerUnknown
+ container.Names = names
+ return r.Save()
}
func (r *containerStore) Delete(id string) error {
@@ -446,18 +477,18 @@ func (r *containerStore) Exists(id string) bool {
func (r *containerStore) BigData(id, key string) ([]byte, error) {
if key == "" {
- return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name")
+ return nil, fmt.Errorf("can't retrieve container big data value for empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
return nil, ErrContainerUnknown
}
- return ioutil.ReadFile(r.datapath(c.ID, key))
+ return os.ReadFile(r.datapath(c.ID, key))
}
func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
- return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name")
+ return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
@@ -487,7 +518,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
- return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name")
+ return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
@@ -525,7 +556,7 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
func (r *containerStore) SetBigData(id, key string, data []byte) error {
if key == "" {
- return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item")
+ return fmt.Errorf("can't set empty name for container big data item: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
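
The recurring change in this file, repeated throughout the patch, replaces github.com/pkg/errors wrapping with Go 1.13 stdlib wrapping. A minimal sketch (names below are illustrative, not from the patch) of how `%w` keeps a sentinel matchable with `errors.Is`:

package main

import (
	"errors"
	"fmt"
)

// ErrDuplicateName stands in for the storage package's sentinel error.
var ErrDuplicateName = errors.New("that name is already in use")

func register(name string) error {
	// Before: errors.Wrapf(ErrDuplicateName, "the container name %q ...", name)
	// After: wrap with %w so callers can still match the sentinel.
	return fmt.Errorf("the container name %q is already in use: %w", name, ErrDuplicateName)
}

func main() {
	err := register("web")
	fmt.Println(errors.Is(err, ErrDuplicateName)) // true: %w preserves the chain
}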
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index 76f12ec3be5..2642874be6d 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
/*
@@ -24,9 +25,10 @@ package aufs
import (
"bufio"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"os/exec"
"path"
@@ -44,9 +46,8 @@ import (
mountpk "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
- "github.com/opencontainers/runc/libcontainer/userns"
+ "github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"golang.org/x/sys/unix"
@@ -54,9 +55,9 @@ import (
var (
// ErrAufsNotSupported is returned if aufs is not supported by the host.
- ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems")
+ ErrAufsNotSupported = fmt.Errorf("aufs was not found in /proc/filesystems")
	// ErrAufsNested means aufs cannot be used because we are in a user namespace
- ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace")
+ ErrAufsNested = fmt.Errorf("aufs cannot be used in non-init user namespace")
backingFs = ""
enableDirpermLock sync.Once
@@ -89,7 +90,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Try to load the aufs kernel module
if err := supportsAufs(); err != nil {
- return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs")
+ return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported)
}
@@ -104,7 +105,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs:
logrus.Errorf("AUFS is not supported over %s", backingFs)
- return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs)
+ return nil, fmt.Errorf("aufs is not supported over %q: %w", backingFs, graphdriver.ErrIncompatibleFS)
}
var mountOptions string
@@ -168,7 +169,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
for _, path := range []string{"mnt", "diff"} {
p := filepath.Join(home, path)
- entries, err := ioutil.ReadDir(p)
+ entries, err := os.ReadDir(p)
if err != nil {
logger.WithError(err).WithField("dir", p).Error("error reading dir entries")
continue
@@ -198,7 +199,7 @@ func supportsAufs() error {
// proc/filesystems for when aufs is supported
exec.Command("modprobe", "aufs").Run()
- if userns.RunningInUserNS() {
+ if unshare.IsRootless() {
return ErrAufsNested
}
@@ -372,10 +373,10 @@ func (a *Driver) Remove(id string) error {
}
if err != unix.EBUSY {
- return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint)
+ return fmt.Errorf("aufs: unmount error: %s: %w", mountpoint, err)
}
if retries >= 5 {
- return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint)
+ return fmt.Errorf("aufs: unmount error after retries: %s: %w", mountpoint, err)
}
// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
retries++
@@ -385,21 +386,21 @@ func (a *Driver) Remove(id string) error {
// Remove the layers file for the id
if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
- return errors.Wrapf(err, "error removing layers dir for %s", id)
+ return fmt.Errorf("removing layers dir for %s: %w", id, err)
}
if err := atomicRemove(a.getDiffPath(id)); err != nil {
- return errors.Wrapf(err, "could not remove diff path for id %s", id)
+ return fmt.Errorf("could not remove diff path for id %s: %w", id, err)
}
// Atomically remove each directory in turn by first moving it out of the
// way (so that the container runtime doesn't find it anymore) before doing removal of
// the whole tree.
if err := atomicRemove(mountpoint); err != nil {
- if errors.Cause(err) == unix.EBUSY {
+ if errors.Is(err, unix.EBUSY) {
logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY")
}
- return errors.Wrapf(err, "could not remove mountpoint for id %s", id)
+ return fmt.Errorf("could not remove mountpoint for id %s: %w", id, err)
}
a.pathCacheLock.Lock()
@@ -417,10 +418,10 @@ func atomicRemove(source string) error {
case os.IsExist(err):
// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
if _, e := os.Stat(source); !os.IsNotExist(e) {
- return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target)
+ return fmt.Errorf("target rename dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err)
}
default:
- return errors.Wrapf(err, "error preparing atomic delete")
+ return fmt.Errorf("preparing atomic delete: %w", err)
}
return system.EnsureRemoveAll(target)
@@ -624,7 +625,7 @@ func (a *Driver) mount(id string, target string, layers []string, options graphd
rw := a.getDiffPath(id)
if err := a.aufsMount(layers, rw, target, options); err != nil {
- return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
+ return fmt.Errorf("creating aufs mount to %s: %w", target, err)
}
return nil
}
@@ -649,11 +650,11 @@ func (a *Driver) mounted(mountpoint string) (bool, error) {
// Cleanup aufs and unmount all mountpoints
func (a *Driver) Cleanup() error {
var dirs []string
- if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
+ if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
- if !info.IsDir() {
+ if !d.IsDir() {
return nil
}
dirs = append(dirs, path)
@@ -728,16 +729,16 @@ func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.M
// version of aufs.
func useDirperm() bool {
enableDirpermLock.Do(func() {
- base, err := ioutil.TempDir("", "storage-aufs-base")
+ base, err := os.MkdirTemp("", "storage-aufs-base")
if err != nil {
- logrus.Errorf("error checking dirperm1: %v", err)
+ logrus.Errorf("Checking dirperm1: %v", err)
return
}
defer os.RemoveAll(base)
- union, err := ioutil.TempDir("", "storage-aufs-union")
+ union, err := os.MkdirTemp("", "storage-aufs-union")
if err != nil {
- logrus.Errorf("error checking dirperm1: %v", err)
+ logrus.Errorf("Checking dirperm1: %v", err)
return
}
defer os.RemoveAll(union)
@@ -748,7 +749,7 @@ func useDirperm() bool {
}
enableDirperm = true
if err := Unmount(union); err != nil {
- logrus.Errorf("error checking dirperm1: failed to unmount %v", err)
+ logrus.Errorf("Checking dirperm1: failed to unmount %v", err)
}
})
return enableDirperm
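
The io/ioutil helpers dropped above were deprecated in Go 1.16; a sketch of the one-to-one mapping this patch applies (temp dir and file names are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// ioutil.TempDir -> os.MkdirTemp
	dir, err := os.MkdirTemp("", "storage-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// ioutil.WriteFile / ioutil.ReadFile -> os.WriteFile / os.ReadFile
	p := filepath.Join(dir, "id")
	if err := os.WriteFile(p, []byte("layer-id"), 0600); err != nil {
		panic(err)
	}
	data, err := os.ReadFile(p)
	if err != nil {
		panic(err)
	}

	// ioutil.ReadDir ([]os.FileInfo) -> os.ReadDir ([]os.DirEntry);
	// a DirEntry defers the stat until e.Info() is actually called.
	entries, err := os.ReadDir(dir)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data), len(entries))
}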
diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
index d2325fc46cd..27e62163312 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/dirs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
@@ -1,17 +1,17 @@
+//go:build linux
// +build linux
package aufs
import (
"bufio"
- "io/ioutil"
"os"
"path"
)
// Return all the directories
func loadIds(root string) ([]string, error) {
- dirs, err := ioutil.ReadDir(root)
+ dirs, err := os.ReadDir(root)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go
deleted file mode 100644
index d030b066378..00000000000
--- a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !linux
-
-package aufs
-
-import "errors"
-
-// MsRemount declared to specify a non-linux system mount.
-const MsRemount = 0
-
-func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
- return errors.New("mount is not implemented on this platform")
-}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index 3903b1dddd9..0b5d1c510db 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -1,3 +1,4 @@
+//go:build linux && cgo
// +build linux,cgo
package btrfs
@@ -16,7 +17,7 @@ import "C"
import (
"fmt"
- "io/ioutil"
+ "io/fs"
"math"
"os"
"path"
@@ -34,7 +35,6 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -60,7 +60,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
if fsMagic != graphdriver.FsMagicBtrfs {
- return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home)
+ return nil, fmt.Errorf("%q is not on a btrfs filesystem: %w", home, graphdriver.ErrPrerequisites)
}
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
@@ -116,7 +116,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
case "btrfs.mountopt":
return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
default:
- return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
+ return options, userDiskQuota, fmt.Errorf("unknown option %s", key)
}
}
return options, userDiskQuota, nil
@@ -172,7 +172,7 @@ func openDir(path string) (*C.DIR, error) {
dir := C.opendir(Cpath)
if dir == nil {
- return nil, fmt.Errorf("Can't open dir")
+ return nil, fmt.Errorf("can't open dir %s", path)
}
return dir, nil
}
@@ -202,7 +202,7 @@ func subvolCreate(path, name string) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error())
+ return fmt.Errorf("failed to create btrfs subvolume: %w", errno)
}
return nil
}
@@ -230,7 +230,7 @@ func subvolSnapshot(src, dest, name string) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error())
+ return fmt.Errorf("failed to create btrfs snapshot: %w", errno)
}
return nil
}
@@ -256,32 +256,32 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error {
var args C.struct_btrfs_ioctl_vol_args
// walk the btrfs subvolumes
- walkSubvolumes := func(p string, f os.FileInfo, err error) error {
+ walkSubvolumes := func(p string, d fs.DirEntry, err error) error {
if err != nil {
if os.IsNotExist(err) && p != fullPath {
// missing most likely because the path was a subvolume that got removed in the previous iteration
// since it's gone anyway, we don't care
return nil
}
- return fmt.Errorf("error walking subvolumes: %v", err)
+ return fmt.Errorf("walking subvolumes: %w", err)
}
// we want to check children only so skip itself
// it will be removed after the filepath walk anyways
- if f.IsDir() && p != fullPath {
+ if d.IsDir() && p != fullPath {
sv, err := isSubvolume(p)
if err != nil {
- return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err)
+ return fmt.Errorf("failed to test if %s is a btrfs subvolume: %w", p, err)
}
if sv {
- if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil {
- return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err)
+ if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil {
+ return fmt.Errorf("failed to destroy btrfs child subvolume (%s) of parent (%s): %w", p, dirpath, err)
}
}
}
return nil
}
- if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil {
- return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err)
+ if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil {
+ return fmt.Errorf("recursively walking subvolumes for %s failed: %w", dirpath, err)
}
if quotaEnabled {
@@ -307,7 +307,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error())
+ return fmt.Errorf("failed to destroy btrfs snapshot %s for %s: %w", dirpath, name, errno)
}
return nil
}
@@ -343,7 +343,7 @@ func (d *Driver) enableQuota() error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
+ return fmt.Errorf("failed to enable btrfs quota for %s: %w", dir, errno)
}
d.quotaEnabled = true
@@ -368,7 +368,7 @@ func (d *Driver) subvolRescanQuota() error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
+ return fmt.Errorf("failed to rescan btrfs quota for %s: %w", dir, errno)
}
return nil
@@ -387,7 +387,7 @@ func subvolLimitQgroup(path string, size uint64) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
+ return fmt.Errorf("failed to limit qgroup for %s: %w", dir, errno)
}
return nil
@@ -416,11 +416,11 @@ func qgroupStatus(path string) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error())
+ return fmt.Errorf("failed to search qgroup for %s: %w", path, errno)
}
sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf))
if sh._type != C.BTRFS_QGROUP_STATUS_KEY {
- return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type)
+ return fmt.Errorf("invalid qgroup search header type for %s: %v", path, sh._type)
}
return nil
}
@@ -438,10 +438,10 @@ func subvolLookupQgroup(path string) (uint64, error) {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
- return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error())
+ return 0, fmt.Errorf("failed to lookup qgroup for %s: %w", dir, errno)
}
if args.treeid == 0 {
- return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir)
+ return 0, fmt.Errorf("invalid qgroup id for %s: 0", dir)
}
return uint64(args.treeid), nil
@@ -523,7 +523,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil {
return err
}
- if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
+ if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
return err
}
}
@@ -557,7 +557,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
}
driver.options.size = uint64(size)
default:
- return fmt.Errorf("Unknown option %s", key)
+ return fmt.Errorf("unknown option %s", key)
}
}
@@ -642,7 +642,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return "", fmt.Errorf("%s: not a directory", dir)
}
- if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
+ if quota, err := os.ReadFile(d.quotasDirID(id)); err == nil {
if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
if err := d.enableQuota(); err != nil {
return "", err
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
index 905e834e354..a61d8fbd9ac 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -1,4 +1,5 @@
-// +build !linux btrfs_noversion !cgo
+//go:build linux && btrfs_noversion && cgo
+// +build linux,btrfs_noversion,cgo
package btrfs
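
Each vendored file also gains a `//go:build` line above the legacy `// +build` comment; since Go 1.17, gofmt keeps the pair in sync. An illustrative file header showing the two syntaxes side by side:

//go:build linux && btrfs_noversion && cgo
// +build linux,btrfs_noversion,cgo

// In //go:build expressions, && is AND, || is OR, and ! is NOT;
// the legacy // +build form uses commas for AND and spaces for OR.
package btrfs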
diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go
index 63bfd2d136f..34a76b8226e 100644
--- a/vendor/github.com/containers/storage/drivers/chown.go
+++ b/vendor/github.com/containers/storage/drivers/chown.go
@@ -2,6 +2,7 @@ package graphdriver
import (
"bytes"
+ "errors"
"fmt"
"os"
@@ -50,11 +51,14 @@ func chownByMapsMain() {
if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 {
toHost = nil
}
+
+ chowner := newLChowner()
+
chown := func(path string, info os.FileInfo, _ error) error {
if path == "." {
return nil
}
- return platformLChown(path, info, toHost, toContainer)
+ return chowner.LChown(path, info, toHost, toContainer)
}
if err := pwalk.Walk(".", chown); err != nil {
fmt.Fprintf(os.Stderr, "error during chown: %v", err)
@@ -84,13 +88,13 @@ func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error
cmd.Stdin = bytes.NewReader(config)
output, err := cmd.CombinedOutput()
if len(output) > 0 && err != nil {
- return fmt.Errorf("%v: %s", err, string(output))
+ return fmt.Errorf("%s: %w", string(output), err)
}
if err != nil {
return err
}
if len(output) > 0 {
- return fmt.Errorf("%s", string(output))
+ return errors.New(string(output))
}
return nil
diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go
new file mode 100644
index 00000000000..a732075fbb1
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go
@@ -0,0 +1,109 @@
+//go:build darwin
+// +build darwin
+
+package graphdriver
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "sync"
+ "syscall"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
+)
+
+type inode struct {
+ Dev uint64
+ Ino uint64
+}
+
+type platformChowner struct {
+ mutex sync.Mutex
+ inodes map[inode]bool
+}
+
+func newLChowner() *platformChowner {
+ return &platformChowner{
+ inodes: make(map[inode]bool),
+ }
+}
+
+func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
+ st, ok := info.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+
+ i := inode{
+ Dev: uint64(st.Dev),
+ Ino: uint64(st.Ino),
+ }
+ c.mutex.Lock()
+ _, found := c.inodes[i]
+ if !found {
+ c.inodes[i] = true
+ }
+ c.mutex.Unlock()
+
+ if found {
+ return nil
+ }
+
+ // Map an on-disk UID/GID pair from host to container
+ // using the first map, then back to the host using the
+ // second map. Skip that first step if they're 0, to
+ // compensate for cases where a parent layer should
+ // have had a mapped value, but didn't.
+ uid, gid := int(st.Uid), int(st.Gid)
+ if toContainer != nil {
+ pair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ mappedUID, mappedGID, err := toContainer.ToContainer(pair)
+ if err != nil {
+ if (uid != 0) || (gid != 0) {
+ return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err)
+ }
+ mappedUID, mappedGID = uid, gid
+ }
+ uid, gid = mappedUID, mappedGID
+ }
+ if toHost != nil {
+ pair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ mappedPair, err := toHost.ToHostOverflow(pair)
+ if err != nil {
+ return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err)
+ }
+ uid, gid = mappedPair.UID, mappedPair.GID
+ }
+ if uid != int(st.Uid) || gid != int(st.Gid) {
+ cap, err := system.Lgetxattr(path, "security.capability")
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
+ return fmt.Errorf("%s: %w", os.Args[0], err)
+ }
+
+ // Make the change.
+ if err := system.Lchown(path, uid, gid); err != nil {
+ return fmt.Errorf("%s: %w", os.Args[0], err)
+ }
+ // Restore the SUID and SGID bits if they were originally set.
+ if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
+ if err := system.Chmod(path, info.Mode()); err != nil {
+ return fmt.Errorf("%s: %w", os.Args[0], err)
+ }
+ }
+ if cap != nil {
+ if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil {
+ return fmt.Errorf("%s: %w", os.Args[0], err)
+ }
+ }
+
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go
index 0387adfc123..42c12c6279c 100644
--- a/vendor/github.com/containers/storage/drivers/chown_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go
@@ -1,4 +1,5 @@
-// +build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
package graphdriver
@@ -6,17 +7,68 @@ import (
"errors"
"fmt"
"os"
+ "sync"
"syscall"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
)
-func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
+type inode struct {
+ Dev uint64
+ Ino uint64
+}
+
+type platformChowner struct {
+ mutex sync.Mutex
+ inodes map[inode]string
+}
+
+func newLChowner() *platformChowner {
+ return &platformChowner{
+ inodes: make(map[inode]string),
+ }
+}
+
+func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
st, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
+
+ i := inode{
+ Dev: uint64(st.Dev),
+ Ino: uint64(st.Ino),
+ }
+
+ c.mutex.Lock()
+
+ oldTarget, found := c.inodes[i]
+ if !found {
+ c.inodes[i] = path
+ }
+
+ // If we are dealing with a file with multiple links then keep the lock until the file is
+ // chowned to avoid a race where we link to the old version if the file is copied up.
+ if found || st.Nlink > 1 {
+ defer c.mutex.Unlock()
+ } else {
+ c.mutex.Unlock()
+ }
+
+ if found {
+ // If the dev/inode was already chowned then create a link to the old target instead
+ // of chowning it again. This is necessary when the underlying file system breaks
+	// inodes on copy-up (as overlay does with index=off) to maintain the original
+ // link and correct file ownership.
+
+ // The target already exists so remove it before creating the link to the new target.
+ if err := os.Remove(path); err != nil {
+ return err
+ }
+ return os.Link(oldTarget, path)
+ }
+
// Map an on-disk UID/GID pair from host to container
// using the first map, then back to the host using the
// second map. Skip that first step if they're 0, to
@@ -31,7 +83,7 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.
mappedUID, mappedGID, err := toContainer.ToContainer(pair)
if err != nil {
if (uid != 0) || (gid != 0) {
- return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
+ return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err)
}
mappedUID, mappedGID = uid, gid
}
@@ -42,31 +94,31 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.
UID: uid,
GID: gid,
}
- mappedPair, err := toHost.ToHost(pair)
+ mappedPair, err := toHost.ToHostOverflow(pair)
if err != nil {
- return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
+ return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err)
}
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
cap, err := system.Lgetxattr(path, "security.capability")
- if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Make the change.
if err := system.Lchown(path, uid, gid); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Restore the SUID and SGID bits if they were originally set.
if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
if err := system.Chmod(path, info.Mode()); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
if cap != nil {
if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil {
- return fmt.Errorf("%s: %v", os.Args[0], err)
+ return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
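
The new platformChowner above deduplicates work by device/inode pair, so a file with several hard links is chowned once and re-linked afterwards; this also survives file systems that break inodes on copy-up. A simplified sketch of that bookkeeping (xattr and ID-mapping handling omitted; names are illustrative):

package main

import (
	"os"
	"sync"
	"syscall"
)

type inode struct{ dev, ino uint64 }

type chowner struct {
	mu     sync.Mutex
	inodes map[inode]string // inode -> first path already chowned
}

func (c *chowner) lchown(path string, info os.FileInfo, uid, gid int) error {
	st, ok := info.Sys().(*syscall.Stat_t)
	if !ok {
		return nil
	}
	key := inode{uint64(st.Dev), uint64(st.Ino)}

	c.mu.Lock()
	first, seen := c.inodes[key]
	if !seen {
		c.inodes[key] = path
	}
	// Hold the lock across the chown for multi-link files so a
	// concurrent walker cannot link to a stale copy mid copy-up.
	if seen || st.Nlink > 1 {
		defer c.mu.Unlock()
	} else {
		c.mu.Unlock()
	}

	if seen {
		// Already chowned via another link: re-link to the first
		// path instead of chowning again, preserving the hard link.
		if err := os.Remove(path); err != nil {
			return err
		}
		return os.Link(first, path)
	}
	return os.Lchown(path, uid, gid)
}

func main() {
	c := &chowner{inodes: map[inode]string{}}
	if info, err := os.Lstat("/tmp"); err == nil {
		_ = c.lchown("/tmp", info, os.Getuid(), os.Getgid())
	}
}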
diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go
index 31bd5bb52dd..1845a4e086c 100644
--- a/vendor/github.com/containers/storage/drivers/chown_windows.go
+++ b/vendor/github.com/containers/storage/drivers/chown_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package graphdriver
@@ -9,6 +10,13 @@ import (
"github.com/containers/storage/pkg/idtools"
)
-func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
+type platformChowner struct {
+}
+
+func newLChowner() *platformChowner {
+ return &platformChowner{}
+}
+
+func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
return &os.PathError{"lchown", path, syscall.EWINDOWS}
}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go
index c8c4905bfee..9a1c6751f8e 100644
--- a/vendor/github.com/containers/storage/drivers/chroot_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go
@@ -1,3 +1,4 @@
+//go:build linux || darwin || freebsd || solaris
// +build linux darwin freebsd solaris
package graphdriver
@@ -12,10 +13,10 @@ import (
// specified path followed by chdir() to the new root directory
func chrootOrChdir(path string) error {
if err := syscall.Chroot(path); err != nil {
- return fmt.Errorf("error chrooting to %q: %v", path, err)
+ return fmt.Errorf("chrooting to %q: %w", path, err)
}
if err := syscall.Chdir(string(os.PathSeparator)); err != nil {
- return fmt.Errorf("error changing to %q: %v", path, err)
+ return fmt.Errorf("changing to %q: %w", path, err)
}
return nil
}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go
index f4dc22a9615..d5eb5ed983b 100644
--- a/vendor/github.com/containers/storage/drivers/chroot_windows.go
+++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go
@@ -9,7 +9,7 @@ import (
// specified path followed by chdir() to the new root directory
func chrootOrChdir(path string) error {
if err := syscall.Chdir(path); err != nil {
- return fmt.Errorf("error changing to %q: %v", path, err)
+ return fmt.Errorf("changing to %q: %w", path, err)
}
return nil
}
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index 7773844f916..58c0b5daf1a 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -1,3 +1,4 @@
+//go:build cgo
// +build cgo
package copy
@@ -26,7 +27,6 @@ import (
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
- "github.com/opencontainers/runc/libcontainer/userns"
"golang.org/x/sys/unix"
)
@@ -154,7 +154,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
dstPath := filepath.Join(dstDir, relPath)
stat, ok := f.Sys().(*syscall.Stat_t)
if !ok {
- return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+ return fmt.Errorf("unable to get raw syscall.Stat_t data for %s", srcPath)
}
isHardlink := false
@@ -206,7 +206,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
s.Close()
case mode&os.ModeDevice != 0:
- if userns.RunningInUserNS() {
+ if unshare.IsRootless() {
// cannot create a device if running in user namespace
return nil
}
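
Both the aufs and copy paths now gate on unshare.IsRootless() instead of the runc userns helper, dropping the opencontainers/runc import from these files. A minimal usage sketch, assuming the vendored containers/storage module is importable:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/unshare"
)

func main() {
	// IsRootless reports whether we run without real root (for example
	// inside a user namespace), the condition under which the aufs
	// driver and device-node copying above bail out.
	fmt.Println("rootless:", unshare.IsRootless())
}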
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
index c23097b7635..96c4cdacb4b 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
@@ -1,3 +1,4 @@
+//go:build linux && cgo
// +build linux,cgo
package devmapper
@@ -5,14 +6,13 @@ package devmapper
import (
"bufio"
"bytes"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -59,7 +59,7 @@ func checkDevAvailable(dev string) error {
}
if !bytes.Contains(out, []byte(dev)) {
- return errors.Errorf("%s is not available for use with devicemapper", dev)
+ return fmt.Errorf("%s is not available for use with devicemapper", dev)
}
return nil
}
@@ -84,7 +84,7 @@ func checkDevInVG(dev string) error {
// got "VG Name" line"
vg := strings.TrimSpace(fields[1])
if len(vg) > 0 {
- return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
+ return fmt.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
}
logrus.Error(fields)
break
@@ -112,7 +112,7 @@ func checkDevHasFS(dev string) error {
if bytes.Equal(kv[0], []byte("TYPE")) {
v := bytes.Trim(kv[1], "\"")
if len(v) > 0 {
- return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
+ return fmt.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
}
return nil
}
@@ -123,16 +123,16 @@ func checkDevHasFS(dev string) error {
func verifyBlockDevice(dev string, force bool) error {
absPath, err := filepath.Abs(dev)
if err != nil {
- return errors.Errorf("unable to get absolute path for %s: %s", dev, err)
+ return fmt.Errorf("unable to get absolute path for %s: %s", dev, err)
}
realPath, err := filepath.EvalSymlinks(absPath)
if err != nil {
- return errors.Errorf("failed to canonicalise path for %s: %s", dev, err)
+ return fmt.Errorf("failed to canonicalise path for %s: %s", dev, err)
}
if err := checkDevAvailable(absPath); err != nil {
logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath)
if err := checkDevAvailable(realPath); err != nil {
- return errors.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device.", absPath, realPath)
+ return fmt.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device", absPath, realPath)
}
}
if err := checkDevInVG(realPath); err != nil {
@@ -153,31 +153,34 @@ func readLVMConfig(root string) (directLVMConfig, error) {
var cfg directLVMConfig
p := filepath.Join(root, "setup-config.json")
- b, err := ioutil.ReadFile(p)
+ b, err := os.ReadFile(p)
if err != nil {
if os.IsNotExist(err) {
return cfg, nil
}
- return cfg, errors.Wrap(err, "error reading existing setup config")
+ return cfg, fmt.Errorf("reading existing setup config: %w", err)
}
// check if this is just an empty file, no need to produce a json error later if so
if len(b) == 0 {
return cfg, nil
}
-
- err = json.Unmarshal(b, &cfg)
- return cfg, errors.Wrap(err, "error unmarshaling previous device setup config")
+ if err := json.Unmarshal(b, &cfg); err != nil {
+ return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err)
+ }
+ return cfg, nil
}
func writeLVMConfig(root string, cfg directLVMConfig) error {
p := filepath.Join(root, "setup-config.json")
b, err := json.Marshal(cfg)
if err != nil {
- return errors.Wrap(err, "error marshalling direct lvm config")
+ return fmt.Errorf("marshalling direct lvm config: %w", err)
}
- err = ioutil.WriteFile(p, b, 0600)
- return errors.Wrap(err, "error writing direct lvm config to file")
+ if err := os.WriteFile(p, b, 0600); err != nil {
+ return fmt.Errorf("writing direct lvm config to file: %w", err)
+ }
+ return nil
}
func setupDirectLVM(cfg directLVMConfig) error {
@@ -186,13 +189,13 @@ func setupDirectLVM(cfg directLVMConfig) error {
for _, bin := range binaries {
if _, err := exec.LookPath(bin); err != nil {
- return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm")
+ return fmt.Errorf("looking up command `"+bin+"` while setting up direct lvm: %w", err)
}
}
err := os.MkdirAll(lvmProfileDir, 0755)
if err != nil {
- return errors.Wrap(err, "error creating lvm profile directory")
+ return fmt.Errorf("creating lvm profile directory: %w", err)
}
if cfg.AutoExtendPercent == 0 {
@@ -215,34 +218,37 @@ func setupDirectLVM(cfg directLVMConfig) error {
out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput()
if err != nil {
- return errors.Wrap(err, string(out))
+ return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput()
if err != nil {
- return errors.Wrap(err, string(out))
+ return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput()
if err != nil {
- return errors.Wrap(err, string(out))
+ return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput()
if err != nil {
- return errors.Wrap(err, string(out))
+ return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput()
if err != nil {
- return errors.Wrap(err, string(out))
+ return fmt.Errorf("%v: %w", string(out), err)
}
profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
- err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
+ err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
if err != nil {
- return errors.Wrap(err, "error writing storage thinp autoextend profile")
+ return fmt.Errorf("writing storage thinp autoextend profile: %w", err)
}
out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput()
- return errors.Wrap(err, string(out))
+ if err != nil {
+ return fmt.Errorf("%s: %w", string(out), err)
+ }
+ return nil
}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index c5168bfdd25..697a16fda73 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -1,12 +1,14 @@
+//go:build linux && cgo
// +build linux,cgo
package devmapper
import (
"bufio"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"os/exec"
"path"
@@ -27,7 +29,6 @@ import (
"github.com/containers/storage/pkg/parsers/kernel"
units "github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -298,7 +299,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
}
defer file.Close()
if err := file.Truncate(size); err != nil {
- return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err)
+ return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %w", filename, err)
}
} else if fi.Size() > size {
logrus.Warnf("devmapper: Can't shrink loopback file %s", filename)
@@ -329,7 +330,7 @@ func (devices *DeviceSet) removeMetadata(info *devInfo) error {
// Given json data and file path, write it to disk
func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error {
- tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp")
+ tmpFile, err := os.CreateTemp(devices.metadataDir(), ".tmp")
if err != nil {
return fmt.Errorf("devmapper: Error creating metadata file: %s", err)
}
@@ -419,40 +420,35 @@ func (devices *DeviceSet) constructDeviceIDMap() {
}
}
-func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error {
+func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error {
// Skip some of the meta files which are not device files.
- if strings.HasSuffix(finfo.Name(), ".migrated") {
+ if strings.HasSuffix(name, ".migrated") {
logrus.Debugf("devmapper: Skipping file %s", path)
return nil
}
- if strings.HasPrefix(finfo.Name(), ".") {
+ if strings.HasPrefix(name, ".") {
logrus.Debugf("devmapper: Skipping file %s", path)
return nil
}
- if finfo.Name() == deviceSetMetaFile {
+ if name == deviceSetMetaFile {
logrus.Debugf("devmapper: Skipping file %s", path)
return nil
}
- if finfo.Name() == transactionMetaFile {
+ if name == transactionMetaFile {
logrus.Debugf("devmapper: Skipping file %s", path)
return nil
}
logrus.Debugf("devmapper: Loading data for file %s", path)
- hash := finfo.Name()
- if hash == base {
- hash = ""
- }
-
// Include deleted devices also as cleanup delete device logic
// will go through it and see if there are any deleted devices.
- if _, err := devices.lookupDevice(hash); err != nil {
- return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err)
+ if _, err := devices.lookupDevice(name); err != nil {
+ return fmt.Errorf("devmapper: Error looking up device %s:%w", name, err)
}
return nil
@@ -462,21 +458,21 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error {
logrus.Debug("devmapper: loadDeviceFilesOnStart()")
defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END")
- var scan = func(path string, info os.FileInfo, err error) error {
+ var scan = func(path string, d fs.DirEntry, err error) error {
if err != nil {
logrus.Debugf("devmapper: Can't walk the file %s", path)
return nil
}
// Skip any directories
- if info.IsDir() {
+ if d.IsDir() {
return nil
}
- return devices.deviceFileWalkFunction(path, info)
+ return devices.deviceFileWalkFunction(path, d.Name())
}
- return filepath.Walk(devices.metadataDir(), scan)
+ return filepath.WalkDir(devices.metadataDir(), scan)
}
// Should be called with devices.Lock() held.
@@ -551,7 +547,7 @@ func xfsSupported() error {
f, err := os.Open("/proc/filesystems")
if err != nil {
- return errors.Wrapf(err, "error checking for xfs support")
+ return fmt.Errorf("checking for xfs support: %w", err)
}
defer f.Close()
@@ -563,7 +559,7 @@ func xfsSupported() error {
}
if err := s.Err(); err != nil {
- return errors.Wrapf(err, "error checking for xfs support")
+ return fmt.Errorf("checking for xfs support: %w", err)
}
return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`)
@@ -633,7 +629,7 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
func (devices *DeviceSet) migrateOldMetaData() error {
// Migrate old metadata file
- jsonData, err := ioutil.ReadFile(devices.oldMetadataFile())
+ jsonData, err := os.ReadFile(devices.oldMetadataFile())
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -734,7 +730,7 @@ func (devices *DeviceSet) initMetaData() error {
devices.TransactionID = transactionID
if err := devices.loadDeviceFilesOnStart(); err != nil {
- return fmt.Errorf("devmapper: Failed to load device files:%v", err)
+ return fmt.Errorf("devmapper: Failed to load device files:%w", err)
}
devices.constructDeviceIDMap()
@@ -873,7 +869,7 @@ func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint
err = devices.cancelDeferredRemoval(baseInfo)
if err != nil {
// If Error is ErrEnxio. Device is probably already gone. Continue.
- if errors.Cause(err) != devicemapper.ErrEnxio {
+ if !errors.Is(err, devicemapper.ErrEnxio) {
return err
}
devinfo = nil
@@ -958,7 +954,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
func (devices *DeviceSet) loadMetadata(hash string) *devInfo {
info := &devInfo{Hash: hash, devices: devices}
- jsonData, err := ioutil.ReadFile(devices.metadataFile(info))
+ jsonData, err := os.ReadFile(devices.metadataFile(info))
if err != nil {
logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err)
return nil
@@ -980,7 +976,7 @@ func (devices *DeviceSet) loadMetadata(hash string) *devInfo {
func getDeviceUUID(device string) (string, error) {
out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output()
if err != nil {
- return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err)
+ return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%w", device, err)
}
uuid := strings.TrimSuffix(string(out), "\n")
@@ -1088,7 +1084,7 @@ func (devices *DeviceSet) createBaseImage() error {
}
if err := devices.saveBaseDeviceUUID(info); err != nil {
- return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
+ return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err)
}
return nil
@@ -1101,7 +1097,7 @@ func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
info, err := devicemapper.GetInfo(thinPoolDevice)
if err != nil {
- return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err)
+ return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %w", thinPoolDevice, err)
}
// Device does not exist.
@@ -1111,7 +1107,7 @@ func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
_, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice)
if err != nil {
- return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err)
+ return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %w", thinPoolDevice, err)
}
if deviceType != "thin-pool" {
@@ -1143,13 +1139,13 @@ func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
// If BaseDeviceUUID is nil (upgrade case), save it and return success.
if devices.BaseDeviceUUID == "" {
if err := devices.saveBaseDeviceUUID(baseInfo); err != nil {
- return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
+ return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err)
}
return nil
}
if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil {
- return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err)
+ return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %w", err)
}
return nil
@@ -1188,7 +1184,7 @@ func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error {
func (devices *DeviceSet) growFS(info *devInfo) error {
if err := devices.activateDeviceIfNeeded(info, false); err != nil {
- return fmt.Errorf("Error activating devmapper device: %s", err)
+ return fmt.Errorf("activating devmapper device: %s", err)
}
defer devices.deactivateDevice(info)
@@ -1209,7 +1205,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
options = joinMountOptions(options, devices.mountOptions)
if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
- return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
+ return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err)
}
defer func() {
@@ -1221,14 +1217,14 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
switch devices.BaseDeviceFilesystem {
case ext4:
if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil {
- return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
+ return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err)
}
case xfs:
if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil {
- return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
+ return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err)
}
default:
- return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem)
+ return fmt.Errorf("unsupported filesystem type %s", devices.BaseDeviceFilesystem)
}
return nil
}
@@ -1279,11 +1275,11 @@ func (devices *DeviceSet) setupBaseImage() error {
}
func setCloseOnExec(name string) {
- fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
- for _, i := range fileInfos {
- link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
+ fileEntries, _ := os.ReadDir("/proc/self/fd")
+ for _, e := range fileEntries {
+ link, _ := os.Readlink(filepath.Join("/proc/self/fd", e.Name()))
if link == name {
- fd, err := strconv.Atoi(i.Name())
+ fd, err := strconv.Atoi(e.Name())
if err == nil {
unix.CloseOnExec(fd)
}
@@ -1351,7 +1347,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
// Reload size for loopback device
if err := loopback.SetCapacity(dataloopback); err != nil {
- return fmt.Errorf("Unable to update loopback capacity: %s", err)
+ return fmt.Errorf("unable to update loopback capacity: %s", err)
}
// Suspend the pool
@@ -1373,7 +1369,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
}
func (devices *DeviceSet) loadTransactionMetaData() error {
- jsonData, err := ioutil.ReadFile(devices.transactionMetaFile())
+ jsonData, err := os.ReadFile(devices.transactionMetaFile())
if err != nil {
// There is no active transaction. This will be the case
// during upgrade.
@@ -1454,7 +1450,7 @@ func (devices *DeviceSet) processPendingTransaction() error {
}
func (devices *DeviceSet) loadDeviceSetMetaData() error {
- jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile())
+ jsonData, err := os.ReadFile(devices.deviceSetMetaFile())
if err != nil {
// For backward compatibility return success if file does
// not exist.
@@ -1510,7 +1506,7 @@ func determineDriverCapabilities(version string) error {
versionSplit := strings.Split(version, ".")
major, err := strconv.Atoi(versionSplit[0])
if err != nil {
- return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0])
+ return fmt.Errorf("unable to parse driver major version %q as a number: %w", versionSplit[0], graphdriver.ErrNotSupported)
}
if major > 4 {
@@ -1524,7 +1520,7 @@ func determineDriverCapabilities(version string) error {
minor, err := strconv.Atoi(versionSplit[1])
if err != nil {
- return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1])
+ return fmt.Errorf("unable to parse driver minor version %q as a number: %w", versionSplit[1], graphdriver.ErrNotSupported)
}
/*
@@ -1786,7 +1782,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
}
switch fsMagic {
case graphdriver.FsMagicAufs:
- return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems")
+ return fmt.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems")
}
if devices.dataDevice == "" {
@@ -1982,7 +1978,7 @@ func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64,
}
return uint64(size), nil
default:
- return 0, fmt.Errorf("Unknown option %s", key)
+ return 0, fmt.Errorf("unknown option %s", key)
}
}
@@ -2016,7 +2012,7 @@ func (devices *DeviceSet) deleteDeviceNoLock(info *devInfo, syncDelete bool) err
// If syncDelete is true, we want to return error. If deferred
// deletion is not enabled, we return an error. If error is
// something other then EBUSY, return an error.
- if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy {
+ if syncDelete || !devices.deferredDelete || !errors.Is(err, devicemapper.ErrBusy) {
logrus.Debugf("devmapper: Error deleting device: %s", err)
return err
}
@@ -2177,7 +2173,7 @@ func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove boo
// This function's semantics is such that it does not return an
// error if device does not exist. So if device went away by
// the time we actually tried to remove it, do not return error.
- if errors.Cause(err) != devicemapper.ErrEnxio {
+ if !errors.Is(err, devicemapper.ErrEnxio) {
return err
}
return nil
@@ -2195,7 +2191,7 @@ func (devices *DeviceSet) removeDevice(devname string) error {
if err == nil {
break
}
- if errors.Cause(err) != devicemapper.ErrBusy {
+ if !errors.Is(err, devicemapper.ErrBusy) {
return err
}
@@ -2229,7 +2225,7 @@ func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
// Cancel deferred remove
if err := devices.cancelDeferredRemoval(info); err != nil {
// If Error is ErrEnxio. Device is probably already gone. Continue.
- if errors.Cause(err) != devicemapper.ErrEnxio {
+ if !errors.Is(err, devicemapper.ErrEnxio) {
return err
}
}
@@ -2246,7 +2242,7 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
for i := 0; i < 100; i++ {
err = devicemapper.CancelDeferredRemove(info.Name())
if err != nil {
- if errors.Cause(err) != devicemapper.ErrBusy {
+ if !errors.Is(err, devicemapper.ErrBusy) {
// If we see EBUSY it may be a transient error,
// sleep a bit a retry a few times.
devices.Unlock()
@@ -2261,7 +2257,7 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
}
func (devices *DeviceSet) unmountAndDeactivateAll(dir string) {
- files, err := ioutil.ReadDir(dir)
+ files, err := os.ReadDir(dir)
if err != nil {
logrus.Warnf("devmapper: unmountAndDeactivate: %s", err)
return
@@ -2347,21 +2343,21 @@ func (devices *DeviceSet) Shutdown(home string) error {
func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
dmDevicePath, err := os.Readlink(info.DevName())
if err != nil {
- return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
+ return fmt.Errorf("devmapper: readlink failed for device %v:%w", info.DevName(), err)
}
dmDeviceName := path.Base(dmDevicePath)
filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
if err != nil {
- return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err)
+ return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%w", err)
}
defer maxRetriesFile.Close()
// Set max retries to 0
_, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
if err != nil {
- return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
+ return fmt.Errorf("devmapper: Failed to write string %v to file %v:%w", devices.xfsNospaceRetries, filePath, err)
}
return nil
}
@@ -2412,7 +2408,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.Mo
options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel))
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
- return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
+ return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err)
}
if fstype == xfs && devices.xfsNospaceRetries != "" {
@@ -2793,7 +2789,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
case "dm.thinp_percent":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val)
+ return nil, fmt.Errorf("could not parse `dm.thinp_percent=%s`: %w", val, err)
}
if per >= 100 {
return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100")
@@ -2802,7 +2798,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
case "dm.thinp_metapercent":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val)
+ return nil, fmt.Errorf("could not parse `dm.thinp_metapercent=%s`: %w", val, err)
}
if per >= 100 {
return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100")
@@ -2811,7 +2807,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
case "dm.thinp_autoextend_percent":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val)
+ return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_percent=%s`: %w", val, err)
}
if per > 100 {
return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100")
@@ -2820,7 +2816,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
case "dm.thinp_autoextend_threshold":
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val)
+ return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_threshold=%s`: %w", val, err)
}
if per > 100 {
return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100")
@@ -2829,10 +2825,10 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
case "dm.libdm_log_level":
level, err := strconv.ParseInt(val, 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val)
+ return nil, fmt.Errorf("could not parse `dm.libdm_log_level=%s`: %w", val, err)
}
if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug {
- return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug)
+ return nil, fmt.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug)
}
// Register a new logging callback with the specified level.
devicemapper.LogInit(devicemapper.DefaultLogger{
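
The deviceset retry loops above move from `errors.Cause(err) != devicemapper.ErrBusy` to `!errors.Is(err, devicemapper.ErrBusy)`; errors.Is walks the entire %w chain rather than depending on pkg/errors' Cause. A sketch of the retry shape with a stand-in sentinel:

package main

import (
	"errors"
	"fmt"
	"time"
)

var ErrBusy = errors.New("device is busy") // stand-in for devicemapper.ErrBusy

func removeDevice(name string, attempt int) error {
	if attempt < 2 {
		return fmt.Errorf("remove %s: %w", name, ErrBusy) // wrapped, still matchable
	}
	return nil
}

func main() {
	var err error
	for i := 0; i < 100; i++ {
		if err = removeDevice("thinpool", i); err == nil {
			break
		}
		if !errors.Is(err, ErrBusy) {
			break // permanent failure, stop retrying
		}
		time.Sleep(10 * time.Millisecond) // EBUSY may be transient; retry
	}
	fmt.Println("result:", err)
}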
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index d2f165e26de..f9f775a5d14 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -1,10 +1,10 @@
+//go:build linux && cgo
// +build linux,cgo
package devmapper
import (
"fmt"
- "io/ioutil"
"os"
"path"
"strconv"
@@ -227,7 +227,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
- if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
+ if err := os.WriteFile(idFile, []byte(id), 0600); err != nil {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
index 54db6ab4aea..52f0e863e5c 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
@@ -1,3 +1,6 @@
+//go:build linux && cgo
+// +build linux,cgo
+
package devmapper
import jsoniter "github.com/json-iterator/go"
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 770b431bdd3..7d96ebe54fb 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -1,13 +1,13 @@
package graphdriver
import (
+ "errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
@@ -48,7 +48,7 @@ type CreateOpts struct {
ignoreChownErrors bool
}
-// MountOpts contains optional arguments for LayerStope.Mount() methods.
+// MountOpts contains optional arguments for Driver.Get() methods.
type MountOpts struct {
// Mount label is the MAC Labels to assign to mount point (SELINUX)
MountLabel string
@@ -282,7 +282,7 @@ func init() {
// Register registers an InitFunc for the driver.
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
- return fmt.Errorf("Name already registered %s", name)
+ return fmt.Errorf("name already registered %s", name)
}
drivers[name] = initFunc
@@ -296,7 +296,7 @@ func GetDriver(name string, config Options) (Driver, error) {
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root)
- return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root)
+ return nil, fmt.Errorf("failed to GetDriver graph %s %s: %w", name, config.Root, ErrNotSupported)
}
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
@@ -305,7 +305,7 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) {
return initFunc(filepath.Join(home, name), options)
}
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
- return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home)
+ return nil, fmt.Errorf("failed to built-in GetDriver graph %s %s: %w", name, home, ErrNotSupported)
}
// Options is used to initialize a graphdriver
@@ -384,14 +384,13 @@ func New(name string, config Options) (Driver, error) {
}
return driver, nil
}
- return nil, fmt.Errorf("No supported storage backend found")
+ return nil, fmt.Errorf("no supported storage backend found")
}
// isDriverNotSupported returns true if the error initializing
// the graph driver is a non-supported error.
func isDriverNotSupported(err error) bool {
- cause := errors.Cause(err)
- return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS
+ return errors.Is(err, ErrNotSupported) || errors.Is(err, ErrPrerequisites) || errors.Is(err, ErrIncompatibleFS)
}
// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
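
The isDriverNotSupported rewrite works because fmt.Errorf with %w preserves sentinel identity through the wrap chain, making errors.Is a drop-in replacement for comparing errors.Cause against ErrNotSupported and friends. A small stdlib-only sketch:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var ErrNotSupported = errors.New("driver not supported")

    func main() {
    	// Equivalent to the GetDriver failure path above.
    	err := fmt.Errorf("failed to GetDriver graph %s %s: %w", "overlay", "/var/lib/containers", ErrNotSupported)

    	// errors.Is unwraps the chain, so the sentinel still matches
    	// without needing errors.Cause from github.com/pkg/errors.
    	fmt.Println(errors.Is(err, ErrNotSupported)) // true
    }
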
diff --git a/vendor/github.com/containers/storage/drivers/driver_darwin.go b/vendor/github.com/containers/storage/drivers/driver_darwin.go
new file mode 100644
index 00000000000..357851543ed
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_darwin.go
@@ -0,0 +1,14 @@
+package graphdriver
+
+var (
+ // Slice of drivers that should be used in order
+ priority = []string{
+ "vfs",
+ }
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+ // Note it is OK to return FsMagicUnsupported on Darwin.
+ return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
index e1320ee07f6..143cccf92ec 100644
--- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
@@ -2,15 +2,43 @@ package graphdriver
import (
"golang.org/x/sys/unix"
+
+ "github.com/containers/storage/pkg/mount"
+)
+
+const (
+ // FsMagicZfs filesystem id for Zfs
+ FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
// Slice of drivers that should be used in an order
priority = []string{
"zfs",
+ "vfs",
+ }
+
+ // FsNames maps filesystem id to name of the filesystem.
+ FsNames = map[FsMagic]string{
+ FsMagicZfs: "zfs",
}
)
+// NewDefaultChecker returns a Checker that reports whether the specified
+// path is mounted, using the mount package.
+func NewDefaultChecker() Checker {
+ return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+ m, _ := mount.Mounted(path)
+ return m
+}
+
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf unix.Statfs_t
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
index dddf8a8b4c3..b9e57a60d6c 100644
--- a/vendor/github.com/containers/storage/drivers/driver_linux.go
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package graphdriver
@@ -6,6 +7,7 @@ import (
"path/filepath"
"github.com/containers/storage/pkg/mount"
+ "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -50,6 +52,40 @@ const (
FsMagicOverlay = FsMagic(0x794C7630)
// FsMagicFUSE filesystem id for FUSE
FsMagicFUSE = FsMagic(0x65735546)
+ // FsMagicAcfs filesystem id for Acfs
+ FsMagicAcfs = FsMagic(0x61636673)
+ // FsMagicAfs filesystem id for Afs
+ FsMagicAfs = FsMagic(0x5346414f)
+ // FsMagicCephFs filesystem id for Ceph
+ FsMagicCephFs = FsMagic(0x00C36400)
+ // FsMagicCIFS filesystem id for CIFS
+ FsMagicCIFS = FsMagic(0xFF534D42)
+ // FsMagicFHGFSFs filesystem id for FHGFS
+ FsMagicFHGFSFs = FsMagic(0x19830326)
+ // FsMagicIBRIX filesystem id for IBRIX
+ FsMagicIBRIX = FsMagic(0x013111A8)
+ // FsMagicKAFS filesystem id for KAFS
+ FsMagicKAFS = FsMagic(0x6B414653)
+ // FsMagicLUSTRE filesystem id for LUSTRE
+ FsMagicLUSTRE = FsMagic(0x0BD00BD0)
+ // FsMagicNCP filesystem id for NCP
+ FsMagicNCP = FsMagic(0x564C)
+ // FsMagicNFSD filesystem id for NFSD
+ FsMagicNFSD = FsMagic(0x6E667364)
+ // FsMagicOCFS2 filesystem id for OCFS2
+ FsMagicOCFS2 = FsMagic(0x7461636F)
+ // FsMagicPANFS filesystem id for PANFS
+ FsMagicPANFS = FsMagic(0xAAD7AAEA)
+ // FsMagicPRLFS filesystem id for PRLFS
+ FsMagicPRLFS = FsMagic(0x7C7C6673)
+ // FsMagicSMB2 filesystem id for SMB2
+ FsMagicSMB2 = FsMagic(0xFE534D42)
+ // FsMagicSNFS filesystem id for SNFS
+ FsMagicSNFS = FsMagic(0xBEEFDEAD)
+ // FsMagicVBOXSF filesystem id for VBOXSF
+ FsMagicVBOXSF = FsMagic(0x786F4256)
+ // FsMagicVXFS filesystem id for VXFS
+ FsMagicVXFS = FsMagic(0xA501FCF5)
)
var (
@@ -92,9 +128,14 @@ var (
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf unix.Statfs_t
- if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+ path := filepath.Dir(rootpath)
+ if err := unix.Statfs(path, &buf); err != nil {
return 0, err
}
+
+ if _, ok := FsNames[FsMagic(buf.Type)]; !ok {
+ logrus.Debugf("Unknown filesystem type %#x reported for %s", buf.Type, path)
+ }
return FsMagic(buf.Type), nil
}
@@ -128,11 +169,32 @@ func (c *defaultChecker) IsMounted(path string) bool {
return m
}
+// isMountPoint reports whether the given path is a mount point
+func isMountPoint(mountPath string) (bool, error) {
+ // it is already the root
+ if mountPath == "/" {
+ return true, nil
+ }
+
+ var s1, s2 unix.Stat_t
+ if err := unix.Stat(mountPath, &s1); err != nil {
+ return true, err
+ }
+ if err := unix.Stat(filepath.Dir(mountPath), &s2); err != nil {
+ return true, err
+ }
+ return s1.Dev != s2.Dev, nil
+}
+
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf unix.Statfs_t
+
if err := unix.Statfs(mountPath, &buf); err != nil {
return false, err
}
- return FsMagic(buf.Type) == fsType, nil
+ if FsMagic(buf.Type) != fsType {
+ return false, nil
+ }
+ return isMountPoint(mountPath)
}
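
The new isMountPoint helper uses the classic stat trick: a mount point sits on a different device than its parent directory, so comparing the two st_dev values suffices ("/" is special-cased above, and bind mounts of a directory onto a path on the same filesystem are invisible to this check). A standalone sketch of the same idea:

    //go:build linux

    package main

    import (
    	"fmt"
    	"path/filepath"

    	"golang.org/x/sys/unix"
    )

    // isMountPointDev reports whether path is a mount point by comparing
    // the device IDs of path and its parent directory.
    func isMountPointDev(path string) (bool, error) {
    	var s1, s2 unix.Stat_t
    	if err := unix.Stat(path, &s1); err != nil {
    		return false, err
    	}
    	if err := unix.Stat(filepath.Dir(path), &s2); err != nil {
    		return false, err
    	}
    	return s1.Dev != s2.Dev, nil
    }

    func main() {
    	m, err := isMountPointDev("/proc")
    	fmt.Println(m, err) // true <nil> on a typical Linux host
    }
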
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
index 4a875608b0d..3932c3ea5c9 100644
--- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows,!freebsd,!solaris
+// +build !linux,!windows,!freebsd,!solaris,!darwin
package graphdriver
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index c5278850955..5022037dc91 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -2,13 +2,15 @@ package graphdriver
import (
"io"
+ "os"
+ "runtime"
"time"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
- "github.com/opencontainers/runc/libcontainer/userns"
+ "github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
)
@@ -31,10 +33,11 @@ type NaiveDiffDriver struct {
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
-// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
-// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
-// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
-// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
+//
+// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
+// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
+// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
+// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver {
return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater}
}
@@ -107,7 +110,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
// are extracted from tars with full second precision on modified time.
// We need this hack here to make sure calls within the same second receive
// the correct result.
- time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
+ time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
return err
}), nil
}
@@ -138,6 +141,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
if parent != "" {
options := MountOpts{
MountLabel: mountLabel,
+ Options: []string{"ro"},
}
parentFs, err = driver.Get(parent, options)
if err != nil {
@@ -169,9 +173,16 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
}
defer driver.Put(id)
+ defaultForceMask := os.FileMode(0700)
+ var forceMask *os.FileMode = nil
+ if runtime.GOOS == "darwin" {
+ forceMask = &defaultForceMask
+ }
+
tarOptions := &archive.TarOptions{
- InUserNS: userns.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
IgnoreChownErrors: options.IgnoreChownErrors,
+ ForceMask: forceMask,
}
if options.Mappings != nil {
tarOptions.UIDMaps = options.Mappings.UIDs()
@@ -180,7 +191,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, options.Diff, tarOptions); err != nil {
- logrus.Errorf("Error while applying layer: %s", err)
+ logrus.Errorf("While applying layer: %s", err)
return
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
index 44b3515a854..0a0ad7dd553 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -1,21 +1,22 @@
+//go:build linux
// +build linux
package overlay
import (
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path"
"path/filepath"
"syscall"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -25,7 +26,7 @@ import (
// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR.
// When these exist naive diff should be used.
func doesSupportNativeDiff(d, mountOpts string) error {
- td, err := ioutil.TempDir(d, "opaque-bug-check")
+ td, err := os.MkdirTemp(d, "opaque-bug-check")
if err != nil {
return err
}
@@ -57,7 +58,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
// Mark l2/d as opaque
if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), archive.GetOverlayXattrName("opaque"), []byte("y"), 0); err != nil {
- return errors.Wrap(err, "failed to set opaque flag on middle layer")
+ return fmt.Errorf("failed to set opaque flag on middle layer: %w", err)
}
mountFlags := "lowerdir=%s:%s,upperdir=%s,workdir=%s"
@@ -71,7 +72,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
- return errors.Wrap(err, "failed to mount overlay")
+ return fmt.Errorf("failed to mount overlay: %w", err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
@@ -80,14 +81,14 @@ func doesSupportNativeDiff(d, mountOpts string) error {
}()
// Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
- if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
- return errors.Wrap(err, "failed to write to merged directory")
+ if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
+ return fmt.Errorf("failed to write to merged directory: %w", err)
}
// Check l3/d does not have opaque flag
xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), archive.GetOverlayXattrName("opaque"))
if err != nil {
- return errors.Wrap(err, "failed to read opaque flag on upper layer")
+ return fmt.Errorf("failed to read opaque flag on upper layer: %w", err)
}
if string(xattrOpaque) == "y" {
return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
@@ -99,12 +100,12 @@ func doesSupportNativeDiff(d, mountOpts string) error {
if err.(*os.LinkError).Err == syscall.EXDEV {
return nil
}
- return errors.Wrap(err, "failed to rename dir in merged directory")
+ return fmt.Errorf("failed to rename dir in merged directory: %w", err)
}
// get the xattr of "d2"
xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), archive.GetOverlayXattrName("redirect"))
if err != nil {
- return errors.Wrap(err, "failed to read redirect flag on upper layer")
+ return fmt.Errorf("failed to read redirect flag on upper layer: %w", err)
}
if string(xattrRedirect) == "d1" {
@@ -119,7 +120,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
// copying up a file from a lower layer unless/until its contents are being
// modified
func doesMetacopy(d, mountOpts string) (bool, error) {
- td, err := ioutil.TempDir(d, "metacopy-check")
+ td, err := os.MkdirTemp(d, "metacopy-check")
if err != nil {
return false, err
}
@@ -155,11 +156,11 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
- if errors.Cause(err) == unix.EINVAL {
- logrus.Info("metacopy option not supported on this kernel", mountOpts)
+ if errors.Is(err, unix.EINVAL) {
+ logrus.Infof("overlay: metacopy option not supported on this kernel, checked using options %q", mountOpts)
return false, nil
}
- return false, errors.Wrapf(err, "failed to mount overlay for metacopy check with %q options", mountOpts)
+ return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
@@ -169,7 +170,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
// Make a change that only impacts the inode, and check if the pulled-up copy is marked
// as a metadata-only copy
if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil {
- return false, errors.Wrap(err, "error changing permissions on file for metacopy check")
+ return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err)
}
metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy"))
if err != nil {
@@ -177,14 +178,14 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
logrus.Info("metacopy option not supported")
return false, nil
}
- return false, errors.Wrap(err, "metacopy flag was not set on file in upper layer")
+ return false, fmt.Errorf("metacopy flag was not set on file in upper layer: %w", err)
}
return metacopy != nil, nil
}
// doesVolatile checks if the filesystem supports the "volatile" mount option
func doesVolatile(d string) (bool, error) {
- td, err := ioutil.TempDir(d, "volatile-check")
+ td, err := os.MkdirTemp(d, "volatile-check")
if err != nil {
return false, err
}
@@ -209,7 +210,7 @@ func doesVolatile(d string) (bool, error) {
// Mount using the mandatory options and configured options
opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work"))
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
- return false, errors.Wrapf(err, "failed to mount overlay for volatile check")
+ return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
@@ -218,3 +219,55 @@ func doesVolatile(d string) (bool, error) {
}()
return true, nil
}
+
+// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of
+// a idmapped lower layer.
+func supportsIdmappedLowerLayers(home string) (bool, error) {
+ layerDir, err := os.MkdirTemp(home, "compat")
+ if err != nil {
+ return false, err
+ }
+ defer func() {
+ _ = os.RemoveAll(layerDir)
+ }()
+
+ mergedDir := filepath.Join(layerDir, "merged")
+ lowerDir := filepath.Join(layerDir, "lower")
+ lowerMappedDir := filepath.Join(layerDir, "lower-mapped")
+ upperDir := filepath.Join(layerDir, "upper")
+ workDir := filepath.Join(layerDir, "work")
+
+ _ = idtools.MkdirAs(mergedDir, 0700, 0, 0)
+ _ = idtools.MkdirAs(lowerDir, 0700, 0, 0)
+ _ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0)
+ _ = idtools.MkdirAs(upperDir, 0700, 0, 0)
+ _ = idtools.MkdirAs(workDir, 0700, 0, 0)
+
+ idmap := []idtools.IDMap{
+ {
+ ContainerID: 0,
+ HostID: 0,
+ Size: 1,
+ },
+ }
+ pid, cleanupFunc, err := createUsernsProcess(idmap, idmap)
+ if err != nil {
+ return false, err
+ }
+ defer cleanupFunc()
+
+ if err := createIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
+ return false, fmt.Errorf("create mapped mount: %w", err)
+ }
+ defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH)
+
+ opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir)
+ flags := uintptr(0)
+ if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil {
+ return false, err
+ }
+ defer func() {
+ _ = unix.Unmount(mergedDir, unix.MNT_DETACH)
+ }()
+ return true, nil
+}
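
supportsIdmappedLowerLayers follows the same probe-by-mounting approach as doesVolatile and doesMetacopy above: build a throwaway layer layout, attempt a real overlay mount, and treat failure as lack of support. A condensed, self-contained sketch of that pattern (requires root; the mount option and paths are illustrative):

    //go:build linux

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"

    	"golang.org/x/sys/unix"
    )

    // probeMountOption reports whether the kernel accepts the given overlay
    // mount option by attempting a scratch mount under dir. Any mount error
    // is treated as "not supported", as in doesVolatile above.
    func probeMountOption(dir, option string) (bool, error) {
    	td, err := os.MkdirTemp(dir, "probe")
    	if err != nil {
    		return false, err
    	}
    	defer os.RemoveAll(td)

    	for _, sub := range []string{"lower", "upper", "work", "merged"} {
    		if err := os.Mkdir(filepath.Join(td, sub), 0700); err != nil {
    			return false, err
    		}
    	}
    	opts := fmt.Sprintf("%s,lowerdir=%s,upperdir=%s,workdir=%s", option,
    		filepath.Join(td, "lower"), filepath.Join(td, "upper"), filepath.Join(td, "work"))
    	if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
    		return false, nil // kernel rejected the option
    	}
    	defer unix.Unmount(filepath.Join(td, "merged"), 0)
    	return true, nil
    }

    func main() {
    	ok, err := probeMountOption("/var/tmp", "volatile")
    	fmt.Println(ok, err)
    }
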
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_115.go b/vendor/github.com/containers/storage/drivers/overlay/check_115.go
deleted file mode 100644
index 9ad1b863d8d..00000000000
--- a/vendor/github.com/containers/storage/drivers/overlay/check_115.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build !go1.16
-
-package overlay
-
-import (
- "os"
- "path/filepath"
- "strings"
-
- "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/system"
-)
-
-func scanForMountProgramIndicators(home string) (detected bool, err error) {
- err = filepath.Walk(home, func(path string, info os.FileInfo, err error) error {
- if detected {
- return filepath.SkipDir
- }
- if err != nil {
- return err
- }
- basename := filepath.Base(path)
- if strings.HasPrefix(basename, archive.WhiteoutPrefix) {
- detected = true
- return filepath.SkipDir
- }
- if info.IsDir() {
- xattrs, err := system.Llistxattr(path)
- if err != nil {
- return err
- }
- for _, xattr := range xattrs {
- if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") {
- detected = true
- return filepath.SkipDir
- }
- }
- }
- return nil
- })
- return detected, err
-}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go
index 6d7913cbfab..bec455dd4f8 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check_116.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go
@@ -1,14 +1,17 @@
-// +build go1.16
+//go:build linux
+// +build linux
package overlay
import (
+ "errors"
"io/fs"
"path/filepath"
"strings"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/system"
+ "golang.org/x/sys/unix"
)
func scanForMountProgramIndicators(home string) (detected bool, err error) {
@@ -26,7 +29,7 @@ func scanForMountProgramIndicators(home string) (detected bool, err error) {
}
if d.IsDir() {
xattrs, err := system.Llistxattr(path)
- if err != nil {
+ if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
return err
}
for _, xattr := range xattrs {
diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
new file mode 100644
index 00000000000..a7924ff3b03
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
@@ -0,0 +1,147 @@
+//go:build linux
+// +build linux
+
+package overlay
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+
+ "github.com/containers/storage/pkg/idtools"
+ "golang.org/x/sys/unix"
+)
+
+type attr struct {
+ attrSet uint64
+ attrClr uint64
+ propagation uint64
+ userNs uint64
+}
+
+// openTree is a wrapper for the open_tree syscall
+func openTree(path string, flags int) (fd int, err error) {
+ var _p0 *byte
+
+ if _p0, err = syscall.BytePtrFromString(path); err != nil {
+ return 0, err
+ }
+
+ r, _, e1 := syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0), uintptr(unsafe.Pointer(_p0)),
+ uintptr(flags), 0, 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return int(r), err
+}
+
+// moveMount is a wrapper for the move_mount syscall.
+func moveMount(fdTree int, target string) (err error) {
+ var _p0, _p1 *byte
+
+ empty := ""
+
+ if _p0, err = syscall.BytePtrFromString(target); err != nil {
+ return err
+ }
+ if _p1, err = syscall.BytePtrFromString(empty); err != nil {
+ return err
+ }
+
+ flags := unix.MOVE_MOUNT_F_EMPTY_PATH
+
+ _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT),
+ uintptr(fdTree), uintptr(unsafe.Pointer(_p1)),
+ 0, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// mountSetAttr is a wrapper for the mount_setattr syscall
+func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) {
+ var _p0 *byte
+
+ if _p0, err = syscall.BytePtrFromString(path); err != nil {
+ return err
+ }
+
+ _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR), uintptr(dfd), uintptr(unsafe.Pointer(_p0)),
+ uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// createIDMappedMount creates an idmapped bind mount from source to target using the
+// user namespace of the process with the given pid.
+func createIDMappedMount(source, target string, pid int) error {
+ path := fmt.Sprintf("/proc/%d/ns/user", pid)
+ userNsFile, err := os.Open(path)
+ if err != nil {
+ return fmt.Errorf("unable to get user ns file descriptor for %q: %w", path, err)
+ }
+
+ var attr attr
+ attr.attrSet = unix.MOUNT_ATTR_IDMAP
+ attr.attrClr = 0
+ attr.propagation = 0
+ attr.userNs = uint64(userNsFile.Fd())
+
+ defer userNsFile.Close()
+
+ targetDirFd, err := openTree(source, unix.OPEN_TREE_CLONE)
+ if err != nil {
+ return err
+ }
+ defer unix.Close(targetDirFd)
+
+ if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
+ &attr, uint(unsafe.Sizeof(attr))); err != nil {
+ return err
+ }
+ if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) {
+ return err
+ }
+ return moveMount(targetDirFd, target)
+}
+
+// createUsernsProcess forks the current process and creates a user namespace using the specified
+// mappings. It returns the pid of the new process and a cleanup function.
+func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) {
+ pid, _, err := syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0)
+ if err != 0 {
+ return -1, nil, err
+ }
+ if pid == 0 {
+ _ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0)
+ // just wait for the SIGKILL
+ for {
+ syscall.Pause()
+ }
+ }
+ cleanupFunc := func() {
+ unix.Kill(int(pid), unix.SIGKILL)
+ _, _ = unix.Wait4(int(pid), nil, 0, nil)
+ }
+ writeMappings := func(fname string, idmap []idtools.IDMap) error {
+ mappings := ""
+ for _, m := range idmap {
+ mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
+ }
+ return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
+ }
+ if err := writeMappings("uid_map", uidMaps); err != nil {
+ cleanupFunc()
+ return -1, nil, err
+ }
+ if err := writeMappings("gid_map", gidMaps); err != nil {
+ cleanupFunc()
+ return -1, nil, err
+ }
+
+ return int(pid), cleanupFunc, nil
+}
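
The raw Syscall6 wrappers above avoid depending on newer library surface, but recent releases of golang.org/x/sys/unix expose typed wrappers for the same syscalls (unix.OpenTree, unix.MountSetattr, unix.MoveMount). Assuming a sufficiently new x/sys and a kernel with mount_setattr (Linux 5.12+), an equivalent idmapped bind mount could be sketched as:

    //go:build linux

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    // idmapBindMount clones source as a detached mount, applies the user
    // namespace of pid to it, and attaches the result at target.
    func idmapBindMount(source, target string, pid int) error {
    	userns, err := os.Open(fmt.Sprintf("/proc/%d/ns/user", pid))
    	if err != nil {
    		return err
    	}
    	defer userns.Close()

    	fd, err := unix.OpenTree(unix.AT_FDCWD, source, unix.OPEN_TREE_CLONE)
    	if err != nil {
    		return fmt.Errorf("open_tree %q: %w", source, err)
    	}
    	defer unix.Close(fd)

    	attr := unix.MountAttr{
    		Attr_set:  unix.MOUNT_ATTR_IDMAP,
    		Userns_fd: uint64(userns.Fd()),
    	}
    	if err := unix.MountSetattr(fd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE, &attr); err != nil {
    		return fmt.Errorf("mount_setattr: %w", err)
    	}
    	if err := os.MkdirAll(target, 0700); err != nil {
    		return err
    	}
    	return unix.MoveMount(fd, "", unix.AT_FDCWD, target, unix.MOVE_MOUNT_F_EMPTY_PATH)
    }

    func main() {
    	// Usage sketch; paths are hypothetical and root is required.
    	if err := idmapBindMount("/var/lib/test-lower", "/var/lib/test-mapped", os.Getpid()); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    	}
    }
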
diff --git a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
index 2a1e9d0cc1a..bedda35078e 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
@@ -1,3 +1,6 @@
+//go:build linux
+// +build linux
+
package overlay
import jsoniter "github.com/json-iterator/go"
diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go
index 7c8fd50a3a5..cf37f800797 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/mount.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package overlay
@@ -42,7 +43,7 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
cmd := reexec.Command("storage-mountfrom", dir)
w, err := cmd.StdinPipe()
if err != nil {
- return fmt.Errorf("mountfrom error on pipe creation: %v", err)
+ return fmt.Errorf("mountfrom error on pipe creation: %w", err)
}
output := bytes.NewBuffer(nil)
@@ -50,17 +51,17 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
cmd.Stderr = output
if err := cmd.Start(); err != nil {
w.Close()
- return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
+ return fmt.Errorf("mountfrom error on re-exec cmd: %w", err)
}
// write the options to the pipe for the re-exec'd mountfrom process to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
- return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
+ return fmt.Errorf("mountfrom json encode to pipe failed: %w", err)
}
w.Close()
if err := cmd.Wait(); err != nil {
- return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output)
+ return fmt.Errorf("mountfrom re-exec output: %s: error: %w", output, err)
}
return nil
}
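
mountFrom delegates the actual mount(2) to a re-exec'd copy of the current binary (the registered "storage-mountfrom" handler), streaming the options to it as JSON over stdin. A minimal sketch of the containers/storage/pkg/reexec registration pattern it relies on (handler name and body are illustrative):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/containers/storage/pkg/reexec"
    )

    func init() {
    	// Register a named entry point; when the binary is re-exec'd with
    	// this name as os.Args[0], the function below runs instead of main.
    	reexec.Register("my-helper", func() {
    		fmt.Fprintln(os.Stderr, "running as re-exec'd helper")
    		os.Exit(0)
    	})
    }

    func main() {
    	// In the parent, reexec.Init() is a no-op returning false;
    	// in the re-exec'd child it dispatches to the registered handler.
    	if reexec.Init() {
    		return
    	}

    	cmd := reexec.Command("my-helper")
    	cmd.Stderr = os.Stderr
    	if err := cmd.Run(); err != nil {
    		fmt.Fprintln(os.Stderr, "helper failed:", err)
    	}
    }
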
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index f546f9b10e0..844d2c793fe 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package overlay
@@ -5,9 +6,9 @@ package overlay
import (
"bytes"
"encoding/base64"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"path"
@@ -25,7 +26,6 @@ import (
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fsutils"
"github.com/containers/storage/pkg/idtools"
- "github.com/containers/storage/pkg/locker"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
@@ -33,12 +33,9 @@ import (
units "github.com/docker/go-units"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
- "github.com/opencontainers/runc/libcontainer/userns"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/vbatts/tar-split/tar/storage"
"golang.org/x/sys/unix"
)
@@ -119,7 +116,8 @@ type Driver struct {
supportsDType bool
supportsVolatile *bool
usingMetacopy bool
- locker *locker.Locker
+
+ supportsIDMappedMounts *bool
}
type additionalLayerStore struct {
@@ -155,6 +153,15 @@ func hasMetacopyOption(opts []string) bool {
return false
}
+func stripOption(opts []string, option string) []string {
+ for i, s := range opts {
+ if s == option {
+ return stripOption(append(opts[:i], opts[i+1:]...), option)
+ }
+ }
+ return opts
+}
+
func hasVolatileOption(opts []string) bool {
for _, s := range opts {
if s == "volatile" {
@@ -174,27 +181,51 @@ func checkSupportVolatile(home, runhome string) (bool, error) {
var usingVolatile bool
if err == nil {
if volatileCacheResult {
- logrus.Debugf("cached value indicated that volatile is being used")
+ logrus.Debugf("Cached value indicated that volatile is being used")
} else {
- logrus.Debugf("cached value indicated that volatile is not being used")
+ logrus.Debugf("Cached value indicated that volatile is not being used")
}
usingVolatile = volatileCacheResult
} else {
usingVolatile, err = doesVolatile(home)
if err == nil {
if usingVolatile {
- logrus.Debugf("overlay test mount indicated that volatile is being used")
+ logrus.Debugf("overlay: test mount indicated that volatile is being used")
} else {
- logrus.Debugf("overlay test mount indicated that volatile is not being used")
+ logrus.Debugf("overlay: test mount indicated that volatile is not being used")
}
if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil {
- return false, errors.Wrap(err, "error recording volatile-being-used status")
+ return false, fmt.Errorf("recording volatile-being-used status: %w", err)
}
}
}
return usingVolatile, nil
}
+// checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of a
+// idmapped lower layer.
+func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) {
+ if os.Geteuid() != 0 {
+ return false, nil
+ }
+
+ feature := "idmapped-lower-dir"
+ overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
+ if err == nil {
+ if overlayCacheResult {
+ logrus.Debugf("Cached value indicated that idmapped mounts for overlay are supported")
+ return true, nil
+ }
+ logrus.Debugf("Cached value indicated that idmapped mounts for overlay are not supported")
+ return false, errors.New(overlayCacheText)
+ }
+ supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home)
+ if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil {
+ return false, fmt.Errorf("recording overlay idmapped mounts support status: %w", err2)
+ }
+ return supportsIDMappedMounts, err
+}
+
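
checkAndRecordIDMappedSupport uses the same memoization scheme as the volatile and metacopy checks: consult the per-runroot feature cache, run the probe only on a miss, and persist the result for subsequent runs. The shape of the pattern, abstracted over the probe (cachedFeatureCheck and cachedFeatureRecord are this file's own helpers, shown further down; checkFeature itself is illustrative):

    // checkFeature is a sketch of the caching scheme used above; the
    // name and the probe parameter are illustrative.
    func checkFeature(runhome, feature string, probe func() (bool, error)) (bool, error) {
    	if cached, _, err := cachedFeatureCheck(runhome, feature); err == nil {
    		return cached, nil // cache hit: skip the probe entirely
    	}
    	supported, err := probe()
    	if err != nil {
    		return false, err
    	}
    	if err := cachedFeatureRecord(runhome, feature, supported, ""); err != nil {
    		return false, fmt.Errorf("recording %s support status: %w", feature, err)
    	}
    	return supported, nil
    }
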
func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) {
var supportsDType bool
@@ -206,9 +237,9 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
if err == nil {
if overlayCacheResult {
- logrus.Debugf("cached value indicated that overlay is supported")
+ logrus.Debugf("Cached value indicated that overlay is supported")
} else {
- logrus.Debugf("cached value indicated that overlay is not supported")
+ logrus.Debugf("Cached value indicated that overlay is not supported")
}
supportsDType = overlayCacheResult
if !supportsDType {
@@ -223,14 +254,14 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str
if ok && patherr.Err == syscall.ENOSPC {
return false, err
}
- err = errors.Wrap(err, "kernel does not support overlay fs")
+ err = fmt.Errorf("kernel does not support overlay fs: %w", err)
if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil {
- return false, errors.Wrapf(err2, "error recording overlay not being supported (%v)", err)
+ return false, fmt.Errorf("recording overlay not being supported (%v): %w", err, err2)
}
return false, err
}
if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil {
- return false, errors.Wrap(err, "error recording overlay support status")
+ return false, fmt.Errorf("recording overlay support status: %w", err)
}
}
return supportsDType, nil
@@ -248,6 +279,23 @@ func (d *Driver) getSupportsVolatile() (bool, error) {
return supportsVolatile, nil
}
+// isNetworkFileSystem reports whether the specified filesystem is a network filesystem,
+// which native overlay does not support as a backing store when running in a user namespace.
+func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
+ switch fsMagic {
+ // a bunch of network file systems...
+ case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs,
+ graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS,
+ graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
+ graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP,
+ graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS,
+ graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS,
+ graphdriver.FsMagicVBOXSF, graphdriver.FsMagicVXFS:
+ return true
+ }
+ return false
+}
+
// Init returns a native diff driver for the overlay filesystem.
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
@@ -265,35 +313,55 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
backingFs = fsName
}
- if opts.mountProgram != "" {
- if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
- return nil, err
- }
- } else {
- // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
- if opts.forceMask != nil {
- return nil, errors.New("'force_mask' is supported only with 'mount_program'")
- }
- switch fsMagic {
- case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
- return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs)
- }
- }
-
+ runhome := filepath.Join(options.RunRoot, filepath.Base(home))
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
// Create the driver home dir
- if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0755, 0, 0); err != nil {
return nil, err
}
- runhome := filepath.Join(options.RunRoot, filepath.Base(home))
+
if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
return nil, err
}
+ if opts.mountProgram == "" {
+ if supported, err := SupportsNativeOverlay(home, runhome); err != nil {
+ return nil, err
+ } else if !supported {
+ if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
+ opts.mountProgram = path
+ }
+ }
+ }
+
+ if opts.mountProgram != "" {
+ if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil {
+ m := os.FileMode(0700)
+ opts.forceMask = &m
+ logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m)
+ }
+
+ if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
+ return nil, err
+ }
+ } else {
+ if opts.forceMask != nil {
+ return nil, errors.New("'force_mask' is supported only with 'mount_program'")
+ }
+ // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
+ switch fsMagic {
+ case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
+ return nil, fmt.Errorf("'overlay' is not supported over %s, a mount_program is required: %w", backingFs, graphdriver.ErrIncompatibleFS)
+ }
+ if unshare.IsRootless() && isNetworkFileSystem(fsMagic) {
+ return nil, fmt.Errorf("a network file system with user namespaces is not supported. Please use a mount_program: %w", graphdriver.ErrIncompatibleFS)
+ }
+ }
+
var usingMetacopy bool
var supportsDType bool
var supportsVolatile *bool
@@ -310,24 +378,24 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
metacopyCacheResult, _, err := cachedFeatureCheck(runhome, feature)
if err == nil {
if metacopyCacheResult {
- logrus.Debugf("cached value indicated that metacopy is being used")
+ logrus.Debugf("Cached value indicated that metacopy is being used")
} else {
- logrus.Debugf("cached value indicated that metacopy is not being used")
+ logrus.Debugf("Cached value indicated that metacopy is not being used")
}
usingMetacopy = metacopyCacheResult
} else {
usingMetacopy, err = doesMetacopy(home, opts.mountOptions)
if err == nil {
if usingMetacopy {
- logrus.Debugf("overlay test mount indicated that metacopy is being used")
+ logrus.Debugf("overlay: test mount indicated that metacopy is being used")
} else {
- logrus.Debugf("overlay test mount indicated that metacopy is not being used")
+ logrus.Debugf("overlay: test mount indicated that metacopy is not being used")
}
if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil {
- return nil, errors.Wrap(err, "error recording metacopy-being-used status")
+ return nil, fmt.Errorf("recording metacopy-being-used status: %w", err)
}
} else {
- logrus.Infof("overlay test mount did not indicate whether or not metacopy is being used: %v", err)
+ logrus.Infof("overlay: test mount did not indicate whether or not metacopy is being used: %v", err)
return nil, err
}
}
@@ -354,7 +422,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
supportsVolatile: supportsVolatile,
- locker: locker.New(),
options: *opts,
}
@@ -364,11 +431,11 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if d.quotaCtl, err = quota.NewControl(home); err == nil {
projectQuotaSupported = true
} else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
- return nil, fmt.Errorf("Storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %v", err)
+ return nil, fmt.Errorf("storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %w", err)
}
} else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
// if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
- return nil, fmt.Errorf("Storage option overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs)
+ return nil, fmt.Errorf("storage option overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs)
}
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy)
@@ -419,7 +486,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
st, err := os.Stat(store)
if err != nil {
- return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err)
+ return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %w", store, err)
}
if !st.IsDir() {
return nil, fmt.Errorf("overlay: image path %q must be a directory", store)
@@ -440,7 +507,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
st, err := os.Stat(lstore)
if err != nil {
- return nil, errors.Wrap(err, "overlay: can't stat additionallayerstore dir")
+ return nil, fmt.Errorf("overlay: can't stat additionallayerstore dir: %w", err)
}
if !st.IsDir() {
return nil, fmt.Errorf("overlay: additionallayerstore path %q must be a directory", lstore)
@@ -467,7 +534,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
if val != "" {
_, err := os.Stat(val)
if err != nil {
- return nil, errors.Wrapf(err, "overlay: can't stat program %q", val)
+ return nil, fmt.Errorf("overlay: can't stat program %q: %w", val, err)
}
}
o.mountProgram = val
@@ -511,11 +578,11 @@ func cachedFeatureSet(feature string, set bool) string {
}
func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) {
- content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
+ content, err := os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
if err == nil {
return true, string(content), nil
}
- content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
+ content, err = os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
if err == nil {
return false, string(content), nil
}
@@ -533,29 +600,26 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) (
return err
}
-func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
- if os.Geteuid() != 0 || graphroot == "" || rundir == "" {
+func SupportsNativeOverlay(home, runhome string) (bool, error) {
+ if os.Geteuid() != 0 || home == "" || runhome == "" {
return false, nil
}
- home := filepath.Join(graphroot, "overlay")
- runhome := filepath.Join(rundir, "overlay")
-
var contents string
- flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home))
+ flagContent, err := os.ReadFile(getMountProgramFlagFile(home))
if err == nil {
contents = strings.TrimSpace(string(flagContent))
}
switch contents {
case "true":
- logrus.Debugf("overlay storage already configured with a mount-program")
+ logrus.Debugf("overlay: storage already configured with a mount-program")
return false, nil
default:
needsMountProgram, err := scanForMountProgramIndicators(home)
if err != nil && !os.IsNotExist(err) {
return false, err
}
- if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
+ if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
return false, err
}
if needsMountProgram {
@@ -591,7 +655,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
logLevel = logrus.DebugLevel
}
- layerDir, err := ioutil.TempDir(home, "compat")
+ layerDir, err := os.MkdirTemp(home, "compat")
if err != nil {
patherr, ok := err.(*os.PathError)
if ok && patherr.Err == syscall.ENOSPC {
@@ -640,17 +704,17 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
flags = fmt.Sprintf("%s,userxattr", flags)
}
if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil {
- logrus.Debugf("unable to create kernel-style whiteout: %v", err)
- return supportsDType, errors.Wrapf(err, "unable to create kernel-style whiteout")
+ logrus.Debugf("Unable to create kernel-style whiteout: %v", err)
+ return supportsDType, fmt.Errorf("unable to create kernel-style whiteout: %w", err)
}
if len(flags) < unix.Getpagesize() {
err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
if err == nil {
- logrus.Debugf("overlay test mount with multiple lowers succeeded")
+ logrus.Debugf("overlay: test mount with multiple lowers succeeded")
return supportsDType, nil
}
- logrus.Debugf("overlay test mount with multiple lowers failed %v", err)
+ logrus.Debugf("overlay: test mount with multiple lowers failed: %v", err)
}
flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir)
if selinux.GetEnabled() {
@@ -659,17 +723,17 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
if len(flags) < unix.Getpagesize() {
err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
if err == nil {
- logrus.StandardLogger().Logf(logLevel, "overlay test mount with multiple lowers failed, but succeeded with a single lower")
- return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
+ logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower")
+ return supportsDType, fmt.Errorf("kernel too old to provide multiple lowers feature for overlay: %w", graphdriver.ErrNotSupported)
}
- logrus.Debugf("overlay test mount with a single lower failed %v", err)
+ logrus.Debugf("overlay: test mount with a single lower failed: %v", err)
}
logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home)
- return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home)
+ return supportsDType, fmt.Errorf("'overlay' is not supported over %s at %q: %w", backingFs, home, graphdriver.ErrIncompatibleFS)
}
logrus.StandardLogger().Logf(logLevel, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
- return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
+ return supportsDType, fmt.Errorf("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.: %w", graphdriver.ErrNotSupported)
}
func (d *Driver) useNaiveDiff() bool {
@@ -682,9 +746,9 @@ func (d *Driver) useNaiveDiff() bool {
nativeDiffCacheResult, nativeDiffCacheText, err := cachedFeatureCheck(d.runhome, feature)
if err == nil {
if nativeDiffCacheResult {
- logrus.Debugf("cached value indicated that native-diff is usable")
+ logrus.Debugf("Cached value indicated that native-diff is usable")
} else {
- logrus.Debugf("cached value indicated that native-diff is not being used")
+ logrus.Debugf("Cached value indicated that native-diff is not being used")
logrus.Info(nativeDiffCacheText)
}
useNaiveDiffOnly = !nativeDiffCacheResult
@@ -821,7 +885,7 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10)
}
- return d.create(id, parent, opts)
+ return d.create(id, parent, opts, false)
}
// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
@@ -831,15 +895,16 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if _, ok := opts.StorageOpt["size"]; ok {
return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
}
+
if _, ok := opts.StorageOpt["inodes"]; ok {
return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers")
}
}
- return d.create(id, parent, opts)
+ return d.create(id, parent, opts, true)
}
-func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
+func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) {
dir := d.dir(id)
uidMaps := d.uidMaps
@@ -850,15 +915,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
gidMaps = opts.IDMappings.GIDs()
}
+ // Make the link directory if it does not exist
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil {
+ return err
+ }
+
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return err
}
- // Make the link directory if it does not exist
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
- return err
+
+ idPair := idtools.IDPair{
+ UID: rootUID,
+ GID: rootGID,
}
- if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
+
+ if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0755, idPair); err != nil {
return err
}
if parent != "" {
@@ -869,18 +941,30 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
rootUID = int(st.UID())
rootGID = int(st.GID())
}
- if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
+
+ if _, err := system.Lstat(dir); err == nil {
+ logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir)
+ // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir,
+ // so that we can’t end up with two symlinks in linkDir pointing to the same layer.
+ if err := d.Remove(id); err != nil {
+ return fmt.Errorf("removing a pre-existing layer directory %q: %w", dir, err)
+ }
+ }
+
+ if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil {
return err
}
defer func() {
// Clean up on failure
if retErr != nil {
- os.RemoveAll(dir)
+ if err2 := os.RemoveAll(dir); err2 != nil {
+ logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
+ }
}
}()
- if d.quotaCtl != nil {
+ if d.quotaCtl != nil && !disableQuota {
quota := quota.Quota{}
if opts != nil && len(opts.StorageOpt) > 0 {
driver := &Driver{}
@@ -923,7 +1007,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
}
// Write link id to link file
- if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
+ if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
return err
}
@@ -944,7 +1028,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return err
}
if lower != "" {
- if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
+ if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
return err
}
}
@@ -971,7 +1055,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
}
driver.options.quota.Inodes = uint64(inodes)
default:
- return fmt.Errorf("Unknown option %s", key)
+ return fmt.Errorf("unknown option %s", key)
}
}
@@ -987,23 +1071,23 @@ func (d *Driver) getLower(parent string) (string, error) {
}
// Read Parent link fileA
- parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
+ parentLink, err := os.ReadFile(path.Join(parentDir, "link"))
if err != nil {
if !os.IsNotExist(err) {
return "", err
}
logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(parentDir, "link"))
if err := d.recreateSymlinks(); err != nil {
- return "", errors.Wrap(err, "error recreating the links")
+ return "", fmt.Errorf("recreating the links: %w", err)
}
- parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link"))
+ parentLink, err = os.ReadFile(path.Join(parentDir, "link"))
if err != nil {
return "", err
}
}
lowers := []string{path.Join(linkDir, string(parentLink))}
- parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
+ parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile))
if err == nil {
parentLowers := strings.Split(string(parentLower), ":")
lowers = append(lowers, parentLowers...)
@@ -1012,22 +1096,27 @@ func (d *Driver) getLower(parent string) (string, error) {
}
func (d *Driver) dir(id string) string {
+ p, _ := d.dir2(id)
+ return p
+}
+
+func (d *Driver) dir2(id string) (string, bool) {
newpath := path.Join(d.home, id)
if _, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() {
l := path.Join(p, d.name, id)
_, err = os.Stat(l)
if err == nil {
- return l
+ return l, true
}
}
}
- return newpath
+ return newpath, false
}
func (d *Driver) getLowerDirs(id string) ([]string, error) {
var lowersArray []string
- lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
+ lowers, err := os.ReadFile(path.Join(d.dir(id), lowerFile))
if err == nil {
for _, s := range strings.Split(string(lowers), ":") {
lower := d.dir(s)
@@ -1038,7 +1127,7 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
if os.IsNotExist(err) {
logrus.Warnf("Can't read link %q because it does not exist. A storage corruption might have occurred, attempting to recreate the missing symlinks. It might be best wipe the storage to avoid further errors due to storage corruption.", lower)
if err := d.recreateSymlinks(); err != nil {
- return nil, fmt.Errorf("error recreating the missing symlinks: %v", err)
+ return nil, fmt.Errorf("recreating the missing symlinks: %w", err)
}
// let's call Readlink on lower again now that we have recreated the missing symlinks
lp, err = os.Readlink(lower)
@@ -1095,11 +1184,8 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa
// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
- d.locker.Lock(id)
- defer d.locker.Unlock(id)
-
dir := d.dir(id)
- lid, err := ioutil.ReadFile(path.Join(dir, "link"))
+ lid, err := os.ReadFile(path.Join(dir, "link"))
if err == nil {
if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
logrus.Debugf("Failed to remove link: %v", err)
@@ -1118,23 +1204,22 @@ func (d *Driver) Remove(id string) error {
// under each layer has a symlink created for it under the linkDir. If the symlink does not
// exist, it creates it.
func (d *Driver) recreateSymlinks() error {
+ // We have at most 3 corrective actions per layer, so 10 iterations is plenty.
+ const maxIterations = 10
+
// List all the directories under the home directory
- dirs, err := ioutil.ReadDir(d.home)
+ dirs, err := os.ReadDir(d.home)
if err != nil {
- return fmt.Errorf("error reading driver home directory %q: %v", d.home, err)
+ return fmt.Errorf("reading driver home directory %q: %w", d.home, err)
}
- linksDir := filepath.Join(d.home, "l")
// This makes the link directory if it doesn't exist
- rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
- if err != nil {
- return err
- }
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil {
return err
}
// Keep looping as long as we take some corrective action in each iteration
var errs *multierror.Error
madeProgress := true
+ iterations := 0
for madeProgress {
errs = nil
madeProgress = false
@@ -1142,13 +1227,13 @@ func (d *Driver) recreateSymlinks() error {
// the layer's "link" file that points to the layer's "diff" directory.
for _, dir := range dirs {
// Skip over the linkDir and anything that is not a directory
- if dir.Name() == linkDir || !dir.Mode().IsDir() {
+ if dir.Name() == linkDir || !dir.IsDir() {
continue
}
// Read the "link" file under each layer to get the name of the symlink
- data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link"))
+ data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link"))
if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error reading name of symlink for %q", dir))
+ errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
continue
}
linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n"))
@@ -1162,30 +1247,38 @@ func (d *Driver) recreateSymlinks() error {
}
madeProgress = true
} else if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error trying to stat %q", linkPath))
+ errs = multierror.Append(errs, err)
continue
}
}
+
+ // linkDirFullPath is the full path to the linkDir
+ linkDirFullPath := filepath.Join(d.home, "l")
// Now check if we somehow lost a "link" file, by making sure
// that each symlink we have corresponds to one.
- links, err := ioutil.ReadDir(linksDir)
+ links, err := os.ReadDir(linkDirFullPath)
if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error reading links directory %q", linksDir))
+ errs = multierror.Append(errs, err)
continue
}
// Go through all of the symlinks in the "l" directory
for _, link := range links {
// Read the symlink's target, which should be "../$layer/diff"
- target, err := os.Readlink(filepath.Join(linksDir, link.Name()))
+ target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name()))
if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error reading target of link %q", link))
+ errs = multierror.Append(errs, err)
continue
}
targetComponents := strings.Split(target, string(os.PathSeparator))
if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" {
- errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target))
+ errs = multierror.Append(errs, fmt.Errorf("link target of %q looks weird: %q", link, target))
// force the link to be recreated on the next pass
- os.Remove(filepath.Join(linksDir, link.Name()))
+ if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil {
+ if !os.IsNotExist(err) {
+ errs = multierror.Append(errs, fmt.Errorf("removing link %q: %w", link, err))
+ } // else don’t report any error, but also don’t set madeProgress.
+ continue
+ }
madeProgress = true
continue
}
@@ -1193,15 +1286,22 @@ func (d *Driver) recreateSymlinks() error {
// it has the basename of our symlink in it.
targetID := targetComponents[1]
linkFile := filepath.Join(d.dir(targetID), "link")
- data, err := ioutil.ReadFile(linkFile)
+ data, err := os.ReadFile(linkFile)
if err != nil || string(data) != link.Name() {
- if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error correcting link for layer %q", targetID))
+ // NOTE: If two or more links point to the same target, we will update linkFile
+ // with every value of link.Name(), and set madeProgress = true every time.
+ if err := os.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
continue
}
madeProgress = true
}
}
+ iterations++
+ if iterations >= maxIterations {
+ errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations))
+ break
+ }
}
if errs != nil {
return errs.ErrorOrNil()
@@ -1210,23 +1310,25 @@ func (d *Driver) recreateSymlinks() error {
}
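
For reference, recreateSymlinks accumulates per-layer problems with hashicorp/go-multierror instead of failing fast, then reports them all at once. A standalone sketch of that accumulate-then-report pattern (not part of this diff; the per-item check is hypothetical):

package main

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func checkAll(items []string) error {
	var errs *multierror.Error
	for _, it := range items {
		if it == "" {
			// record the problem and keep going instead of failing fast
			errs = multierror.Append(errs, fmt.Errorf("empty item name"))
		}
	}
	// ErrorOrNil returns nil when nothing was appended, so callers can
	// use the usual `if err != nil` check on the result
	return errs.ErrorOrNil()
}

func main() {
	fmt.Println(checkAll([]string{"a", "", "c"}))
}
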
// Get creates and mounts the required file system for the given id and returns the mount path.
-func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return d.get(id, false, options)
}
func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
- d.locker.Lock(id)
- defer d.locker.Unlock(id)
- dir := d.dir(id)
+ dir, inAdditionalStore := d.dir2(id)
if _, err := os.Stat(dir); err != nil {
return "", err
}
- readWrite := true
+ readWrite := !inAdditionalStore
if !d.SupportsShifting() || options.DisableShifting {
disableShifting = true
}
+ logLevel := logrus.WarnLevel
+ if unshare.IsRootless() {
+ logLevel = logrus.DebugLevel
+ }
optsList := options.Options
if len(optsList) == 0 {
optsList = strings.Split(d.options.mountOptions, ",")
@@ -1235,16 +1337,27 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// options otherwise the kernel refuses to follow the metacopy xattr.
if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) {
if d.usingMetacopy {
+ logrus.StandardLogger().Logf(logrus.DebugLevel, "Adding metacopy option, configured globally")
optsList = append(optsList, "metacopy=on")
- } else {
- logLevel := logrus.WarnLevel
- if unshare.IsRootless() {
- logLevel = logrus.DebugLevel
+ }
+ }
+ }
+ if !d.usingMetacopy {
+ if hasMetacopyOption(optsList) {
+ if d.options.mountProgram == "" {
+ release := ""
+ var uts unix.Utsname
+ if err := unix.Uname(&uts); err == nil {
+ release = " " + string(uts.Release[:]) + " " + string(uts.Version[:])
}
- logrus.StandardLogger().Logf(logLevel, "ignoring metacopy option from storage.conf, not supported with booted kernel")
+ logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel"+release)
+ } else {
+ logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it")
}
}
+ optsList = stripOption(optsList, "metacopy=on")
}
+
for _, o := range optsList {
if o == "ro" {
readWrite = false
@@ -1252,7 +1365,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
- lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
+ lowers, err := os.ReadFile(path.Join(dir, lowerFile))
if err != nil && !os.IsNotExist(err) {
return "", err
}
@@ -1268,16 +1381,16 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers
// lists that we're building. "diff" itself is the upper, so it won't be in the lists.
- link, err := ioutil.ReadFile(path.Join(dir, "link"))
+ link, err := os.ReadFile(path.Join(dir, "link"))
if err != nil {
if !os.IsNotExist(err) {
return "", err
}
logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(dir, "link"))
if err := d.recreateSymlinks(); err != nil {
- return "", errors.Wrap(err, "error recreating the links")
+ return "", fmt.Errorf("recreating the links: %w", err)
}
- link, err = ioutil.ReadFile(path.Join(dir, "link"))
+ link, err = os.ReadFile(path.Join(dir, "link"))
if err != nil {
return "", err
}
@@ -1295,7 +1408,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
for err == nil {
absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN)))
- relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN)))
+ relLowers = append(relLowers, dumbJoin(linkDir, string(link), "..", nameWithSuffix("diff", diffN)))
diffN++
st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
if err == nil && !permsKnown {
@@ -1330,11 +1443,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if lower == "" && os.IsNotExist(err) {
logrus.Warnf("Can't stat lower layer %q because it does not exist. Going through storage to recreate the missing symlinks.", newpath)
if err := d.recreateSymlinks(); err != nil {
- return "", fmt.Errorf("error recreating the missing symlinks: %v", err)
+ return "", fmt.Errorf("recreating the missing symlinks: %w", err)
}
lower = newpath
} else if lower == "" {
- return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
+ return "", fmt.Errorf("can't stat lower layer %q: %w", newpath, err)
}
} else {
if !permsKnown {
@@ -1381,7 +1494,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if retErr != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
- logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr)
+ logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr)
}
}
}
@@ -1389,47 +1502,93 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
workdir := path.Join(dir, "work")
- var opts string
- if readWrite {
- opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
- } else {
- opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
- }
- if len(optsList) > 0 {
- opts = fmt.Sprintf("%s,%s", strings.Join(optsList, ","), opts)
- }
-
if d.options.mountProgram == "" && unshare.IsRootless() {
- opts = fmt.Sprintf("%s,userxattr", opts)
+ optsList = append(optsList, "userxattr")
}
- // If "volatile" is not supported by the file system, just ignore the request
- if options.Volatile && !hasVolatileOption(strings.Split(opts, ",")) {
+ if options.Volatile && !hasVolatileOption(optsList) {
supported, err := d.getSupportsVolatile()
if err != nil {
return "", err
}
+ // If "volatile" is not supported by the file system, just ignore the request
if supported {
- opts = fmt.Sprintf("%s,volatile", opts)
+ optsList = append(optsList, "volatile")
}
}
+ if d.supportsIDmappedMounts() && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 {
+ var newAbsDir []string
+ mappedRoot := filepath.Join(d.home, id, "mapped")
+ if err := os.MkdirAll(mappedRoot, 0700); err != nil {
+ return "", err
+ }
+
+ pid, cleanupFunc, err := createUsernsProcess(options.UidMaps, options.GidMaps)
+ if err != nil {
+ return "", err
+ }
+ defer cleanupFunc()
+
+ idMappedMounts := make(map[string]string)
+
+ // rewrite the lower dirs to their idmapped mount.
+ c := 0
+ for _, absLower := range absLowers {
+ mappedMountSrc := getMappedMountRoot(absLower)
+
+ root, found := idMappedMounts[mappedMountSrc]
+ if !found {
+ root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c))
+ c++
+ if err := createIDMappedMount(mappedMountSrc, root, int(pid)); err != nil {
+ return "", fmt.Errorf("create mapped mount for %q on %q: %w", mappedMountSrc, root, err)
+ }
+ idMappedMounts[mappedMountSrc] = root
+
+ // overlay takes a reference on the mount, so it is safe to unmount
+ // the mapped idmounts as soon as the final overlay file system is mounted.
+ defer unix.Unmount(root, unix.MNT_DETACH)
+ }
+
+ // relative path to the layer through the id mapped mount
+ rel, err := filepath.Rel(mappedMountSrc, absLower)
+ if err != nil {
+ return "", err
+ }
+
+ newAbsDir = append(newAbsDir, filepath.Join(root, rel))
+ }
+ absLowers = newAbsDir
+ }
+
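
The block above deduplicates idmapped mounts by source directory and then re-resolves each lower through its mapped root. A standalone sketch of just the path bookkeeping (not part of this diff; the mount call is stubbed out and getMappedMountRoot is approximated by filepath.Dir):

package main

import (
	"fmt"
	"path/filepath"
)

// remapLowers re-roots each lower dir under a per-source mapped root,
// naming each mapped root only once, mirroring the dedup-by-source
// bookkeeping in the patch above.
func remapLowers(mappedRoot string, lowers []string) ([]string, error) {
	mounts := map[string]string{} // source parent -> mapped root
	out := make([]string, 0, len(lowers))
	c := 0
	for _, lower := range lowers {
		src := filepath.Dir(lower) // stand-in for getMappedMountRoot
		root, ok := mounts[src]
		if !ok {
			root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c))
			c++
			// the real driver creates the idmapped mount of src on root here
			mounts[src] = root
		}
		rel, err := filepath.Rel(src, lower)
		if err != nil {
			return nil, err
		}
		out = append(out, filepath.Join(root, rel))
	}
	return out, nil
}

func main() {
	// both lowers share the "l" parent, so they reuse mapped root 0
	fmt.Println(remapLowers("/store/overlay/id/mapped",
		[]string{"/store/overlay/l/AAA", "/store/overlay/l/BBB"}))
}
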
+ var opts string
+ if readWrite {
+ opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
+ } else {
+ opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
+ }
+ if len(optsList) > 0 {
+ opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
+ }
+
mountData := label.FormatMountLabel(opts, options.MountLabel)
mountFunc := unix.Mount
mountTarget := mergedDir
pageSize := unix.Getpagesize()
- // Use relative paths and mountFrom when the mount data has exceeded
- // the page size. The mount syscall fails if the mount data cannot
- // fit within a page and relative links make the mount data much
- // smaller at the expense of requiring a fork exec to chroot.
if d.options.mountProgram != "" {
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
if !disableShifting {
label = d.optsAppendMappings(label, options.UidMaps, options.GidMaps)
}
+ // if forceMask is in place, tell fuse-overlayfs to write the permissions mask to an unprivileged xattr as well.
+ if d.options.forceMask != nil {
+ label = label + ",xattr_permissions=2"
+ }
+
mountProgram := exec.Command(d.options.mountProgram, "-o", label, target)
mountProgram.Dir = d.home
var b bytes.Buffer
@@ -1440,22 +1599,30 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if output == "" {
output = ""
}
- return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output)
+ return fmt.Errorf("using mount program %s: %s: %w", d.options.mountProgram, output, err)
}
return nil
}
- } else if len(mountData) > pageSize {
+ } else if len(mountData) >= pageSize {
+ // Use relative paths and mountFrom when the mount data has exceeded
+ // the page size. The mount syscall fails if the mount data cannot
+ // fit within a page and relative links make the mount data much
+ // smaller at the expense of requiring a fork exec to chroot.
+
workdir = path.Join(id, "work")
//FIXME: We need to figure out to get this to work with additional stores
if readWrite {
diffDir := path.Join(id, "diff")
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir)
} else {
- opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":"))
+ opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(relLowers, ":"))
+ }
+ if len(optsList) > 0 {
+ opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
}
mountData = label.FormatMountLabel(opts, options.MountLabel)
- if len(mountData) > pageSize {
- return "", fmt.Errorf("cannot mount layer, mount label %q too large %d > page size %d", options.MountLabel, len(mountData), pageSize)
+ if len(mountData) >= pageSize {
+ return "", fmt.Errorf("cannot mount layer, mount label %q too large %d >= page size %d", options.MountLabel, len(mountData), pageSize)
}
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
return mountFrom(d.home, source, target, mType, flags, label)
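
The relative-path fallback exists because mount(2) rejects an option string that does not fit in one page. A sketch of the size check under that assumption (not part of this diff; the helper name is hypothetical):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/unix"
)

// overlayLowerOpts prefers the absolute lowerdir list and falls back to
// the short link-relative form when the string would not fit in a page.
func overlayLowerOpts(absLowers, relLowers []string) (string, error) {
	pageSize := unix.Getpagesize()
	opts := "lowerdir=" + strings.Join(absLowers, ":")
	if len(opts) < pageSize {
		return opts, nil
	}
	// relative links ("l/<id>") keep the string short, at the cost of a
	// fork/exec plus chroot into the driver home (mountFrom) to mount
	opts = "lowerdir=" + strings.Join(relLowers, ":")
	if len(opts) >= pageSize {
		return "", fmt.Errorf("mount options too large: %d >= page size %d", len(opts), pageSize)
	}
	return opts, nil
}

func main() {
	fmt.Println(overlayLowerOpts([]string{"/a/b/diff"}, []string{"l/AAA"}))
}
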
@@ -1473,7 +1640,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
flags, data := mount.ParseOptions(mountData)
logrus.Debugf("overlay: mount_data=%s", mountData)
if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil {
- return "", fmt.Errorf("error creating overlay mount to %s, mount_data=%q: %v", mountTarget, mountData, err)
+ return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %w", mountTarget, mountData, err)
}
return mergedDir, nil
@@ -1481,8 +1648,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error {
- d.locker.Lock(id)
- defer d.locker.Unlock(id)
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return err
@@ -1491,18 +1656,30 @@ func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
- if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
+ if _, err := os.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
return err
}
unmounted := false
+ mappedRoot := filepath.Join(d.home, id, "mapped")
+ // This should not happen, but clean up any mapped mounts that were leaked.
+ if _, err := os.Stat(mappedRoot); err == nil {
+ mounts, err := os.ReadDir(mappedRoot)
+ if err == nil {
+ // Go through all of the mapped mounts.
+ for _, m := range mounts {
+ _ = unix.Unmount(filepath.Join(mappedRoot, m.Name()), unix.MNT_DETACH)
+ }
+ }
+ }
+
if d.options.mountProgram != "" {
// Attempt to unmount the FUSE mount using either fusermount or fusermount3.
// If they fail, fallback to unix.Unmount
for _, v := range []string{"fusermount3", "fusermount"} {
err := exec.Command(v, "-u", mountpoint).Run()
- if err != nil && errors.Cause(err) != exec.ErrNotFound {
+ if err != nil && !errors.Is(err, exec.ErrNotFound) {
logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err)
}
if err == nil {
@@ -1574,11 +1751,24 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
return whiteoutFormat
}
-type fileGetNilCloser struct {
- storage.FileGetter
+type overlayFileGetter struct {
+ diffDirs []string
}
-func (f fileGetNilCloser) Close() error {
+func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
+ for _, d := range g.diffDirs {
+ f, err := os.Open(filepath.Join(d, path))
+ if err == nil {
+ return f, nil
+ }
+ }
+ if len(g.diffDirs) > 0 {
+ return os.Open(filepath.Join(g.diffDirs[0], path))
+ }
+ return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist)
+}
+
+func (g *overlayFileGetter) Close() error {
return nil
}
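
A standalone version of the first-match fallthrough that overlayFileGetter.Get implements above (not part of this diff; the directory names in main are hypothetical):

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// openAcross mirrors overlayFileGetter.Get: the first layer that has the
// file wins; on a total miss, retry the top layer so the caller gets a
// real *PathError for that path.
func openAcross(diffDirs []string, name string) (io.ReadCloser, error) {
	for _, d := range diffDirs {
		if f, err := os.Open(filepath.Join(d, name)); err == nil {
			return f, nil
		}
	}
	if len(diffDirs) > 0 {
		return os.Open(filepath.Join(diffDirs[0], name))
	}
	return nil, fmt.Errorf("%s: %w", name, os.ErrNotExist)
}

func main() {
	_, err := openAcross([]string{"/tmp/layer1", "/tmp/layer0"}, "etc/hosts")
	fmt.Println(err)
}
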
@@ -1587,13 +1777,18 @@ func (d *Driver) getStagingDir() string {
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
-// contains files for the layer differences. Used for direct access for tar-split.
+// contains files for the layer differences, either for this layer, or one of our
+// lowers if we're just a template directory. Used for direct access for tar-split.
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
p, err := d.getDiffPath(id)
if err != nil {
return nil, err
}
- return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+ paths, err := d.getLowerDiffPaths(id)
+ if err != nil {
+ return nil, err
+ }
+ return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil
}
// CleanupStagingDirectory cleans up the staging directory.
@@ -1618,7 +1813,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
if err != nil && !os.IsExist(err) {
return graphdriver.DriverWithDifferOutput{}, err
}
- applyDir, err = ioutil.TempDir(d.getStagingDir(), "")
+ applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
@@ -1638,7 +1833,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
GIDMaps: idMappings.GIDs(),
IgnoreChownErrors: d.options.ignoreChownErrors,
WhiteoutFormat: d.getWhiteoutFormat(),
- InUserNS: userns.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
})
out.Target = applyDir
return out, err
@@ -1696,7 +1891,7 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
IgnoreChownErrors: d.options.ignoreChownErrors,
ForceMask: d.options.forceMask,
WhiteoutFormat: d.getWhiteoutFormat(),
- InUserNS: userns.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
}); err != nil {
return 0, err
}
@@ -1820,7 +2015,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
err = graphdriver.ChownPathByMaps(layerFs, toContainer, toHost)
if err != nil {
if err2 := d.Put(id); err2 != nil {
- logrus.Errorf("%v; error unmounting %v: %v", err, id, err2)
+ logrus.Errorf("%v; unmounting %v: %v", err, id, err2)
}
return err
}
@@ -1868,12 +2063,31 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
return nil
}
+// supportsIDmappedMounts returns whether the kernel supports using idmapped mounts with
+// overlay lower layers.
+func (d *Driver) supportsIDmappedMounts() bool {
+ if d.supportsIDMappedMounts != nil {
+ return *d.supportsIDMappedMounts
+ }
+
+ supportsIDMappedMounts, err := checkAndRecordIDMappedSupport(d.home, d.runhome)
+ d.supportsIDMappedMounts = &supportsIDMappedMounts
+ if err == nil {
+ return supportsIDMappedMounts
+ }
+ logrus.Debugf("Check for idmapped mounts support %v", err)
+ return false
+}
+
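
supportsIDmappedMounts above caches its probe behind a *bool so the kernel check runs at most once per driver. A generic sketch of that memoized-probe shape (not part of this diff):

package main

import "fmt"

type capCache struct{ idmapped *bool } // nil means "not probed yet"

func (c *capCache) supported(probe func() (bool, error)) bool {
	if c.idmapped != nil {
		return *c.idmapped // answer cached by an earlier call
	}
	ok, err := probe()
	c.idmapped = &ok // cache even when the probe errors
	if err != nil {
		return false
	}
	return ok
}

func main() {
	var c capCache
	fmt.Println(c.supported(func() (bool, error) { return true, nil }))
	// second probe is never run; the cached answer wins
	fmt.Println(c.supported(func() (bool, error) { return false, nil }))
}
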
// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a userNS
func (d *Driver) SupportsShifting() bool {
if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" {
return true
}
- return d.options.mountProgram != ""
+ if d.options.mountProgram != "" {
+ return true
+ }
+ return d.supportsIDmappedMounts()
}
// dumbJoin is more or less a dumber version of filepath.Join, but one which
@@ -1908,22 +2122,21 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string,
filepath.Join(target, "blob"),
} {
if _, err := os.Stat(p); err != nil {
- return "", errors.Wrapf(graphdriver.ErrLayerUnknown,
- "failed to stat additional layer %q: %v", p, err)
+ wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err)
+ return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown)
}
}
return target, nil
}
- return "", errors.Wrapf(graphdriver.ErrLayerUnknown,
- "additional layer (%q, %q) not found", dgst, ref)
+ return "", fmt.Errorf("additional layer (%q, %q) not found: %w", dgst, ref, graphdriver.ErrLayerUnknown)
}
func (d *Driver) releaseAdditionalLayerByID(id string) {
if al, err := d.getAdditionalLayerPathByID(id); err == nil {
notifyReleaseAdditionalLayer(al)
} else if !os.IsNotExist(err) {
- logrus.Warnf("unexpected error on reading Additional Layer Store pointer %v", err)
+ logrus.Warnf("Unexpected error on reading Additional Layer Store pointer %v", err)
}
}
@@ -1961,7 +2174,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
}
// tell the additional layer store that we use this layer.
// mark this layer as "additional layer"
- if err := ioutil.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
+ if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
return err
}
notifyUseAdditionalLayer(al.path)
@@ -1969,7 +2182,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
}
func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) {
- al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer"))
+ al, err := os.ReadFile(path.Join(d.dir(id), "additionallayer"))
if err != nil {
return "", err
}
@@ -2004,10 +2217,10 @@ func notifyUseAdditionalLayer(al string) {
} else if err == nil {
f.Close()
if err := os.Remove(useFile); err != nil {
- logrus.Warnf("failed to remove use file")
+ logrus.Warnf("Failed to remove use file")
}
}
- logrus.Warnf("unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err)
+ logrus.Warnf("Unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err)
}
// notifyReleaseAdditionalLayer notifies Additional Layer Store that we don't use the specified
@@ -2024,7 +2237,7 @@ func notifyReleaseAdditionalLayer(al string) {
if os.IsNotExist(err) {
return
}
- logrus.Warnf("unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err)
+ logrus.Warnf("Unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err)
}
// redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and
@@ -2042,3 +2255,15 @@ func redirectDiffIfAdditionalLayer(diffPath string) (string, error) {
}
return diffPath, nil
}
+
+// getMappedMountRoot is a heuristic that calculates the parent directory where
+// the idmapped mount should be applied.
+// It is useful to minimize the number of idmapped mounts and at the same time use
+// a common path as long as possible to reduce the length of the mount data argument.
+func getMappedMountRoot(path string) string {
+ dirName := filepath.Dir(path)
+ if filepath.Base(dirName) == linkDir {
+ return filepath.Dir(dirName)
+ }
+ return dirName
+}
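
getMappedMountRoot collapses every "l/<name>" lower onto the shared overlay home, so sibling layers reuse one idmapped mount. A runnable illustration using the function exactly as defined above (paths are hypothetical):

package main

import (
	"fmt"
	"path/filepath"
)

const linkDir = "l"

func getMappedMountRoot(path string) string {
	dirName := filepath.Dir(path)
	if filepath.Base(dirName) == linkDir {
		return filepath.Dir(dirName)
	}
	return dirName
}

func main() {
	// every "l/<name>" lower collapses onto the overlay home, so all of
	// them can share a single idmapped mount of that directory
	fmt.Println(getMappedMountRoot("/store/overlay/l/ABCDEF")) // /store/overlay
	fmt.Println(getMappedMountRoot("/store/overlay/id1/diff")) // /store/overlay/id1
}
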
diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
index fc565ef0ba6..65199008936 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package overlay
@@ -47,13 +48,13 @@ func generateID(l int) string {
if retryOnError(err) && retries < maxretries {
count += n
retries++
- logrus.Errorf("error generating version 4 uuid, retrying: %v", err)
+ logrus.Errorf("Generating version 4 uuid, retrying: %v", err)
continue
}
// Any other errors represent a system problem. What did someone
// do to /dev/urandom?
- panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
+ panic(fmt.Errorf("reading random number generator, retried for %v: %w", totalBackoff.String(), err))
}
break
diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
index 9fc57b36bf9..2670ef3df9e 100644
--- a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
+++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package overlayutils
@@ -6,7 +7,6 @@ import (
"fmt"
graphdriver "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
@@ -16,5 +16,5 @@ func ErrDTypeNotSupported(driver, backingFs string) error {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
msg += " Running without d_type is not supported."
- return errors.Wrap(graphdriver.ErrNotSupported, msg)
+ return fmt.Errorf("%s: %w", msg, graphdriver.ErrNotSupported)
}
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
index 0609f970c28..ed4c7eaa5de 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -1,3 +1,4 @@
+//go:build linux && !exclude_disk_quota && cgo
// +build linux,!exclude_disk_quota,cgo
//
@@ -51,7 +52,6 @@ struct fsxattr {
import "C"
import (
"fmt"
- "io/ioutil"
"math"
"os"
"path"
@@ -91,7 +91,7 @@ func generateUniqueProjectID(path string) (uint32, error) {
}
stat, ok := fileinfo.Sys().(*syscall.Stat_t)
if !ok {
- return 0, fmt.Errorf("Not a syscall.Stat_t %s", path)
+ return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
}
projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
@@ -122,11 +122,9 @@ func generateUniqueProjectID(path string) (uint32, error) {
// This is a way to prevent xfs_quota management from conflicting with
// containers/storage.
-//
// Then try to create a test directory with the next project id and set a quota
// on it. If that works, continue to scan existing containers to map allocated
// project ids.
-//
func NewControl(basePath string) (*Control, error) {
//
// Get project id of parent dir as minimal id to be used by driver
@@ -234,8 +232,8 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
- return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v",
- projectID, backingFsBlockDev, errno.Error())
+ return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
+ projectID, backingFsBlockDev, errno)
}
return nil
@@ -282,8 +280,8 @@ func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, err
uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
- return d, fmt.Errorf("Failed to get quota limit for projid %d on %s: %v",
- projectID, q.backingFsBlockDev, errno.Error())
+ return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w",
+ projectID, q.backingFsBlockDev, errno)
}
return d, nil
@@ -301,7 +299,7 @@ func getProjectID(targetPath string) (uint32, error) {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
- return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
+ return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
return uint32(fsx.fsx_projid), nil
@@ -319,14 +317,14 @@ func setProjectID(targetPath string, projectID uint32) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
- return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
+ return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
fsx.fsx_projid = C.__u32(projectID)
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
- return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error())
+ return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno)
}
return nil
@@ -335,7 +333,7 @@ func setProjectID(targetPath string, projectID uint32) error {
// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID(home string) error {
- files, err := ioutil.ReadDir(home)
+ files, err := os.ReadDir(home)
if err != nil {
return fmt.Errorf("read directory failed : %s", home)
}
@@ -369,7 +367,7 @@ func openDir(path string) (*C.DIR, error) {
dir := C.opendir(Cpath)
if dir == nil {
- return nil, fmt.Errorf("Can't open dir")
+ return nil, fmt.Errorf("can't open dir %v", Cpath)
}
return dir, nil
}
@@ -394,10 +392,13 @@ func makeBackingFsDev(home string) (string, error) {
}
backingFsBlockDev := path.Join(home, "backingFsBlockDev")
+ backingFsBlockDevTmp := backingFsBlockDev + ".tmp"
// Re-create just in case someone copied the home directory over to a new device
- unix.Unlink(backingFsBlockDev)
- if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
- return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err)
+ if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
+ return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
+ }
+ if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil {
+ return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err)
}
return backingFsBlockDev, nil
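
The change above switches from unlink+mknod to mknod on a ".tmp" name followed by rename, the usual atomic-replace pattern. A generic sketch with os.WriteFile standing in for mknod (not part of this diff; the filename is hypothetical):

package main

import (
	"fmt"
	"os"
)

// atomicWrite writes to a temporary name and renames it into place.
// rename(2) replaces the destination atomically, so readers never see a
// missing or half-created file the way unlink-then-recreate allows.
func atomicWrite(path string, data []byte) error {
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, data, 0o600); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	fmt.Println(atomicWrite("/tmp/backingFsBlockDev.example", []byte("x")))
}
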
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
index 7469138db7a..a15e91de26d 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
@@ -1,9 +1,10 @@
+//go:build !linux || exclude_disk_quota || !cgo
// +build !linux exclude_disk_quota !cgo
package quota
import (
- "github.com/pkg/errors"
+ "errors"
)
// Quota limit params - currently we only control blocks hard limit
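
This file, like most of the patch, drops github.com/pkg/errors for stdlib wrapping. A minimal sketch of the replacement idiom, in which errors.Is walks the %w chain that errors.Cause used to unwrap (the sentinel name is illustrative):

package main

import (
	"errors"
	"fmt"
)

// a sentinel that stays comparable through wrapped chains
var errPrereq = errors.New("prerequisites for driver not satisfied")

func initDriver() error {
	// fmt.Errorf with %w replaces errors.Wrap from pkg/errors
	return fmt.Errorf("opening /dev/zfs: %w", errPrereq)
}

func main() {
	err := initDriver()
	// errors.Is replaces the old errors.Cause(err) == errPrereq comparison
	fmt.Println(errors.Is(err, errPrereq)) // true
}
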
diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
index c748468e5cb..4623e7f4648 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris
+// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris
package register
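
In legacy +build syntax a space means OR and a comma means AND, so the corrected constraint reads as the //go:build form below (equivalent expression, shown for reference):

//go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris
// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris
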
diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go
index 5d80b886599..d40d71cfc1e 100644
--- a/vendor/github.com/containers/storage/drivers/template.go
+++ b/vendor/github.com/containers/storage/drivers/template.go
@@ -31,7 +31,7 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa
diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel)
if err != nil {
if err2 := d.Remove(id); err2 != nil {
- logrus.Errorf("error removing layer %q: %v", id, err2)
+ logrus.Errorf("Removing layer %q: %v", id, err2)
}
return err
}
@@ -44,7 +44,7 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa
}
if _, err = d.ApplyDiff(id, parent, applyOptions); err != nil {
if err2 := d.Remove(id); err2 != nil {
- logrus.Errorf("error removing layer %q: %v", id, err2)
+ logrus.Errorf("Removing layer %q: %v", id, err2)
}
return err
}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index 1b58e2f63e1..b1073d55fe5 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -5,6 +5,7 @@ import (
"io"
"os"
"path/filepath"
+ "runtime"
"strconv"
"strings"
@@ -170,6 +171,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
}()
rootPerms := defaultPerms
+ if runtime.GOOS == "darwin" {
+ rootPerms = os.FileMode(0700)
+ }
+
if parent != "" {
st, err := system.Stat(d.dir(parent))
if err != nil {
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 1491517413c..7def16cd393 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -1,4 +1,5 @@
-//+build windows
+//go:build windows
+// +build windows
package windows
@@ -9,7 +10,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -23,7 +23,7 @@ import (
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
- "github.com/containers/storage/drivers"
+ graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
@@ -103,7 +103,7 @@ func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, e
}
if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
- return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err)
+ return nil, fmt.Errorf("windowsfilter failed to create '%s': %w", home, err)
}
d := &Driver{
@@ -252,7 +252,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
storageOptions, err := parseStorageOpt(storageOpt)
if err != nil {
- return fmt.Errorf("Failed to parse storage options - %s", err)
+ return fmt.Errorf("failed to parse storage options - %s", err)
}
if storageOptions.size != 0 {
@@ -266,7 +266,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
}
- return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err)
+ return fmt.Errorf("cannot create layer with missing parent %s: %s", parent, err)
}
if err := d.setLayerChain(id, layerChain); err != nil {
@@ -474,7 +474,7 @@ func (d *Driver) Put(id string) error {
// We use this opportunity to clean up any -removing folders which may
// still be left if the daemon was killed while it was removing a layer.
func (d *Driver) Cleanup() error {
- items, err := ioutil.ReadDir(d.info.HomeDir)
+ items, err := os.ReadDir(d.info.HomeDir)
if err != nil {
if os.IsNotExist(err) {
return nil
@@ -810,7 +810,7 @@ func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []
}
if err = cmd.Wait(); err != nil {
- return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
+ return 0, fmt.Errorf("re-exec output: %s: error: %w", output, err)
}
return strconv.ParseInt(output.String(), 10, 64)
@@ -869,7 +869,7 @@ func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ..
// resolveID computes the layerID information based on the given id.
func (d *Driver) resolveID(id string) (string, error) {
- content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
+ content, err := os.ReadFile(filepath.Join(d.dir(id), "layerID"))
if os.IsNotExist(err) {
return id, nil
} else if err != nil {
@@ -880,23 +880,23 @@ func (d *Driver) resolveID(id string) (string, error) {
// setID stores the layerId in disk.
func (d *Driver) setID(id, altID string) error {
- return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
+ return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
}
// getLayerChain returns the layer chain information.
func (d *Driver) getLayerChain(id string) ([]string, error) {
jPath := filepath.Join(d.dir(id), "layerchain.json")
- content, err := ioutil.ReadFile(jPath)
+ content, err := os.ReadFile(jPath)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
- return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
+ return nil, fmt.Errorf("unable to read layerchain file - %s", err)
}
var layerChain []string
err = json.Unmarshal(content, &layerChain)
if err != nil {
- return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err)
+ return nil, fmt.Errorf("failed to unmarshall layerchain json - %s", err)
}
return layerChain, nil
@@ -906,13 +906,13 @@ func (d *Driver) getLayerChain(id string) ([]string, error) {
func (d *Driver) setLayerChain(id string, chain []string) error {
content, err := json.Marshal(&chain)
if err != nil {
- return fmt.Errorf("Failed to marshall layerchain json - %s", err)
+ return fmt.Errorf("failed to marshall layerchain json - %s", err)
}
jPath := filepath.Join(d.dir(id), "layerchain.json")
- err = ioutil.WriteFile(jPath, content, 0600)
+ err = os.WriteFile(jPath, content, 0600)
if err != nil {
- return fmt.Errorf("Unable to write layerchain file - %s", err)
+ return fmt.Errorf("unable to write layerchain file - %s", err)
}
return nil
@@ -999,7 +999,7 @@ func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
}
options.size = uint64(size)
default:
- return nil, fmt.Errorf("Unknown storage option: %s", key)
+ return nil, fmt.Errorf("unknown storage option: %s", key)
}
}
return &options, nil
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index e034bf152c9..0d4001783aa 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -1,3 +1,4 @@
+//go:build linux || freebsd
// +build linux freebsd
package zfs
@@ -17,9 +18,8 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
- "github.com/mistifyio/go-zfs"
+ zfs "github.com/mistifyio/go-zfs/v3"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -54,13 +54,13 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
if _, err := exec.LookPath("zfs"); err != nil {
logger.Debugf("zfs command is not available: %v", err)
- return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
+ return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites)
}
file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
if err != nil {
logger.Debugf("cannot open /dev/zfs: %v", err)
- return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
+ return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites)
}
defer file.Close()
@@ -90,7 +90,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
filesystems, err := zfs.Filesystems(options.fsName)
if err != nil {
- return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
+ return nil, fmt.Errorf("cannot find root filesystem %s: %w", options.fsName, err)
}
filesystemsCache := make(map[string]bool, len(filesystems))
@@ -103,15 +103,15 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
}
if rootDataset == nil {
- return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
+ return nil, fmt.Errorf("zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
}
rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps)
if err != nil {
- return nil, fmt.Errorf("Failed to get root uid/gid: %v", err)
+ return nil, fmt.Errorf("failed to get root uid/gid: %w", err)
}
if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
- return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
+ return nil, fmt.Errorf("failed to create '%s': %w", base, err)
}
d := &Driver{
@@ -140,7 +140,7 @@ func parseOptions(opt []string) (zfsOptions, error) {
case "zfs.mountopt":
options.mountOptions = val
default:
- return options, fmt.Errorf("Unknown option %s", key)
+ return options, fmt.Errorf("unknown option %s", key)
}
}
return options, nil
@@ -149,7 +149,7 @@ func parseOptions(opt []string) (zfsOptions, error) {
func lookupZfsDataset(rootdir string) (string, error) {
var stat unix.Stat_t
if err := unix.Stat(rootdir, &stat); err != nil {
- return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ return "", fmt.Errorf("failed to access '%s': %w", rootdir, err)
}
wantedDev := stat.Dev
@@ -168,7 +168,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
}
}
- return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
+ return "", fmt.Errorf("failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
}
// Driver holds information about the driver, such as zfs dataset, options and cache.
@@ -315,7 +315,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error {
if opts != nil {
rootUID, rootGID, err = idtools.GetRootUIDGID(opts.UIDs(), opts.GIDs())
if err != nil {
- return fmt.Errorf("Failed to get root uid/gid: %v", err)
+ return fmt.Errorf("failed to get root uid/gid: %w", err)
}
mountLabel = opts.MountLabel
}
@@ -341,22 +341,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error {
mountOpts := label.FormatMountLabel(d.options.mountOptions, mountLabel)
if err := mount.Mount(name, mountpoint, "zfs", mountOpts); err != nil {
- return errors.Wrap(err, "error creating zfs mount")
+ return fmt.Errorf("creating zfs mount: %w", err)
}
defer func() {
- if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
- logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
+ if err := detachUnmount(mountpoint); err != nil {
+ logrus.Warnf("failed to unmount %s mount %s: %v", id, mountpoint, err)
}
}()
if err := os.Chmod(mountpoint, defaultPerms); err != nil {
- return errors.Wrap(err, "error setting permissions on zfs mount")
+ return fmt.Errorf("setting permissions on zfs mount: %w", err)
}
// this is our first mount after creation of the filesystem, and the root dir may still have root
// permissions instead of the remapped root uid:gid (if user namespaces are enabled):
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
- return errors.Wrapf(err, "modifying zfs mountpoint (%s) ownership", mountpoint)
+ return fmt.Errorf("modifying zfs mountpoint (%s) ownership: %w", mountpoint, err)
}
}
@@ -377,7 +377,7 @@ func parseStorageOpt(storageOpt map[string]string) (string, error) {
case "size":
return v, nil
default:
- return "0", fmt.Errorf("Unknown option %s", key)
+ return "0", fmt.Errorf("unknown option %s", key)
}
}
return "0", nil
@@ -459,13 +459,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr
}
if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil {
- return "", errors.Wrap(err, "error creating zfs mount")
+ return "", fmt.Errorf("creating zfs mount: %w", err)
}
if remountReadOnly {
opts = label.FormatMountLabel("remount,ro", options.MountLabel)
if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil {
- return "", errors.Wrap(err, "error remounting zfs mount read-only")
+ return "", fmt.Errorf("remounting zfs mount read-only: %w", err)
}
}
@@ -483,7 +483,7 @@ func (d *Driver) Put(id string) error {
logger.Debugf(`unmount("%s")`, mountpoint)
- if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+ if err := detachUnmount(mountpoint); err != nil {
logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
}
if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
index bf690515984..c3c73c61e7f 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
@@ -2,10 +2,8 @@ package zfs
import (
"fmt"
- "strings"
- "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
+ graphdriver "github.com/containers/storage/drivers"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -13,27 +11,23 @@ import (
func checkRootdirFs(rootdir string) error {
var buf unix.Statfs_t
if err := unix.Statfs(rootdir, &buf); err != nil {
- return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ return fmt.Errorf("failed to access '%s': %s", rootdir, err)
}
// on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ]
if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir)
- return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
+ return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootdir, graphdriver.ErrPrerequisites)
}
return nil
}
func getMountpoint(id string) string {
- maxlen := 12
-
- // we need to preserve filesystem suffix
- suffix := strings.SplitN(id, "-", 2)
-
- if len(suffix) > 1 {
- return id[:maxlen] + "-" + suffix[1]
- }
+ return id
+}
- return id[:maxlen]
+func detachUnmount(mountpoint string) error {
+ // FreeBSD's MNT_FORCE is roughly equivalent to MNT_DETACH
+ return unix.Unmount(mountpoint, unix.MNT_FORCE)
}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
index edcb1da36b7..d43ba5c2b14 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
@@ -1,9 +1,11 @@
package zfs
import (
+ "fmt"
+
graphdriver "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
)
func checkRootdirFs(rootDir string) error {
@@ -18,7 +20,7 @@ func checkRootdirFs(rootDir string) error {
if fsMagic != graphdriver.FsMagicZfs {
logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root")
- return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootDir)
+ return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootDir, graphdriver.ErrPrerequisites)
}
return nil
@@ -27,3 +29,7 @@ func checkRootdirFs(rootDir string) error {
func getMountpoint(id string) string {
return id
}
+
+func detachUnmount(mountpoint string) error {
+ return unix.Unmount(mountpoint, unix.MNT_DETACH)
+}
diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go
index 5fc810b89d4..de6e377541c 100644
--- a/vendor/github.com/containers/storage/errors.go
+++ b/vendor/github.com/containers/storage/errors.go
@@ -1,6 +1,8 @@
package storage
import (
+ "errors"
+
"github.com/containers/storage/types"
)
@@ -55,4 +57,9 @@ var (
ErrStoreIsReadOnly = types.ErrStoreIsReadOnly
// ErrNotSupported is returned when the requested functionality is not supported.
ErrNotSupported = types.ErrNotSupported
+ // ErrInvalidMappings is returned when the specified mappings are invalid.
+ ErrInvalidMappings = types.ErrInvalidMappings
+ // ErrInvalidNameOperation is returned when updateName is called with invalid operation.
+ // Internal error
+ errInvalidUpdateNameOperation = errors.New("invalid update name operation")
)
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 9114691827b..1ab01ab3364 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -1,38 +1,33 @@
-go 1.14
+go 1.16
module github.com/containers/storage
require (
- github.com/BurntSushi/toml v0.4.1
- github.com/Microsoft/go-winio v0.5.0
- github.com/Microsoft/hcsshim v0.8.22
- github.com/containerd/stargz-snapshotter/estargz v0.8.0
- github.com/docker/go-units v0.4.0
- github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
+ github.com/BurntSushi/toml v1.2.0
+ github.com/Microsoft/go-winio v0.5.2
+ github.com/Microsoft/hcsshim v0.9.4
+ github.com/containerd/stargz-snapshotter/estargz v0.12.0
+ github.com/cyphar/filepath-securejoin v0.2.3
+ github.com/docker/go-units v0.5.0
github.com/google/go-intervals v0.0.2
- github.com/google/uuid v1.2.0 // indirect
github.com/hashicorp/go-multierror v1.1.1
- github.com/json-iterator/go v1.1.11
- github.com/klauspost/compress v1.13.5
+ github.com/json-iterator/go v1.1.12
+ github.com/klauspost/compress v1.15.9
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
- github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
- github.com/moby/sys/mountinfo v0.4.1
- github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.1 // indirect
+ github.com/mistifyio/go-zfs/v3 v3.0.0
+ github.com/moby/sys/mountinfo v0.6.2
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.0.2
+ github.com/opencontainers/runc v1.1.4
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
- github.com/opencontainers/selinux v1.8.5
- github.com/pkg/errors v0.9.1
- github.com/sirupsen/logrus v1.8.1
- github.com/stretchr/testify v1.7.0
+ github.com/opencontainers/selinux v1.10.1
+ github.com/sirupsen/logrus v1.9.0
+ github.com/stretchr/testify v1.8.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/ulikunitz/xz v0.5.10
github.com/vbatts/tar-split v0.11.2
- golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
- golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55
- gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
+ golang.org/x/net v0.0.0-20210825183410-e898025ed96a
+ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8
gotest.tools v2.2.0+incompatible
)
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 5cc5da6d3ef..2ceb26c5763 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -1,85 +1,334 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
-github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
+github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/hcsshim v0.8.22 h1:CulZ3GW8sNJExknToo+RWD+U+6ZM5kkNfuxywSDPd08=
-github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I=
+github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/stargz-snapshotter/estargz v0.8.0 h1:oA1wx8kTFfImfsT5bScbrZd8gK+WtQnn15q82Djvm0Y=
-github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -87,243 +336,656 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
-github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14=
+github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg=
+github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
-github.com/opencontainers/selinux v1.8.5 h1:OkT6bMHOQ1JQQO4ihjQ49sj0+wciDcjziSVTRn8VeTA=
-github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w=
+github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55 h1:rw6UNGRMfarCepjI8qOepea/SXwIBVfTKjztZ5gBbq4=
-golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -332,22 +994,89 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
+k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
+k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
+k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
+k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
+k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
+k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
+k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
+k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
+k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
+k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go
index f870b9ceed3..9567fe90c62 100644
--- a/vendor/github.com/containers/storage/idset.go
+++ b/vendor/github.com/containers/storage/idset.go
@@ -1,9 +1,12 @@
package storage

import (
+ "errors"
+ "fmt"
+ "strings"
+
"github.com/containers/storage/pkg/idtools"
"github.com/google/go-intervals/intervalset"
- "github.com/pkg/errors"
)

// idSet represents a set of integer IDs. It is stored as an ordered set of intervals.
@@ -218,3 +221,45 @@ func maxInt(a, b int) int {
}
return a
}
+
+func hasOverlappingRanges(mappings []idtools.IDMap) error {
+ hostIntervals := intervalset.Empty()
+ containerIntervals := intervalset.Empty()
+
+ var conflicts []string
+
+ for _, m := range mappings {
+ c := interval{start: m.ContainerID, end: m.ContainerID + m.Size}
+ h := interval{start: m.HostID, end: m.HostID + m.Size}
+
+ added := false
+ overlaps := false
+
+ containerIntervals.IntervalsBetween(c, func(x intervalset.Interval) bool {
+ overlaps = true
+ return false
+ })
+ if overlaps {
+ conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size))
+ added = true
+ }
+ containerIntervals.Add(intervalset.NewSet([]intervalset.Interval{c}))
+
+ hostIntervals.IntervalsBetween(h, func(x intervalset.Interval) bool {
+ overlaps = true
+ return false
+ })
+ if overlaps && !added {
+ conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size))
+ }
+ hostIntervals.Add(intervalset.NewSet([]intervalset.Interval{h}))
+ }
+
+ if conflicts != nil {
+ if len(conflicts) == 1 {
+ return fmt.Errorf("the specified UID and/or GID mapping %s conflicts with other mappings: %w", conflicts[0], ErrInvalidMappings)
+ }
+ return fmt.Errorf("the specified UID and/or GID mappings %s conflict with other mappings: %w", strings.Join(conflicts, ", "), ErrInvalidMappings)
+ }
+ return nil
+}
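
Aside on the vendored helper above: hasOverlappingRanges keeps two interval sets, one for container ranges and one for host ranges. For each mapping it asks the set whether the candidate interval intersects anything already stored (the IntervalsBetween callback returns false to stop at the first hit), records the offender as ContainerID:HostID:Size, and only then adds the interval. The sketch below shows the same check as a plain O(n^2) pairwise scan with no dependency on github.com/google/go-intervals; the IDMap struct is a simplified stand-in for idtools.IDMap, not the real type.

package main

import "fmt"

// IDMap mirrors the shape of idtools.IDMap for illustration only.
type IDMap struct {
	ContainerID, HostID, Size int
}

// rangesOverlap reports whether the half-open ranges [aStart, aStart+aLen)
// and [bStart, bStart+bLen) intersect.
func rangesOverlap(aStart, aLen, bStart, bLen int) bool {
	return aStart < bStart+bLen && bStart < aStart+aLen
}

// findConflicts flags every mapping whose container range or host range
// collides with an earlier mapping, in the same ContainerID:HostID:Size
// format the vendored helper uses.
func findConflicts(mappings []IDMap) []string {
	var conflicts []string
	for i, m := range mappings {
		for _, prev := range mappings[:i] {
			if rangesOverlap(m.ContainerID, m.Size, prev.ContainerID, prev.Size) ||
				rangesOverlap(m.HostID, m.Size, prev.HostID, prev.Size) {
				conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size))
				break // report each mapping at most once, like the added/overlaps bookkeeping
			}
		}
	}
	return conflicts
}

func main() {
	mappings := []IDMap{
		{ContainerID: 0, HostID: 100000, Size: 65536},
		{ContainerID: 65536, HostID: 100000, Size: 1}, // host range collides with the first entry
	}
	fmt.Println(findConflicts(mappings)) // prints [65536:100000:1]
}

The interval-set version exists to keep lookups cheap when /etc/subuid and /etc/subgid carry long mapping lists; the quadratic sketch is only meant to make the overlap rule explicit.
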
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index bca25a65b8c..c76a6c9f952 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -1,7 +1,8 @@
package storage

import (
- "io/ioutil"
+ "errors"
+ "fmt"
"os"
"path/filepath"
"strings"
@@ -13,7 +14,6 @@ import (
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
)

const (
@@ -136,8 +136,19 @@ type ImageStore interface {

// SetNames replaces the list of names associated with an image with the
// supplied values. The values are expected to be valid normalized
// named image references.
+ // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
SetNames(id string, names []string) error

+ // AddNames adds the supplied values to the list of names associated with the image with
+ // the specified id. The values are expected to be valid normalized
+ // named image references.
+ AddNames(id string, names []string) error
+
+ // RemoveNames removes the supplied values from the list of names associated with the image with
+ // the specified id. The values are expected to be valid normalized
+ // named image references.
+ RemoveNames(id string, names []string) error
+
// Delete removes the record of the image.
Delete(id string) error
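
The Deprecated marker above is about lost updates: SetNames takes the complete final list, so a caller has to read the current names, edit the slice, and write it back, and two concurrent callers can silently clobber each other. AddNames and RemoveNames move that read-modify-write inside the store, under its own locking. A toy sketch of the two method shapes (not the real imageStore, just an illustration):

package main

import (
	"fmt"
	"sync"
)

// toyStore stands in for the image store to illustrate the API shapes;
// it is not the real containers/storage implementation.
type toyStore struct {
	mu    sync.Mutex
	names []string
}

// SetNames-style: the caller supplies the complete final list, so the
// read-modify-write happens outside the store and a concurrent caller's
// update can be overwritten wholesale.
func (s *toyStore) SetNames(names []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.names = names
}

// AddNames-style: the caller supplies only the delta, which is applied
// while the store holds its own lock, so concurrent additions compose.
func (s *toyStore) AddNames(names []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.names = append(s.names, names...)
}

func main() {
	s := &toyStore{names: []string{"localhost/base:latest"}}
	var wg sync.WaitGroup
	for _, n := range []string{"quay.io/a/app:v1", "quay.io/b/app:v1"} {
		wg.Add(1)
		go func(n string) {
			defer wg.Done()
			// Both additions survive; SetNames calls built from stale
			// reads could have dropped one of them.
			s.AddNames([]string{n})
		}(n)
	}
	wg.Wait()
	fmt.Println(len(s.names)) // always 3
}
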
@@ -221,7 +232,7 @@ func (i *Image) recomputeDigests() error {
digests := make(map[digest.Digest]struct{})
if i.Digest != "" {
if err := i.Digest.Validate(); err != nil {
- return errors.Wrapf(err, "error validating image digest %q", string(i.Digest))
+ return fmt.Errorf("validating image digest %q: %w", string(i.Digest), err)
}
digests[i.Digest] = struct{}{}
validDigests = append(validDigests, i.Digest)
@@ -230,8 +241,8 @@ func (i *Image) recomputeDigests() error {
if !bigDataNameIsManifest(name) {
continue
}
- if digest.Validate() != nil {
- return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name)
+ if err := digest.Validate(); err != nil {
+ return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, err)
}
// Deduplicate the digest values.
if _, known := digests[digest]; !known {
@@ -249,7 +260,7 @@ func (i *Image) recomputeDigests() error {
func (r *imageStore) Load() error {
shouldSave := false
rpath := r.imagespath()
- data, err := ioutil.ReadFile(rpath)
+ data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -272,7 +283,7 @@ func (r *imageStore) Load() error {
// Compute the digest list.
err = image.recomputeDigests()
if err != nil {
- return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names)
+ return fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
}
for _, name := range image.Names {
names[name] = image
@@ -300,7 +311,7 @@ func (r *imageStore) Load() error {

func (r *imageStore) Save() error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
+ return fmt.Errorf("not allowed to modify the image store at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
if !r.Locked() {
return errors.New("image store is not locked for writing")
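
A note on the pattern repeated through this file: errors.Wrapf(sentinel, "msg %q", arg) becomes fmt.Errorf("msg %q: %w", arg, sentinel), which is what lets the vendored tree drop github.com/pkg/errors. The sentinel stays reachable through the standard library's errors.Is, so callers' checks keep working. A minimal sketch of that behavior (the path and sentinel name here are illustrative, not the real storage identifiers):

package main

import (
	"errors"
	"fmt"
)

// A sentinel in the style of storage.ErrStoreIsReadOnly.
var errReadOnly = errors.New("read-only store")

func save(path string) error {
	// %w (Go 1.13+) records errReadOnly as the wrapped cause while the
	// message gains context, replacing pkg/errors.Wrapf.
	return fmt.Errorf("not allowed to modify the image store at %q: %w", path, errReadOnly)
}

func main() {
	err := save("/some/images.json")
	fmt.Println(err)                         // not allowed to modify ...: read-only store
	fmt.Println(errors.Is(err, errReadOnly)) // true; sentinel checks keep working
}
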
@@ -376,11 +387,11 @@ func (r *imageStore) lookup(id string) (*Image, bool) {

func (r *imageStore) ClearFlag(id string, flag string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath())
+ return fmt.Errorf("not allowed to clear flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
delete(image.Flags, flag)
return r.Save()
@@ -388,11 +399,11 @@ func (r *imageStore) ClearFlag(id string, flag string) error {

func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath())
+ return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.Flags == nil {
image.Flags = make(map[string]interface{})
@@ -403,7 +414,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {

func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) {
if !r.IsReadWrite() {
- return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath())
+ return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
if id == "" {
id = stringid.GenerateRandomID()
@@ -414,48 +425,47 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
}
}
if _, idInUse := r.byid[id]; idInUse {
- return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id)
+ return nil, fmt.Errorf("an image with ID %q already exists: %w", id, ErrDuplicateID)
}
names = dedupeNames(names)
for _, name := range names {
if image, nameInUse := r.byname[name]; nameInUse {
- return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID)
+ return nil, fmt.Errorf("image name %q is already associated with image %q: %w", name, image.ID, ErrDuplicateName)
}
}
if created.IsZero() {
created = time.Now().UTC()
}
- if err == nil {
- image = &Image{
- ID: id,
- Digest: searchableDigest,
- Digests: nil,
- Names: names,
- TopLayer: layer,
- Metadata: metadata,
- BigDataNames: []string{},
- BigDataSizes: make(map[string]int64),
- BigDataDigests: make(map[string]digest.Digest),
- Created: created,
- Flags: make(map[string]interface{}),
- }
- err := image.recomputeDigests()
- if err != nil {
- return nil, errors.Wrapf(err, "error validating digests for new image")
- }
- r.images = append(r.images, image)
- r.idindex.Add(id)
- r.byid[id] = image
- for _, name := range names {
- r.byname[name] = image
- }
- for _, digest := range image.Digests {
- list := r.bydigest[digest]
- r.bydigest[digest] = append(list, image)
- }
- err = r.Save()
- image = copyImage(image)
+
+ image = &Image{
+ ID: id,
+ Digest: searchableDigest,
+ Digests: nil,
+ Names: names,
+ TopLayer: layer,
+ Metadata: metadata,
+ BigDataNames: []string{},
+ BigDataSizes: make(map[string]int64),
+ BigDataDigests: make(map[string]digest.Digest),
+ Created: created,
+ Flags: make(map[string]interface{}),
+ }
+ err = image.recomputeDigests()
+ if err != nil {
+ return nil, fmt.Errorf("validating digests for new image: %w", err)
}
+ r.images = append(r.images, image)
+ r.idindex.Add(id)
+ r.byid[id] = image
+ for _, name := range names {
+ r.byname[name] = image
+ }
+ for _, digest := range image.Digests {
+ list := r.bydigest[digest]
+ r.bydigest[digest] = append(list, image)
+ }
+ err = r.Save()
+ image = copyImage(image)
return image, err
}
@@ -464,7 +474,7 @@ func (r *imageStore) addMappedTopLayer(id, layer string) error {
image.MappedTopLayers = append(image.MappedTopLayers, layer)
return r.Save()
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) removeMappedTopLayer(id, layer string) error {
@@ -477,25 +487,25 @@ func (r *imageStore) removeMappedTopLayer(id, layer string) error {
}
return r.Save()
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) Metadata(id string) (string, error) {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil
}
- return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) SetMetadata(id, metadata string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath())
+ return fmt.Errorf("not allowed to modify image metadata at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
if image, ok := r.lookup(id); ok {
image.Metadata = metadata
return r.Save()
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) removeName(image *Image, name string) {
@@ -506,35 +516,53 @@ func (i *Image) addNameToHistory(name string) {
i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...))
}
+// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
func (r *imageStore) SetNames(id string, names []string) error {
+ return r.updateNames(id, names, setNames)
+}
+
+func (r *imageStore) AddNames(id string, names []string) error {
+ return r.updateNames(id, names, addNames)
+}
+
+func (r *imageStore) RemoveNames(id string, names []string) error {
+ return r.updateNames(id, names, removeNames)
+}
+
+func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
+ return fmt.Errorf("not allowed to change image name assignments at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
- names = dedupeNames(names)
- if image, ok := r.lookup(id); ok {
- for _, name := range image.Names {
- delete(r.byname, name)
- }
- for _, name := range names {
- if otherImage, ok := r.byname[name]; ok {
- r.removeName(otherImage, name)
- }
- r.byname[name] = image
- image.addNameToHistory(name)
+ image, ok := r.lookup(id)
+ if !ok {
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
+ }
+ oldNames := image.Names
+ names, err := applyNameOperation(oldNames, names, op)
+ if err != nil {
+ return err
+ }
+ for _, name := range oldNames {
+ delete(r.byname, name)
+ }
+ for _, name := range names {
+ if otherImage, ok := r.byname[name]; ok {
+ r.removeName(otherImage, name)
}
- image.Names = names
- return r.Save()
+ r.byname[name] = image
+ image.addNameToHistory(name)
}
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ image.Names = names
+ return r.Save()
}
func (r *imageStore) Delete(id string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+ return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
id = image.ID
toDeleteIndex := -1
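
`SetNames`, `AddNames`, and `RemoveNames` now funnel through one `updateNames` helper parameterized by an operation. The `updateNameOperation` constants and `applyNameOperation` are defined elsewhere in the patch, so the sketch below is a plausible reconstruction of their semantics, not the actual definitions:

```go
package main

import "fmt"

type updateNameOperation int

const (
	setNames updateNameOperation = iota
	addNames
	removeNames
)

// applyNameOperation computes the new name list from the old one and the
// caller-supplied names, depending on the requested operation.
func applyNameOperation(oldNames, args []string, op updateNameOperation) ([]string, error) {
	switch op {
	case setNames:
		return dedupe(args), nil // replace wholesale, as SetNames always did
	case addNames:
		return dedupe(append(append([]string{}, oldNames...), args...)), nil
	case removeNames:
		drop := map[string]bool{}
		for _, n := range args {
			drop[n] = true
		}
		keep := []string{}
		for _, n := range oldNames {
			if !drop[n] {
				keep = append(keep, n)
			}
		}
		return keep, nil
	default:
		return nil, fmt.Errorf("unknown name operation %d", op)
	}
}

func dedupe(names []string) []string {
	seen := map[string]bool{}
	out := []string{}
	for _, n := range names {
		if !seen[n] {
			seen[n] = true
			out = append(out, n)
		}
	}
	return out
}

func main() {
	fmt.Println(applyNameOperation([]string{"a", "b"}, []string{"b", "c"}, addNames))
}
```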
@@ -577,14 +605,14 @@ func (r *imageStore) Get(id string) (*Image, error) {
if image, ok := r.lookup(id); ok {
return copyImage(image), nil
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) Lookup(name string) (id string, err error) {
if image, ok := r.lookup(name); ok {
return image.ID, nil
}
- return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (r *imageStore) Exists(id string) bool {
@@ -596,27 +624,27 @@ func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
if images, ok := r.bydigest[d]; ok {
return copyImageSlice(images), nil
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with digest %q", d)
+ return nil, fmt.Errorf("locating image with digest %q: %w", d, ErrImageUnknown)
}
func (r *imageStore) BigData(id, key string) ([]byte, error) {
if key == "" {
- return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
+ return nil, fmt.Errorf("can't retrieve image big data value for empty name: %w", ErrInvalidBigDataName)
}
image, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
- return ioutil.ReadFile(r.datapath(image.ID, key))
+ return os.ReadFile(r.datapath(image.ID, key))
}
func (r *imageStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
- return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name")
+ return -1, fmt.Errorf("can't retrieve size of image big data with empty name: %w", ErrInvalidBigDataName)
}
image, ok := r.lookup(id)
if !ok {
- return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.BigDataSizes == nil {
image.BigDataSizes = make(map[string]int64)
@@ -632,11 +660,11 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
- return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name")
+ return "", fmt.Errorf("can't retrieve digest of image big data value with empty name: %w", ErrInvalidBigDataName)
}
image, ok := r.lookup(id)
if !ok {
- return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.BigDataDigests == nil {
image.BigDataDigests = make(map[string]digest.Digest)
@@ -650,7 +678,7 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
func (r *imageStore) BigDataNames(id string) ([]string, error) {
image, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
return copyStringSlice(image.BigDataNames), nil
}
@@ -668,14 +696,14 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
if key == "" {
- return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
+ return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
}
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
+ return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
image, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
err := os.MkdirAll(r.datadir(image.ID), 0700)
if err != nil {
@@ -684,10 +712,10 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
var newDigest digest.Digest
if bigDataNameIsManifest(key) {
if digestManifest == nil {
- return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided")
+ return fmt.Errorf("digesting manifest: no manifest digest callback provided: %w", ErrDigestUnknown)
}
if newDigest, err = digestManifest(data); err != nil {
- return errors.Wrapf(err, "error digesting manifest")
+ return fmt.Errorf("digesting manifest: %w", err)
}
} else {
newDigest = digest.Canonical.FromBytes(data)
@@ -731,7 +759,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
}
}
if err = image.recomputeDigests(); err != nil {
- return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID)
+ return fmt.Errorf("loading recomputing image digest information for %s: %w", image.ID, err)
}
for _, newDigest := range image.Digests {
// add the image to the list of images in the digest-based index which
@@ -752,7 +780,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
func (r *imageStore) Wipe() error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+ return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 32ba2068540..c23f0b26bba 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -2,9 +2,9 @@ package storage
import (
"bytes"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -23,10 +23,10 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/tarlog"
"github.com/containers/storage/pkg/truncindex"
+ multierror "github.com/hashicorp/go-multierror"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
@@ -220,8 +220,17 @@ type LayerStore interface {
// SetNames replaces the list of names associated with a layer with the
// supplied values.
+ // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
SetNames(id string, names []string) error
+ // AddNames adds the supplied values to the list of names associated with the layer with the
+ // specified id.
+ AddNames(id string, names []string) error
+
+ // RemoveNames removes the supplied values from the list of names associated with the layer with the
+ // specified id.
+ RemoveNames(id string, names []string) error
+
// Delete deletes a layer with the specified name or ID.
Delete(id string) error
@@ -334,7 +343,15 @@ func (r *layerStore) layerspath() string {
func (r *layerStore) Load() error {
shouldSave := false
rpath := r.layerspath()
- data, err := ioutil.ReadFile(rpath)
+ info, err := os.Stat(rpath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ } else {
+ r.layerspathModified = info.ModTime()
+ }
+ data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -398,14 +415,13 @@ func (r *layerStore) Load() error {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
- if cleanup, ok := layer.Flags[incompleteFlag]; ok {
- if b, ok := cleanup.(bool); ok && b {
- err = r.deleteInternal(layer.ID)
- if err != nil {
- break
- }
- shouldSave = true
+ if layerHasIncompleteFlag(layer) {
+ logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
+ err = r.deleteInternal(layer.ID)
+ if err != nil {
+ break
}
+ shouldSave = true
}
}
}
@@ -426,7 +442,7 @@ func (r *layerStore) LoadLocked() error {
func (r *layerStore) loadMounts() error {
mounts := make(map[string]*Layer)
mpath := r.mountspath()
- data, err := ioutil.ReadFile(mpath)
+ data, err := os.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -472,7 +488,7 @@ func (r *layerStore) Save() error {
func (r *layerStore) saveLayers() error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
+ return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
if !r.Locked() {
return errors.New("layer store is not locked for writing")
@@ -491,7 +507,7 @@ func (r *layerStore) saveLayers() error {
func (r *layerStore) saveMounts() error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
+ return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
if !r.mountsLockfile.Locked() {
return errors.New("layer store mount information is not locked for writing")
@@ -547,6 +563,8 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
uidMap: copyIDMap(s.uidMap),
gidMap: copyIDMap(s.gidMap),
}
+ rlstore.Lock()
+ defer rlstore.Unlock()
if err := rlstore.Load(); err != nil {
return nil, err
}
@@ -568,6 +586,8 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
}
+ rlstore.RLock()
+ defer rlstore.Unlock()
if err := rlstore.Load(); err != nil {
return nil, err
}
@@ -602,7 +622,7 @@ func (r *layerStore) Size(name string) (int64, error) {
func (r *layerStore) ClearFlag(id string, flag string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath())
+ return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@@ -614,7 +634,7 @@ func (r *layerStore) ClearFlag(id string, flag string) error {
func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath())
+ return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
@@ -674,7 +694,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
}
if layer.UncompressedDigest != "" {
- r.byuncompressedsum[layer.CompressedDigest] = append(r.byuncompressedsum[layer.CompressedDigest], layer.ID)
+ r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
if err := r.Save(); err != nil {
r.driver.Remove(id)
@@ -683,11 +703,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
return copyLayer(layer), nil
}
-func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) {
+func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
if !r.IsReadWrite() {
- return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
+ return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
- size = -1
if err := os.MkdirAll(r.rundir, 0700); err != nil {
return nil, -1, err
}
@@ -716,12 +735,32 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
parent = parentLayer.ID
}
var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings
+ var (
+ templateMetadata string
+ templateCompressedDigest digest.Digest
+ templateCompressedSize int64
+ templateUncompressedDigest digest.Digest
+ templateUncompressedSize int64
+ templateCompressionType archive.Compression
+ templateUIDs, templateGIDs []uint32
+ templateTSdata []byte
+ )
if moreOptions.TemplateLayer != "" {
+ var tserr error
templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
if !ok {
return nil, -1, ErrLayerUnknown
}
+ templateMetadata = templateLayer.Metadata
templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
+ templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
+ templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
+ templateCompressionType = templateLayer.CompressionType
+ templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
+ templateTSdata, tserr = os.ReadFile(r.tspath(templateLayer.ID))
+ if tserr != nil && !os.IsNotExist(tserr) {
+ return nil, -1, tserr
+ }
} else {
templateIDMappings = &idtools.IDMappings{}
}
@@ -733,6 +772,60 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
if mountLabel != "" {
label.ReserveLabel(mountLabel)
}
+
+ // Before actually creating the layer, make a persistent record of it with incompleteFlag,
+ // so that future processes have a chance to delete it.
+ layer := &Layer{
+ ID: id,
+ Parent: parent,
+ Names: names,
+ MountLabel: mountLabel,
+ Metadata: templateMetadata,
+ Created: time.Now().UTC(),
+ CompressedDigest: templateCompressedDigest,
+ CompressedSize: templateCompressedSize,
+ UncompressedDigest: templateUncompressedDigest,
+ UncompressedSize: templateUncompressedSize,
+ CompressionType: templateCompressionType,
+ UIDs: templateUIDs,
+ GIDs: templateGIDs,
+ Flags: make(map[string]interface{}),
+ UIDMap: copyIDMap(moreOptions.UIDMap),
+ GIDMap: copyIDMap(moreOptions.GIDMap),
+ BigDataNames: []string{},
+ }
+ r.layers = append(r.layers, layer)
+ r.idindex.Add(id)
+ r.byid[id] = layer
+ for _, name := range names {
+ r.byname[name] = layer
+ }
+ for flag, value := range flags {
+ layer.Flags[flag] = value
+ }
+ layer.Flags[incompleteFlag] = true
+
+ succeeded := false
+ cleanupFailureContext := ""
+ defer func() {
+ if !succeeded {
+ // On any error, try both removing the driver's data as well
+ // as the in-memory layer record.
+ if err2 := r.Delete(layer.ID); err2 != nil {
+ if cleanupFailureContext == "" {
+ cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
+ }
+ logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2)
+ }
+ }
+ }()
+
+ err := r.Save()
+ if err != nil {
+ cleanupFailureContext = "saving incomplete layer metadata"
+ return nil, -1, err
+ }
+
idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
opts := drivers.CreateOpts{
MountLabel: mountLabel,
@@ -740,89 +833,67 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
IDMappings: idMappings,
}
if moreOptions.TemplateLayer != "" {
- if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
- if id != "" {
- return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
- }
- return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer)
+ if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
+ cleanupFailureContext = "creating a layer from template"
+ return nil, -1, fmt.Errorf("creating copy of template layer %q with ID %q: %w", moreOptions.TemplateLayer, id, err)
}
oldMappings = templateIDMappings
} else {
if writeable {
- if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
- if id != "" {
- return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
- }
- return nil, -1, errors.Wrapf(err, "error creating read-write layer")
+ if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil {
+ cleanupFailureContext = "creating a read-write layer"
+ return nil, -1, fmt.Errorf("creating read-write layer with ID %q: %w", id, err)
}
} else {
- if err = r.driver.Create(id, parent, &opts); err != nil {
- if id != "" {
- return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
- }
- return nil, -1, errors.Wrapf(err, "error creating layer")
+ if err := r.driver.Create(id, parent, &opts); err != nil {
+ cleanupFailureContext = "creating a read-only layer"
+ return nil, -1, fmt.Errorf("creating layer with ID %q: %w", id, err)
}
}
oldMappings = parentMappings
}
if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
- if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- r.driver.Remove(id)
+ if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
+ cleanupFailureContext = "in UpdateLayerIDMap"
return nil, -1, err
}
}
- if err == nil {
- layer = &Layer{
- ID: id,
- Parent: parent,
- Names: names,
- MountLabel: mountLabel,
- Created: time.Now().UTC(),
- Flags: make(map[string]interface{}),
- UIDMap: copyIDMap(moreOptions.UIDMap),
- GIDMap: copyIDMap(moreOptions.GIDMap),
- BigDataNames: []string{},
- }
- r.layers = append(r.layers, layer)
- r.idindex.Add(id)
- r.byid[id] = layer
- for _, name := range names {
- r.byname[name] = layer
- }
- for flag, value := range flags {
- layer.Flags[flag] = value
- }
- if diff != nil {
- layer.Flags[incompleteFlag] = true
- err = r.Save()
- if err != nil {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- r.driver.Remove(id)
- return nil, -1, err
- }
- size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
- if err != nil {
- if r.Delete(layer.ID) != nil {
- // Either a driver error or an error saving.
- // We now have a layer that's been marked for
- // deletion but which we failed to remove.
- }
- return nil, -1, err
- }
- delete(layer.Flags, incompleteFlag)
+ if len(templateTSdata) > 0 {
+ if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
+ cleanupFailureContext = "creating tar-split parent directory for a copy from template"
+ return nil, -1, err
+ }
+ if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
+ cleanupFailureContext = "creating a tar-split copy from template"
+ return nil, -1, err
}
- err = r.Save()
+ }
+
+ var size int64 = -1
+ if diff != nil {
+ size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
if err != nil {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- r.driver.Remove(id)
+ cleanupFailureContext = "applying layer diff"
return nil, -1, err
}
- layer = copyLayer(layer)
+ } else {
+ // applyDiffWithOptions in the `diff != nil` case handles this bit for us
+ if layer.CompressedDigest != "" {
+ r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
+ }
+ if layer.UncompressedDigest != "" {
+ r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
+ }
+ }
+ delete(layer.Flags, incompleteFlag)
+ err = r.Save()
+ if err != nil {
+ cleanupFailureContext = "saving finished layer metadata"
+ return nil, -1, err
}
+
+ layer = copyLayer(layer)
+ succeeded = true
return layer, size, err
}
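
The rewritten `Put` inverts the old order of operations: the layer record is persisted with `incompleteFlag` *before* any driver work, a deferred block deletes it on any failure (annotated via `cleanupFailureContext`), and the flag is cleared only by the final save. A reduced, self-contained sketch of that pattern under assumed names — it is not the store's API:

```go
package main

import (
	"errors"
	"log"
)

func createWithRecovery(save, destroy, work func() error) (err error) {
	succeeded := false
	failureContext := ""
	defer func() {
		if !succeeded {
			if err2 := destroy(); err2 != nil {
				log.Printf("while recovering from a failure (%s): %v", failureContext, err2)
			}
		}
	}()
	if err = save(); err != nil { // record now exists on disk, flagged incomplete
		failureContext = "saving incomplete metadata"
		return err
	}
	if err = work(); err != nil { // the fallible body: driver creation, diff application
		failureContext = "doing the real work"
		return err
	}
	if err = save(); err != nil { // final save clears the incomplete flag
		failureContext = "saving finished metadata"
		return err
	}
	succeeded = true
	return nil
}

func main() {
	err := createWithRecovery(
		func() error { return nil },
		func() error { return nil },
		func() error { return errors.New("boom") },
	)
	log.Println(err) // the deferred cleanup ran before this error was returned
}
```

The benefit over the old code is crash safety: if the process dies between the first save and the final one, the next `Load()` finds the incomplete flag and deletes the half-built layer.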
@@ -837,7 +908,7 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel
func (r *layerStore) Mounted(id string) (int, error) {
if !r.IsReadWrite() {
- return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
+ return 0, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
@@ -854,7 +925,6 @@ func (r *layerStore) Mounted(id string) (int, error) {
}
func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) {
-
// check whether options include ro option
hasReadOnlyOpt := func(opts []string) bool {
for _, item := range opts {
@@ -868,7 +938,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
// You are not allowed to mount layers from readonly stores if they
// are not mounted read/only.
if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
- return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+ return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
@@ -919,7 +989,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
func (r *layerStore) Unmount(id string, force bool) (bool, error) {
if !r.IsReadWrite() {
- return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+ return false, fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
@@ -958,7 +1028,7 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
if !r.IsReadWrite() {
- return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
+ return nil, nil, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
@@ -981,7 +1051,7 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
}
rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error reading root ID values for layer %q", layer.ID)
+ return nil, nil, fmt.Errorf("reading root ID values for layer %q: %w", layer.ID, err)
}
m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap)
fsuids := make(map[int]struct{})
@@ -989,7 +1059,7 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) {
st, err := system.Stat(dir)
if err != nil {
- return nil, nil, errors.Wrap(err, "read directory ownership")
+ return nil, nil, fmt.Errorf("read directory ownership: %w", err)
}
lst, err := system.Lstat(dir)
if err != nil {
@@ -1031,25 +1101,43 @@ func (r *layerStore) removeName(layer *Layer, name string) {
layer.Names = stringSliceWithoutValue(layer.Names, name)
}
+// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
func (r *layerStore) SetNames(id string, names []string) error {
+ return r.updateNames(id, names, setNames)
+}
+
+func (r *layerStore) AddNames(id string, names []string) error {
+ return r.updateNames(id, names, addNames)
+}
+
+func (r *layerStore) RemoveNames(id string, names []string) error {
+ return r.updateNames(id, names, removeNames)
+}
+
+func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
+ return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
- names = dedupeNames(names)
- if layer, ok := r.lookup(id); ok {
- for _, name := range layer.Names {
- delete(r.byname, name)
- }
- for _, name := range names {
- if otherLayer, ok := r.byname[name]; ok {
- r.removeName(otherLayer, name)
- }
- r.byname[name] = layer
+ layer, ok := r.lookup(id)
+ if !ok {
+ return ErrLayerUnknown
+ }
+ oldNames := layer.Names
+ names, err := applyNameOperation(oldNames, names, op)
+ if err != nil {
+ return err
+ }
+ for _, name := range oldNames {
+ delete(r.byname, name)
+ }
+ for _, name := range names {
+ if otherLayer, ok := r.byname[name]; ok {
+ r.removeName(otherLayer, name)
}
- layer.Names = names
- return r.Save()
+ r.byname[name] = layer
}
- return ErrLayerUnknown
+ layer.Names = names
+ return r.Save()
}
func (r *layerStore) datadir(id string) string {
@@ -1062,25 +1150,25 @@ func (r *layerStore) datapath(id, key string) string {
func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) {
if key == "" {
- return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve layer big data value for empty name")
+ return nil, fmt.Errorf("can't retrieve layer big data value for empty name: %w", ErrInvalidBigDataName)
}
layer, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
}
return os.Open(r.datapath(layer.ID, key))
}
func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
if key == "" {
- return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for layer big data item")
+ return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with layers at %q", r.layerspath())
+ return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
- return errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to write bigdata", id)
+ return fmt.Errorf("locating layer with ID %q to write bigdata: %w", id, ErrLayerUnknown)
}
err := os.MkdirAll(r.datadir(layer.ID), 0700)
if err != nil {
@@ -1092,16 +1180,16 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
// so that it is either accessing the old data or the new one.
writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600)
if err != nil {
- return errors.Wrapf(err, "error opening bigdata file")
+ return fmt.Errorf("opening bigdata file: %w", err)
}
if _, err := io.Copy(writer, data); err != nil {
writer.Close()
- return errors.Wrapf(err, "error copying bigdata for the layer")
+ return fmt.Errorf("copying bigdata for the layer: %w", err)
}
if err := writer.Close(); err != nil {
- return errors.Wrapf(err, "error closing bigdata file for the layer")
+ return fmt.Errorf("closing bigdata file for the layer: %w", err)
}
addName := true
@@ -1121,7 +1209,7 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
func (r *layerStore) BigDataNames(id string) ([]string, error) {
layer, ok := r.lookup(id)
if !ok {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating layer with ID %q to retrieve bigdata names", id)
+ return nil, fmt.Errorf("locating layer with ID %q to retrieve bigdata names: %w", id, ErrImageUnknown)
}
return copyStringSlice(layer.BigDataNames), nil
}
@@ -1135,7 +1223,7 @@ func (r *layerStore) Metadata(id string) (string, error) {
func (r *layerStore) SetMetadata(id, metadata string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath())
+ return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
if layer, ok := r.lookup(id); ok {
layer.Metadata = metadata
@@ -1148,14 +1236,37 @@ func (r *layerStore) tspath(id string) string {
return filepath.Join(r.layerdir, id+tarSplitSuffix)
}
+// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true
+func layerHasIncompleteFlag(layer *Layer) bool {
+ // layer.Flags[…] is defined to succeed and return ok == false if Flags == nil
+ if flagValue, ok := layer.Flags[incompleteFlag]; ok {
+ if b, ok := flagValue.(bool); ok && b {
+ return true
+ }
+ }
+ return false
+}
+
func (r *layerStore) deleteInternal(id string) error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+ return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
}
+ // Ensure that if we are interrupted, the layer will be cleaned up.
+ if !layerHasIncompleteFlag(layer) {
+ if layer.Flags == nil {
+ layer.Flags = make(map[string]interface{})
+ }
+ layer.Flags[incompleteFlag] = true
+ if err := r.Save(); err != nil {
+ return err
+ }
+ }
+ // We never unset incompleteFlag; below, we remove the entire object from r.layers.
+
id = layer.ID
err := r.driver.Remove(id)
if err != nil {
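
The new `layerHasIncompleteFlag` helper relies on the fact that indexing a nil map is safe in Go, so no nil guard is needed before reading `layer.Flags`. A minimal demonstration (types reduced to the one field that matters):

```go
package main

import "fmt"

const incompleteFlag = "incomplete"

type Layer struct {
	Flags map[string]interface{}
}

func layerHasIncompleteFlag(layer *Layer) bool {
	// Reading from a nil map succeeds and yields ok == false.
	if v, ok := layer.Flags[incompleteFlag]; ok {
		if b, ok := v.(bool); ok && b {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(layerHasIncompleteFlag(&Layer{}))                                                     // false: nil-map read is fine
	fmt.Println(layerHasIncompleteFlag(&Layer{Flags: map[string]interface{}{incompleteFlag: true}})) // true
}
```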
@@ -1237,7 +1348,7 @@ func (r *layerStore) Delete(id string) error {
// driver level.
mountCount, err := r.Mounted(id)
if err != nil {
- return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
+ return fmt.Errorf("checking if layer %q is still mounted: %w", id, err)
}
for mountCount > 0 {
if _, err := r.Unmount(id, false); err != nil {
@@ -1245,7 +1356,7 @@ func (r *layerStore) Delete(id string) error {
}
mountCount, err = r.Mounted(id)
if err != nil {
- return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
+ return fmt.Errorf("checking if layer %q is still mounted: %w", id, err)
}
}
if err := r.deleteInternal(id); err != nil {
@@ -1275,12 +1386,15 @@ func (r *layerStore) Get(id string) (*Layer, error) {
func (r *layerStore) Wipe() error {
if !r.IsReadWrite() {
- return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+ return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
ids = append(ids, id)
}
+ sort.Slice(ids, func(i, j int) bool {
+ return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created)
+ })
for _, id := range ids {
if err := r.Delete(id); err != nil {
return err
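
`Wipe` now deletes layers newest-first. Since a child layer is necessarily created after its parent, sorting by `Created` descending removes children before the parents they depend on. A tiny sketch of the ordering (IDs and times are made up):

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

func main() {
	created := map[string]time.Time{
		"parent": time.Unix(100, 0),
		"child":  time.Unix(200, 0),
	}
	ids := []string{"parent", "child"}
	sort.Slice(ids, func(i, j int) bool {
		return created[ids[i]].After(created[ids[j]]) // newest first
	})
	fmt.Println(ids) // [child parent]
}
```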
@@ -1430,7 +1544,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
diff, err := archive.DecompressStream(blob)
if err != nil {
if err2 := blob.Close(); err2 != nil {
- err = errors.Wrapf(err, "failed to close blob file: %v", err2)
+ err = fmt.Errorf("failed to close blob file: %v: %w", err2, err)
}
aLayer.Release()
return nil, err
@@ -1438,7 +1552,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
rc, err := maybeCompressReadCloser(diff)
if err != nil {
if err2 := closeAll(blob.Close, diff.Close); err2 != nil {
- err = errors.Wrapf(err, "failed to cleanup: %v", err2)
+ err = fmt.Errorf("failed to cleanup: %v: %w", err2, err)
}
aLayer.Release()
return nil, err
@@ -1463,34 +1577,48 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
}
return maybeCompressReadCloser(diff)
}
- defer tsfile.Close()
decompressor, err := pgzip.NewReader(tsfile)
if err != nil {
- return nil, err
- }
- defer decompressor.Close()
-
- tsbytes, err := ioutil.ReadAll(decompressor)
- if err != nil {
+ if e := tsfile.Close(); e != nil {
+ logrus.Debug(e)
+ }
return nil, err
}
- metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes))
+ metadata = storage.NewJSONUnpacker(decompressor)
fgetter, err := r.newFileGetter(to)
if err != nil {
- return nil, err
+ errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err))
+ if err := decompressor.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
+ }
+ if err := tsfile.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
+ }
+ return nil, errs.ErrorOrNil()
}
tarstream := asm.NewOutputTarStream(fgetter, metadata)
rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
- err1 := tarstream.Close()
- err2 := fgetter.Close()
- if err2 == nil {
- return err1
+ var errs *multierror.Error
+ if err := decompressor.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
+ }
+ if err := tsfile.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
+ }
+ if err := tarstream.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing reconstructed tarstream: %w", err))
}
- return err2
+ if err := fgetter.Close(); err != nil {
+ errs = multierror.Append(errs, fmt.Errorf("closing file-getter: %w", err))
+ }
+ return errs.ErrorOrNil()
})
return maybeCompressReadCloser(rc)
}
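
The `Diff` cleanup path switches to `hashicorp/go-multierror` so that every `Close` runs and every failure is reported, instead of the old behavior of returning at most one of two errors. A small demonstration of the pattern (not code from the patch); note that `ErrorOrNil` collapses an empty aggregate back to `nil`:

```go
package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func closeAll(closers ...func() error) error {
	var errs *multierror.Error
	for _, c := range closers {
		if err := c(); err != nil {
			errs = multierror.Append(errs, err) // keep going; collect everything
		}
	}
	return errs.ErrorOrNil() // nil when every Close succeeded
}

func main() {
	err := closeAll(
		func() error { return nil },
		func() error { return errors.New("closing decompressor: bad gzip trailer") },
	)
	fmt.Println(err)
}
```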
@@ -1510,7 +1638,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
if !r.IsReadWrite() {
- return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
+ return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
}
layer, ok := r.lookup(to)
@@ -1546,7 +1674,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
if compressedDigester != nil {
compressedWriter = compressedDigester.Hash()
} else {
- compressedWriter = ioutil.Discard
+ compressedWriter = io.Discard
}
compressedCounter := ioutils.NewWriteCounter(compressedWriter)
defragmented = io.TeeReader(defragmented, compressedCounter)
@@ -1557,7 +1685,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
compressor = pgzip.NewWriter(&tsdata)
}
if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
- logrus.Infof("error setting compression concurrency threads to 1: %v; ignoring", err)
+ logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
}
metadata := storage.NewJSONPacker(compressor)
uncompressed, err := archive.DecompressStream(defragmented)
@@ -1806,11 +1934,10 @@ func (r *layerStore) Modified() (bool, error) {
// reload the storage in any case.
info, err := os.Stat(r.layerspath())
if err != nil && !os.IsNotExist(err) {
- return false, errors.Wrap(err, "stat layers file")
+ return false, fmt.Errorf("stat layers file: %w", err)
}
if info != nil {
tmodified = info.ModTime() != r.layerspathModified
- r.layerspathModified = info.ModTime()
}
return tmodified, nil
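
Moving the `layerspathModified` update out of `Modified()` and into `Load()` makes `Modified()` idempotent: a change is no longer "consumed" by merely asking about it, so a caller that observes a change but fails before reloading will still see it next time. An illustrative reduction of the behavior (field names are stand-ins):

```go
package main

import "fmt"

type store struct {
	modTime     int64 // stand-in for the layers file's mtime
	recordedMod int64 // layerspathModified analogue
}

func (s *store) Modified() bool { return s.modTime != s.recordedMod }

// The timestamp is recorded only when a load actually happens.
func (s *store) Load() { s.recordedMod = s.modTime }

func main() {
	s := &store{modTime: 42}
	fmt.Println(s.Modified()) // true
	fmt.Println(s.Modified()) // still true: querying no longer consumes the change
	s.Load()
	fmt.Println(s.Modified()) // false once the reload has really occurred
}
```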
@@ -1843,10 +1970,10 @@ func closeAll(closes ...func() error) (rErr error) {
for _, f := range closes {
if err := f(); err != nil {
if rErr == nil {
- rErr = errors.Wrapf(err, "close error")
+ rErr = fmt.Errorf("close error: %w", err)
continue
}
- rErr = errors.Wrapf(rErr, "%v", err)
+ rErr = fmt.Errorf("%v: %w", err, rErr)
}
}
return
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 48e846f7ce3..82c0adeb7d0 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -5,12 +5,14 @@ import (
"bufio"
"bytes"
"compress/bzip2"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"path/filepath"
"runtime"
+ "strconv"
"strings"
"sync"
"syscall"
@@ -22,8 +24,6 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
gzip "github.com/klauspost/pgzip"
- "github.com/opencontainers/runc/libcontainer/userns"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/ulikunitz/xz"
)
@@ -71,12 +71,17 @@ type (
)
const (
- tarExt = "tar"
- solaris = "solaris"
- windows = "windows"
- containersOverrideXattr = "user.containers.override_stat"
+ tarExt = "tar"
+ solaris = "solaris"
+ windows = "windows"
+ darwin = "darwin"
+ freebsd = "freebsd"
)
+var xattrsToIgnore = map[string]interface{}{
+ "security.selinux": true,
+}
+
// Archiver allows the reuse of most utility functions of this package with a
// pluggable Untar function. To facilitate the passing of specific id mappings
// for untar, an archiver can be created with maps which will then be passed to
@@ -214,7 +219,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
case Zstd:
return zstdReader(buf)
default:
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
}
}
@@ -235,9 +240,9 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er
case Bzip2, Xz:
// archive/bzip2 does not support writing, and there is no xz support at all
// However, this is not a problem as docker only currently generates gzipped tars
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
default:
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
}
}
@@ -357,7 +362,7 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
name, err = canonicalTarName(name, fi.IsDir())
if err != nil {
- return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err)
}
hdr.Name = name
if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
@@ -400,7 +405,7 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
for _, xattr := range []string{"security.capability", "security.ima"} {
capability, err := system.Lgetxattr(path, xattr)
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
- return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path)
+ return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
}
if capability != nil {
hdr.Xattrs[xattr] = string(capability)
@@ -478,7 +483,7 @@ func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *
}
// canonicalTarName provides a platform-independent and consistent posix-style
-//path for files and directories to be archived regardless of the platform.
+// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
name, err := CanonicalTarNameForPath(name)
if err != nil {
@@ -507,6 +512,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return err
}
}
+ if fi.Mode()&os.ModeSocket != 0 {
+ logrus.Warnf("archive: skipping %q since it is a socket", path)
+ return nil
+ }
hdr, err := FileInfoHeader(name, fi, link)
if err != nil {
@@ -663,7 +672,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
if !strings.HasPrefix(targetPath, extractDir) {
return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
}
- if err := os.Link(targetPath, path); err != nil {
+ if err := handleLLink(targetPath, path); err != nil {
return err
}
@@ -689,9 +698,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
}
- if forceMask != nil && hdr.Typeflag != tar.TypeSymlink {
+ if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777)
- if err := system.Lsetxattr(path, containersOverrideXattr, []byte(value), 0); err != nil {
+ if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
return err
}
}
@@ -743,6 +752,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
var errs []string
for key, value := range hdr.Xattrs {
+ if _, found := xattrsToIgnore[key]; found {
+ continue
+ }
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
// We ignore errors here because not all graphdrivers support
@@ -852,14 +864,14 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
rebaseName := options.RebaseNames[include]
walkRoot := getWalkRoot(srcPath, include)
- filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
if err != nil {
logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}
relFilePath, err := filepath.Rel(srcPath, filePath)
- if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) {
// Error getting relative path OR we are looking
// at the source directory path. Skip in both situations.
return nil
@@ -879,7 +891,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if include != relFilePath {
matches, err := pm.IsMatch(relFilePath)
if err != nil {
- logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ logrus.Errorf("Matching %s: %v", relFilePath, err)
return err
}
skip = matches
@@ -892,7 +904,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// dir. If so then we can't skip this dir.
// Its not a dir then so we can just return/skip.
- if !f.IsDir() {
+ if !d.IsDir() {
return nil
}
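
The tar walk moves from `filepath.Walk` to `filepath.WalkDir` (Go 1.16), which passes a `fs.DirEntry` instead of a pre-computed `os.FileInfo` and so avoids an `lstat` per visited entry. A minimal, runnable sketch of the API difference:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() && d.Name() == ".git" {
			return filepath.SkipDir // cheap: no stat was needed to decide this
		}
		fmt.Println(path) // call d.Info() only where the full FileInfo is required
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```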
@@ -962,11 +974,14 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
buffer := make([]byte, 1<<20)
+ doChown := !options.NoLchown
if options.ForceMask != nil {
+ // if ForceMask is in place, make sure lchown is disabled.
+ doChown = false
uid, gid, mode, err := GetFileOwner(dest)
if err == nil {
value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
- if err := system.Lsetxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil {
+ if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
return err
}
}
@@ -1067,7 +1082,7 @@ loop:
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
- if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+ if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return err
}
@@ -1091,7 +1106,9 @@ loop:
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
+//
+// identity (uncompressed), gzip, bzip2, xz.
+//
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
@@ -1107,7 +1124,7 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) e
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
if tarArchive == nil {
- return fmt.Errorf("Empty archive")
+ return fmt.Errorf("empty archive")
}
dest = filepath.Clean(dest)
if options == nil {
@@ -1143,7 +1160,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
GIDMaps: tarMappings.GIDs(),
Compression: Uncompressed,
CopyPass: true,
- InUserNS: userns.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
}
archive, err := TarWithOptions(src, options)
if err != nil {
@@ -1158,7 +1175,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
- InUserNS: userns.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1178,7 +1195,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
- InUserNS: userns.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1223,7 +1240,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
}
if srcSt.IsDir() {
- return fmt.Errorf("Can't copy a directory")
+ return fmt.Errorf("can't copy a directory")
}
// Clean up the trailing slash. This must be done in an operating
@@ -1278,7 +1295,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
UIDMaps: archiver.UntarIDMappings.UIDs(),
GIDMaps: archiver.UntarIDMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
- InUserNS: userns.RunningInUserNS(),
+ InUserNS: unshare.IsRootless(),
NoOverwriteDirNonDir: true,
}
err = archiver.Untar(r, filepath.Dir(dst), options)
@@ -1298,6 +1315,21 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
if err != nil {
return err
}
+ } else if runtime.GOOS == darwin {
+ uid, gid = hdr.Uid, hdr.Gid
+ if xstat, ok := hdr.Xattrs[idtools.ContainersOverrideXattr]; ok {
+ attrs := strings.Split(string(xstat), ":")
+ if len(attrs) == 3 {
+ val, err := strconv.ParseUint(attrs[0], 10, 32)
+ if err == nil {
+ uid = int(val)
+ }
+ val, err = strconv.ParseUint(attrs[1], 10, 32)
+ if err == nil {
+ gid = int(val)
+ }
+ }
+ }
} else {
uid, gid = hdr.Uid, hdr.Gid
}
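
On Darwin, ownership is recovered from the containers override xattr, whose value is a `uid:gid:0mode` string. A hedged sketch of that parsing in isolation — the xattr name constant comes from `pkg/idtools`; the helper itself is illustrative:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseOverride extracts uid and gid from a "uid:gid:0mode" value,
// falling back (ok == false) if the string is malformed.
func parseOverride(xstat string) (uid, gid int, ok bool) {
	attrs := strings.Split(xstat, ":")
	if len(attrs) != 3 {
		return 0, 0, false
	}
	u, err := strconv.ParseUint(attrs[0], 10, 32)
	if err != nil {
		return 0, 0, false
	}
	g, err := strconv.ParseUint(attrs[1], 10, 32)
	if err != nil {
		return 0, 0, false
	}
	return int(u), int(g), true
}

func main() {
	fmt.Println(parseOverride("1000:1000:0755"))
}
```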
@@ -1317,7 +1349,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
- f, err := ioutil.TempFile(dir, "")
+ f, err := os.CreateTemp(dir, "")
if err != nil {
return nil, err
}
@@ -1419,7 +1451,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
contentReader, contentWriter, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
+ return fmt.Errorf("creating pipe extract data to %q: %w", dest, err)
}
defer contentReader.Close()
defer contentWriter.Close()
@@ -1438,11 +1470,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
hashWorker.Done()
}()
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
- err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+ err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
}
hashWorker.Wait()
- if err == nil {
- err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+ if err == nil && hashError != nil {
+ err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
}
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go
index 7c307ffcfe5..36017c3bf05 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go
@@ -1,3 +1,4 @@
+//go:build freebsd
// +build freebsd
package archive
@@ -8,10 +9,11 @@ import (
"os"
"path/filepath"
"syscall"
+ "unsafe"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
- "github.com/opencontainers/runc/libcontainer/userns"
+ "github.com/containers/storage/pkg/unshare"
"golang.org/x/sys/unix"
)
@@ -87,7 +89,7 @@ func minor(device uint64) uint64 {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- if userns.RunningInUserNS() {
+ if unshare.IsRootless() {
// cannot create a device if running in user namespace
return nil
}
@@ -110,16 +112,18 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *
if forceMask != nil {
permissionsMask = *forceMask
}
- if hdr.Typeflag == tar.TypeLink {
- if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := os.Chmod(path, permissionsMask); err != nil {
- return err
- }
- }
- } else if hdr.Typeflag != tar.TypeSymlink {
- if err := os.Chmod(path, permissionsMask); err != nil {
- return err
- }
+ p, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ _, _, e1 := unix.Syscall(unix.SYS_LCHMOD, uintptr(unsafe.Pointer(p)), uintptr(permissionsMask), 0)
+ if e1 != 0 {
+ return e1
}
return nil
}
+
+// Hardlink without following symlinks
+func handleLLink(targetPath string, path string) error {
+ return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index 2f548b661ce..51fbd9a2197 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -36,7 +36,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
// we just rename the file and make it normal
dir, filename := filepath.Split(hdr.Name)
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
- hdr.Mode = 0600
+ hdr.Mode = 0
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
index 7c3e442dad9..d0fb3306646 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows && !freebsd
// +build !windows,!freebsd
package archive
@@ -97,7 +98,7 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
mode |= unix.S_IFIFO
}
- return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
+ return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor))
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
@@ -118,3 +119,13 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *
}
return nil
}
+
+// Hardlink without following symlinks
+func handleLLink(targetPath, path string) error {
+ // Note: on Linux, the link syscall will not follow symlinks.
+ // This behavior is implementation-dependent since
+ // POSIX.1-2008 so to make it clear that we need non-symlink
+ // following here we use the linkat syscall which has a flags
+ // field to select symlink following or not.
+ return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0)
+}
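
A tiny demonstration of the linkat(2) call that handleLLink relies on: with a zero flags argument the target is not dereferenced, so linking through a symlink produces a hard link to the symlink itself. A throwaway sketch (paths are placeholders, errors on setup ignored):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        _ = os.WriteFile("/tmp/target", []byte("data"), 0o644)
        _ = os.Symlink("/tmp/target", "/tmp/sym")

        // flags == 0: do NOT follow symlinks; /tmp/hard becomes a hard
        // link to the symlink /tmp/sym, not to /tmp/target.
        err := unix.Linkat(unix.AT_FDCWD, "/tmp/sym", unix.AT_FDCWD, "/tmp/hard", 0)
        fmt.Println(err)
    }
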
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
index a0872444f32..e4401177553 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package archive
@@ -34,7 +35,7 @@ func CanonicalTarNameForPath(p string) (string, error) {
// in file names, it is mostly safe to replace however we must
// check just in case
if strings.Contains(p, "/") {
- return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ return "", fmt.Errorf("windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
@@ -77,3 +78,8 @@ func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
// no notion of file ownership mapping yet on Windows
return idtools.IDPair{0, 0}, nil
}
+
+// Hardlink without following symlinks
+func handleLLink(targetPath string, path string) error {
+ return os.Link(targetPath, path)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go
index c7bb25d0f17..6cd9e35e912 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes.go
@@ -5,7 +5,6 @@ import (
"bytes"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -403,7 +402,7 @@ func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string,
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
- emptyDir, err := ioutil.TempDir("", "empty")
+ emptyDir, err := os.MkdirTemp("", "empty")
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
index bbbd8c9de87..8769f2291b6 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
@@ -1,9 +1,11 @@
+//go:build !linux
// +build !linux
package archive
import (
"fmt"
+ "io/fs"
"os"
"path/filepath"
"runtime"
@@ -41,7 +43,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) {
root := newRootFileInfo(idMappings)
- err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ err := filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
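
filepath.WalkDir (Go 1.16+) is the motivation for the change above: it hands the callback an fs.DirEntry instead of lstat'ing every entry up front, so stat information is fetched only when needed. The updated callback shape in isolation:

    package main

    import (
        "fmt"
        "io/fs"
        "path/filepath"
    )

    func main() {
        err := filepath.WalkDir("/etc", func(path string, d fs.DirEntry, err error) error {
            if err != nil {
                return err
            }
            // d.Info() performs the lstat lazily, only when the caller asks.
            info, err := d.Info()
            if err != nil {
                return err
            }
            fmt.Println(path, info.Mode())
            return nil
        })
        if err != nil {
            fmt.Println("walk failed:", err)
        }
    }
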
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go
index 6298a674d49..2c714e8da54 100644
--- a/vendor/github.com/containers/storage/pkg/archive/copy.go
+++ b/vendor/github.com/containers/storage/pkg/archive/copy.go
@@ -4,7 +4,6 @@ import (
"archive/tar"
"errors"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -255,7 +254,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
- return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ return dstInfo.Path, io.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
index 14ffad5c0d4..7e835d44b3a 100644
--- a/vendor/github.com/containers/storage/pkg/archive/diff.go
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -4,7 +4,7 @@ import (
"archive/tar"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"os"
"path/filepath"
"runtime"
@@ -85,7 +85,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = os.MkdirAll(parentPath, 0600)
+ err = os.MkdirAll(parentPath, 0755)
if err != nil {
return 0, err
}
@@ -101,7 +101,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
- if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil {
+ if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
@@ -134,7 +134,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
if err != nil {
return 0, err
}
- err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
@@ -183,7 +183,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
- return 0, fmt.Errorf("Invalid aufs hardlink")
+ return 0, fmt.Errorf("invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index e874eb74e05..b5d8961e50c 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -4,26 +4,15 @@ import (
stdtar "archive/tar"
"fmt"
"io"
- "io/ioutil"
- "net"
"os"
- "os/user"
"path/filepath"
"sync"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
- "github.com/opencontainers/runc/libcontainer/userns"
- "github.com/pkg/errors"
+ "github.com/containers/storage/pkg/unshare"
)
-func init() {
- // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
- // environment not in the chroot from untrusted files.
- _, _ = user.Lookup("storage")
- _, _ = net.LookupHost("localhost")
-}
-
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
archiver := archive.NewArchiver(idMappings)
@@ -41,7 +30,8 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
+//
+// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return untarHandler(tarArchive, dest, options, true, dest)
}
@@ -72,11 +62,11 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
if tarArchive == nil {
- return fmt.Errorf("Empty archive")
+ return fmt.Errorf("empty archive")
}
if options == nil {
options = &archive.TarOptions{}
- options.InUserNS = userns.RunningInUserNS()
+ options.InUserNS = unshare.IsRootless()
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
@@ -92,7 +82,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
}
}
- r := ioutil.NopCloser(tarArchive)
+ r := io.NopCloser(tarArchive)
if decompress {
decompressedArchive, err := archive.DecompressStream(tarArchive)
if err != nil {
@@ -124,7 +114,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
contentReader, contentWriter, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
+ return fmt.Errorf("creating pipe extract data to %q: %w", dest, err)
}
defer contentReader.Close()
defer contentWriter.Close()
@@ -143,11 +133,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
hashWorker.Done()
}()
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
- err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+ err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
}
hashWorker.Wait()
- if err == nil {
- err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+ if err == nil && hashError != nil {
+ err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
}
return err
}
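
CopyFileWithTarAndChown above tees the tar stream into a pipe so a digest can be computed concurrently with extraction. The pattern in isolation, as a sketch with the untar step replaced by io.Copy:

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"
        "strings"
        "sync"
    )

    func main() {
        src := strings.NewReader("pretend this is a tar stream")

        pr, pw := io.Pipe()
        var (
            wg     sync.WaitGroup
            digest string
        )
        wg.Add(1)
        go func() {
            defer wg.Done()
            h := sha256.New()
            io.Copy(h, pr) // hash everything the consumer reads
            digest = fmt.Sprintf("%x", h.Sum(nil))
        }()

        // The "extraction" reads from the TeeReader; every byte it consumes
        // is also written to pw and hashed by the goroutine above.
        n, err := io.Copy(io.Discard, io.TeeReader(src, pw))
        pw.Close()
        wg.Wait()
        fmt.Println(n, err, digest)
    }
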
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
new file mode 100644
index 00000000000..d257cc8e942
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
@@ -0,0 +1,21 @@
+package chrootarchive
+
+import (
+ "io"
+
+ "github.com/containers/storage/pkg/archive"
+)
+
+func chroot(path string) error {
+ return nil
+}
+
+func invokeUnpack(decompressedArchive io.ReadCloser,
+ dest string,
+ options *archive.TarOptions, root string) error {
+ return archive.Unpack(decompressedArchive, dest, options)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ return archive.TarWithOptions(srcPath, options)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
index 9da10fe33cd..8cc0f33b31c 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -1,13 +1,14 @@
-// +build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
package chrootarchive
import (
"bytes"
+ "errors"
"flag"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -15,7 +16,6 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/reexec"
- "github.com/pkg/errors"
)
// untar is the entry-point for storage-untar on re-exec. This is not used on
@@ -69,7 +69,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
// child
r, w, err := os.Pipe()
if err != nil {
- return fmt.Errorf("Untar pipe failure: %v", err)
+ return fmt.Errorf("untar pipe failure: %w", err)
}
if root != "" {
@@ -96,13 +96,13 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
if err := cmd.Start(); err != nil {
w.Close()
- return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+ return fmt.Errorf("untar error on re-exec cmd: %w", err)
}
//write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
- return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+ return fmt.Errorf("untar json encode to pipe failed: %w", err)
}
w.Close()
@@ -110,9 +110,9 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
// when `xz -d -c -q | storage-untar ...` failed on storage-untar side,
// we need to exhaust `xz`'s output, otherwise the `xz` side will be
// pending on write pipe forever
- io.Copy(ioutil.Discard, decompressedArchive)
+ io.Copy(io.Discard, decompressedArchive)
- return fmt.Errorf("Error processing tar file(%v): %s", err, output)
+ return fmt.Errorf("processing tar file(%s): %w", output, err)
}
return nil
}
@@ -184,22 +184,24 @@ func invokePack(srcPath string, options *archive.TarOptions, root string) (io.Re
stdin, err := cmd.StdinPipe()
if err != nil {
- return nil, errors.Wrap(err, "error getting options pipe for tar process")
+ return nil, fmt.Errorf("getting options pipe for tar process: %w", err)
}
if err := cmd.Start(); err != nil {
- return nil, errors.Wrap(err, "tar error on re-exec cmd")
+ return nil, fmt.Errorf("tar error on re-exec cmd: %w", err)
}
go func() {
err := cmd.Wait()
- err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+ if err != nil {
+ err = fmt.Errorf("processing tar file(%s): %w", errBuff, err)
+ }
tarW.CloseWithError(err)
}()
if err := json.NewEncoder(stdin).Encode(options); err != nil {
stdin.Close()
- return nil, errors.Wrap(err, "tar json encode to pipe failed")
+ return nil, fmt.Errorf("tar json encode to pipe failed: %w", err)
}
stdin.Close()
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
index 76c94c6c1e9..09ef6d5de45 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
@@ -2,8 +2,9 @@ package chrootarchive
import (
"fmt"
- "io/ioutil"
+ "net"
"os"
+ "os/user"
"path/filepath"
"github.com/containers/storage/pkg/mount"
@@ -23,13 +24,18 @@ func chroot(path string) (err error) {
return err
}
+ // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
+ // environment, not from untrusted files inside the chroot.
+ _, _ = user.Lookup("storage")
+ _, _ = net.LookupHost("localhost")
+
// if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot
if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) {
return realChroot(path)
}
if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
- return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
+ return fmt.Errorf("creating mount namespace before pivot: %w", err)
}
// make everything in new ns private
@@ -44,9 +50,9 @@ func chroot(path string) (err error) {
}
// setup oldRoot for pivot_root
- pivotDir, err := ioutil.TempDir(path, ".pivot_root")
+ pivotDir, err := os.MkdirTemp(path, ".pivot_root")
if err != nil {
- return fmt.Errorf("Error setting up pivot dir: %v", err)
+ return fmt.Errorf("setting up pivot dir: %w", err)
}
var mounted bool
@@ -65,7 +71,7 @@ func chroot(path string) (err error) {
// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
// because we already cleaned it up on failed pivot_root
if errCleanup != nil && !os.IsNotExist(errCleanup) {
- errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
+ errCleanup = fmt.Errorf("cleaning up after pivot: %w", errCleanup)
if err == nil {
err = errCleanup
}
@@ -75,7 +81,7 @@ func chroot(path string) (err error) {
if err := unix.PivotRoot(path, pivotDir); err != nil {
// If pivot fails, fall back to the normal chroot after cleaning up temp dir
if err := os.Remove(pivotDir); err != nil {
- return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
+ return fmt.Errorf("cleaning up after failed pivot: %w", err)
}
return realChroot(path)
}
@@ -86,17 +92,17 @@ func chroot(path string) (err error) {
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
if err := unix.Chdir("/"); err != nil {
- return fmt.Errorf("Error changing to new root: %v", err)
+ return fmt.Errorf("changing to new root: %w", err)
}
// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
- return fmt.Errorf("Error making old root private after pivot: %v", err)
+ return fmt.Errorf("making old root private after pivot: %w", err)
}
// Now unmount the old root so it's no longer visible from the new root
if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
- return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
+ return fmt.Errorf("while unmounting old root after pivot: %w", err)
}
mounted = false
@@ -105,10 +111,10 @@ func chroot(path string) (err error) {
func realChroot(path string) error {
if err := unix.Chroot(path); err != nil {
- return fmt.Errorf("Error after fallback to chroot: %v", err)
+ return fmt.Errorf("after fallback to chroot: %w", err)
}
if err := unix.Chdir("/"); err != nil {
- return fmt.Errorf("Error changing to new root after chroot: %v", err)
+ return fmt.Errorf("changing to new root after chroot: %w", err)
}
return nil
}
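
For readers unfamiliar with the pivot_root sequence being re-worded above, the skeleton is: unshare a mount namespace, bind-mount the new root onto itself so it is a mount point, pivot into it, then detach the old root. A compressed, Linux-only sketch (not this package's code; error handling trimmed and privileges required):

    //go:build linux

    package main

    import (
        "os"
        "path/filepath"

        "golang.org/x/sys/unix"
    )

    func pivotInto(newRoot string) error {
        if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
            return err
        }
        // pivot_root requires newRoot to be a mount point.
        if err := unix.Mount(newRoot, newRoot, "", unix.MS_BIND|unix.MS_REC, ""); err != nil {
            return err
        }
        oldRoot, err := os.MkdirTemp(newRoot, ".pivot_root")
        if err != nil {
            return err
        }
        if err := unix.PivotRoot(newRoot, oldRoot); err != nil {
            return err
        }
        if err := unix.Chdir("/"); err != nil {
            return err
        }
        // Detach the old root so nothing from the host stays visible.
        old := filepath.Join("/", filepath.Base(oldRoot))
        if err := unix.Unmount(old, unix.MNT_DETACH); err != nil {
            return err
        }
        return os.Remove(old)
    }

    func main() { _ = pivotInto("/tmp/newroot") }
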
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
index 83278ee5051..d5aedd002e1 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
@@ -1,4 +1,4 @@
-// +build !windows,!linux
+// +build !windows,!linux,!darwin
package chrootarchive
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
new file mode 100644
index 00000000000..52c677bc717
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
@@ -0,0 +1,40 @@
+package chrootarchive
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/storage/pkg/archive"
+)
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+ dest = filepath.Clean(dest)
+
+ if decompress {
+ decompressed, err := archive.DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompressed.Close()
+
+ layer = decompressed
+ }
+
+ tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
+ if err != nil {
+ return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
+ }
+
+ s, err := archive.UnpackLayer(dest, layer, options)
+ os.RemoveAll(tmpDir)
+ if err != nil {
+ return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
+ }
+
+ return s, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
index 84253c6aa9b..90f453913e5 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -1,4 +1,5 @@
-//+build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
package chrootarchive
@@ -7,7 +8,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -15,7 +15,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/system"
- "github.com/opencontainers/runc/libcontainer/userns"
+ "github.com/containers/storage/pkg/unshare"
)
type applyLayerResponse struct {
@@ -35,7 +35,7 @@ func applyLayer() {
runtime.LockOSThread()
flag.Parse()
- inUserns := userns.RunningInUserNS()
+ inUserns := unshare.IsRootless()
if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
@@ -55,7 +55,7 @@ func applyLayer() {
options.InUserNS = true
}
- if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil {
+ if tmpDir, err = os.MkdirTemp("/", "temp-storage-extract"); err != nil {
fatal(err)
}
@@ -68,7 +68,7 @@ func applyLayer() {
encoder := json.NewEncoder(os.Stdout)
if err := encoder.Encode(applyLayerResponse{size}); err != nil {
- fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+ fatal(fmt.Errorf("unable to encode layerSize JSON: %w", err))
}
if _, err := flush(os.Stdin); err != nil {
@@ -94,7 +94,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
}
if options == nil {
options = &archive.TarOptions{}
- if userns.RunningInUserNS() {
+ if unshare.IsRootless() {
options.InUserNS = true
}
}
@@ -104,25 +104,25 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
data, err := json.Marshal(options)
if err != nil {
- return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
+ return 0, fmt.Errorf("ApplyLayer json encode: %w", err)
}
cmd := reexec.Command("storage-applyLayer", dest)
cmd.Stdin = layer
- cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
+ cmd.Env = append(os.Environ(), fmt.Sprintf("OPT=%s", data))
outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
cmd.Stdout, cmd.Stderr = outBuf, errBuf
if err = cmd.Run(); err != nil {
- return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
+ return 0, fmt.Errorf("ApplyLayer stdout: %s stderr: %s %w", outBuf, errBuf, err)
}
// Stdout should be a valid JSON struct representing an applyLayerResponse.
response := applyLayerResponse{}
decoder := json.NewDecoder(outBuf)
if err = decoder.Decode(&response); err != nil {
- return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
+ return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %w", err)
}
return response.LayerSize, nil
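
The cmd.Env change above is a genuine fix, not a cleanup: exec.Cmd only falls back to the parent environment when Env is nil, so append(cmd.Env, ...) on a fresh command produced an environment containing nothing but OPT. A small demonstration of the corrected pattern:

    package main

    import (
        "encoding/json"
        "fmt"
        "os"
        "os/exec"
    )

    func main() {
        data, _ := json.Marshal(map[string]bool{"inUserNS": true})

        cmd := exec.Command("env")
        // Wrong: cmd.Env starts nil, so append(cmd.Env, ...) yields an
        // environment with only OPT in it, dropping PATH, HOME, etc.
        // Right: start from the parent environment.
        cmd.Env = append(os.Environ(), fmt.Sprintf("OPT=%s", data))

        out, err := cmd.Output()
        fmt.Println(err, len(out))
    }
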
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
index 8f8e88bfbea..8bfff5d65ba 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
@@ -3,7 +3,6 @@ package chrootarchive
import (
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
@@ -30,7 +29,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
layer = decompressed
}
- tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract")
+ tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
if err != nil {
return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go
new file mode 100644
index 00000000000..fa17c9bf831
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go
@@ -0,0 +1,4 @@
+package chrootarchive
+
+func init() {
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
index ea08135e4d5..274a946e2fb 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
@@ -1,11 +1,11 @@
-// +build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
package chrootarchive
import (
"fmt"
"io"
- "io/ioutil"
"os"
"github.com/containers/storage/pkg/reexec"
@@ -25,5 +25,5 @@ func fatal(err error) {
// flush consumes all the bytes from the reader discarding
// any errors
func flush(r io.Reader) (bytes int64, err error) {
- return io.Copy(ioutil.Discard, r)
+ return io.Copy(io.Discard, r)
}
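
Both the untar and applyLayer helpers rely on the re-exec pattern: the binary registers named entry points, and reexec.Init() diverts execution when the process was started under one of those names. A minimal sketch of the pattern, assuming the vendored pkg/reexec API (Register, Init, Command):

    package main

    import (
        "fmt"
        "os"

        "github.com/containers/storage/pkg/reexec"
    )

    func init() {
        // Runs instead of main() when the process is re-executed
        // under the registered name.
        reexec.Register("demo-child", func() {
            fmt.Println("child pid:", os.Getpid())
            os.Exit(0)
        })
    }

    func main() {
        if reexec.Init() {
            return // we were the re-executed child
        }
        cmd := reexec.Command("demo-child")
        cmd.Stdout = os.Stdout
        if err := cmd.Run(); err != nil {
            fmt.Println("re-exec failed:", err)
        }
    }
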
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
new file mode 100644
index 00000000000..7279567999a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -0,0 +1,626 @@
+package chunked
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+
+ storage "github.com/containers/storage"
+ "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/ioutils"
+ jsoniter "github.com/json-iterator/go"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ cacheKey = "chunked-manifest-cache"
+ cacheVersion = 1
+)
+
+type metadata struct {
+ tagLen int
+ digestLen int
+ tags []byte
+ vdata []byte
+}
+
+type layer struct {
+ id string
+ metadata *metadata
+ target string
+}
+
+type layersCache struct {
+ layers []layer
+ refs int
+ store storage.Store
+ mutex sync.RWMutex
+ created time.Time
+}
+
+var cacheMutex sync.Mutex
+var cache *layersCache
+
+func (c *layersCache) release() {
+ cacheMutex.Lock()
+ defer cacheMutex.Unlock()
+
+ c.refs--
+ if c.refs == 0 {
+ cache = nil
+ }
+}
+
+func getLayersCacheRef(store storage.Store) *layersCache {
+ cacheMutex.Lock()
+ defer cacheMutex.Unlock()
+ if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
+ cache.refs++
+ return cache
+ }
+ cache := &layersCache{
+ store: store,
+ refs: 1,
+ created: time.Now(),
+ }
+ return cache
+}
+
+func getLayersCache(store storage.Store) (*layersCache, error) {
+ c := getLayersCacheRef(store)
+
+ if err := c.load(); err != nil {
+ c.release()
+ return nil, err
+ }
+ return c, nil
+}
+
+func (c *layersCache) load() error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ allLayers, err := c.store.Layers()
+ if err != nil {
+ return err
+ }
+ existingLayers := make(map[string]string)
+ for _, r := range c.layers {
+ existingLayers[r.id] = r.target
+ }
+
+ currentLayers := make(map[string]string)
+ for _, r := range allLayers {
+ currentLayers[r.ID] = r.ID
+ if _, found := existingLayers[r.ID]; found {
+ continue
+ }
+
+ bigData, err := c.store.LayerBigData(r.ID, cacheKey)
+ // if the cache already exists, read and use it
+ if err == nil {
+ defer bigData.Close()
+ metadata, err := readMetadataFromCache(bigData)
+ if err == nil && metadata != nil {
+ c.addLayer(r.ID, metadata)
+ continue
+ }
+ logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
+ } else if !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ // otherwise create it from the layer TOC.
+ manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
+ if err != nil {
+ continue
+ }
+ defer manifestReader.Close()
+
+ manifest, err := io.ReadAll(manifestReader)
+ if err != nil {
+ return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
+ }
+
+ metadata, err := writeCache(manifest, r.ID, c.store)
+ if err == nil {
+ c.addLayer(r.ID, metadata)
+ }
+ }
+
+ var newLayers []layer
+ for _, l := range c.layers {
+ if _, found := currentLayers[l.id]; found {
+ newLayers = append(newLayers, l)
+ }
+ }
+ c.layers = newLayers
+
+ return nil
+}
+
+// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file
+// is usable for deduplication with hardlinks.
+// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs.
+func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
+ digester := digest.Canonical.Digester()
+
+ modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode)
+ hash := digester.Hash()
+
+ if _, err := hash.Write([]byte(f.Digest)); err != nil {
+ return "", err
+ }
+
+ if _, err := hash.Write([]byte(modeString)); err != nil {
+ return "", err
+ }
+
+ if len(f.Xattrs) > 0 {
+ keys := make([]string, 0, len(f.Xattrs))
+ for k := range f.Xattrs {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ if _, err := hash.Write([]byte(k)); err != nil {
+ return "", err
+ }
+ if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil {
+ return "", err
+ }
+ }
+ }
+ return string(digester.Digest()), nil
+}
+
+// generateFileLocation generates a file location in the form $OFFSET@$PATH
+func generateFileLocation(path string, offset uint64) []byte {
+ return []byte(fmt.Sprintf("%d@%s", offset, path))
+}
+
+// generateTag generates a tag in the form $DIGEST$OFFSET@$LEN.
+// The [OFFSET; LEN] pair points into the variable-length data where the file
+// locations are stored. $DIGEST has length digestLen, stored in the metadata file header.
+func generateTag(digest string, offset, len uint64) string {
+ return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len)
+}
+
+type setBigData interface {
+ // SetLayerBigData stores a (possibly large) chunk of named data
+ SetLayerBigData(id, key string, data io.Reader) error
+}
+
+// writeCache writes a cache for the layer ID.
+// It generates a sorted list of digests, each tagged with the offset and length of its file location in the variable-length data.
+// The same cache is used to look up files, chunks and candidates for deduplication with hard links.
+// There are 3 kinds of digests stored:
+// - digest(file.payload)
+// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
+// - digest(i) for each i in chunks(file payload)
+func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) {
+ var vdata bytes.Buffer
+ tagLen := 0
+ digestLen := 0
+ var tagsBuffer bytes.Buffer
+
+ toc, err := prepareMetadata(manifest)
+ if err != nil {
+ return nil, err
+ }
+
+ var tags []string
+ for _, k := range toc {
+ if k.Digest != "" {
+ location := generateFileLocation(k.Name, 0)
+
+ off := uint64(vdata.Len())
+ l := uint64(len(location))
+
+ d := generateTag(k.Digest, off, l)
+ if tagLen == 0 {
+ tagLen = len(d)
+ }
+ if tagLen != len(d) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, d)
+
+ fp, err := calculateHardLinkFingerprint(k)
+ if err != nil {
+ return nil, err
+ }
+ d = generateTag(fp, off, l)
+ if tagLen != len(d) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, d)
+
+ if _, err := vdata.Write(location); err != nil {
+ return nil, err
+ }
+
+ digestLen = len(k.Digest)
+ }
+ if k.ChunkDigest != "" {
+ location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
+ off := uint64(vdata.Len())
+ l := uint64(len(location))
+ d := generateTag(k.ChunkDigest, off, l)
+ if tagLen == 0 {
+ tagLen = len(d)
+ }
+ if tagLen != len(d) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, d)
+
+ if _, err := vdata.Write(location); err != nil {
+ return nil, err
+ }
+ digestLen = len(k.ChunkDigest)
+ }
+ }
+
+ sort.Strings(tags)
+
+ for _, t := range tags {
+ if _, err := tagsBuffer.Write([]byte(t)); err != nil {
+ return nil, err
+ }
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+ errChan := make(chan error, 1)
+ go func() {
+ defer pipeWriter.Close()
+ defer close(errChan)
+
+ // version
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil {
+ errChan <- err
+ return
+ }
+
+ // len of a tag
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil {
+ errChan <- err
+ return
+ }
+
+ // len of a digest
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil {
+ errChan <- err
+ return
+ }
+
+ // tags length
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
+ errChan <- err
+ return
+ }
+
+ // vdata length
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil {
+ errChan <- err
+ return
+ }
+
+ // tags
+ if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil {
+ errChan <- err
+ return
+ }
+
+ // variable length data
+ if _, err := pipeWriter.Write(vdata.Bytes()); err != nil {
+ errChan <- err
+ return
+ }
+
+ errChan <- nil
+ }()
+ defer pipeReader.Close()
+
+ counter := ioutils.NewWriteCounter(io.Discard)
+
+ r := io.TeeReader(pipeReader, counter)
+
+ if err := dest.SetLayerBigData(id, cacheKey, r); err != nil {
+ return nil, err
+ }
+
+ if err := <-errChan; err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)
+
+ return &metadata{
+ digestLen: digestLen,
+ tagLen: tagLen,
+ tags: tagsBuffer.Bytes(),
+ vdata: vdata.Bytes(),
+ }, nil
+}
+
+func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
+ var version, tagLen, digestLen, tagsLen, vdataLen uint64
+ if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil {
+ return nil, err
+ }
+ if version != cacheVersion {
+ return nil, nil
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil {
+ return nil, err
+ }
+
+ tags := make([]byte, tagsLen)
+ if _, err := io.ReadFull(bigData, tags); err != nil {
+ return nil, err
+ }
+
+ vdata := make([]byte, vdataLen)
+ if _, err := io.ReadFull(bigData, vdata); err != nil {
+ return nil, err
+ }
+
+ return &metadata{
+ tagLen: int(tagLen),
+ digestLen: int(digestLen),
+ tags: tags,
+ vdata: vdata,
+ }, nil
+}
+
+func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
+ toc, err := unmarshalToc(manifest)
+ if err != nil {
+ // ignore errors here. They might be caused by a different manifest format.
+ return nil, nil
+ }
+
+ var r []*internal.FileMetadata
+ chunkSeen := make(map[string]bool)
+ for i := range toc.Entries {
+ d := toc.Entries[i].Digest
+ if d != "" {
+ r = append(r, &toc.Entries[i])
+ continue
+ }
+
+ // chunks do not use hard link dedup so keeping just one candidate is enough
+ cd := toc.Entries[i].ChunkDigest
+ if cd != "" && !chunkSeen[cd] {
+ r = append(r, &toc.Entries[i])
+ chunkSeen[cd] = true
+ }
+ }
+ return r, nil
+}
+
+func (c *layersCache) addLayer(id string, metadata *metadata) error {
+ target, err := c.store.DifferTarget(id)
+ if err != nil {
+ return fmt.Errorf("get checkout directory layer %q: %w", id, err)
+ }
+
+ l := layer{
+ id: id,
+ metadata: metadata,
+ target: target,
+ }
+ c.layers = append(c.layers, l)
+ return nil
+}
+
+func byteSliceAsString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+func findTag(digest string, metadata *metadata) (string, uint64, uint64) {
+ if len(digest) != metadata.digestLen {
+ return "", 0, 0
+ }
+
+ nElements := len(metadata.tags) / metadata.tagLen
+
+ i := sort.Search(nElements, func(i int) bool {
+ d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen])
+ return strings.Compare(d, digest) >= 0
+ })
+ if i < nElements {
+ d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)])
+ if digest == d {
+ startOff := i*metadata.tagLen + metadata.digestLen
+ parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@")
+ off, _ := strconv.ParseInt(parts[0], 10, 64)
+ len, _ := strconv.ParseInt(parts[1], 10, 64)
+ return digest, uint64(off), uint64(len)
+ }
+ }
+ return "", 0, 0
+}
+
+func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) {
+ if digest == "" {
+ return "", "", -1, nil
+ }
+
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ for _, layer := range c.layers {
+ digest, off, len := findTag(digest, layer.metadata)
+ if digest != "" {
+ position := string(layer.metadata.vdata[off : off+len])
+ parts := strings.SplitN(position, "@", 2)
+ offFile, _ := strconv.ParseInt(parts[0], 10, 64)
+ return layer.target, parts[1], offFile, nil
+ }
+ }
+
+ return "", "", -1, nil
+}
+
+// findFileInOtherLayers finds the specified file in other layers.
+// file is the file to look for.
+func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) {
+ digest := file.Digest
+ if useHardLinks {
+ var err error
+ digest, err = calculateHardLinkFingerprint(file)
+ if err != nil {
+ return "", "", err
+ }
+ }
+ target, name, off, err := c.findDigestInternal(digest)
+ if off == 0 {
+ return target, name, err
+ }
+ return "", "", nil
+}
+
+func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
+ return c.findDigestInternal(chunk.ChunkDigest)
+}
+
+func unmarshalToc(manifest []byte) (*internal.TOC, error) {
+ var buf bytes.Buffer
+ count := 0
+ var toc internal.TOC
+
+ iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ if field != "entries" {
+ iter.Skip()
+ continue
+ }
+ for iter.ReadArray() {
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ switch field {
+ case "type", "name", "linkName", "digest", "chunkDigest", "chunkType":
+ count += len(iter.ReadStringAsSlice())
+ case "xattrs":
+ for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+ count += len(iter.ReadStringAsSlice())
+ }
+ default:
+ iter.Skip()
+ }
+ }
+ }
+ break
+ }
+
+ buf.Grow(count)
+
+ getString := func(b []byte) string {
+ from := buf.Len()
+ buf.Write(b)
+ to := buf.Len()
+ return byteSliceAsString(buf.Bytes()[from:to])
+ }
+
+ iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ if field == "version" {
+ toc.Version = iter.ReadInt()
+ continue
+ }
+ if field != "entries" {
+ iter.Skip()
+ continue
+ }
+ for iter.ReadArray() {
+ var m internal.FileMetadata
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ switch field {
+ case "type":
+ m.Type = getString(iter.ReadStringAsSlice())
+ case "name":
+ m.Name = getString(iter.ReadStringAsSlice())
+ case "linkName":
+ m.Linkname = getString(iter.ReadStringAsSlice())
+ case "mode":
+ m.Mode = iter.ReadInt64()
+ case "size":
+ m.Size = iter.ReadInt64()
+ case "UID":
+ m.UID = iter.ReadInt()
+ case "GID":
+ m.GID = iter.ReadInt()
+ case "ModTime":
+ time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+ if err != nil {
+ return nil, err
+ }
+ m.ModTime = &time
+ case "accesstime":
+ time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+ if err != nil {
+ return nil, err
+ }
+ m.AccessTime = &time
+ case "changetime":
+ time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+ if err != nil {
+ return nil, err
+ }
+ m.ChangeTime = &time
+ case "devMajor":
+ m.Devmajor = iter.ReadInt64()
+ case "devMinor":
+ m.Devminor = iter.ReadInt64()
+ case "digest":
+ m.Digest = getString(iter.ReadStringAsSlice())
+ case "offset":
+ m.Offset = iter.ReadInt64()
+ case "endOffset":
+ m.EndOffset = iter.ReadInt64()
+ case "chunkSize":
+ m.ChunkSize = iter.ReadInt64()
+ case "chunkOffset":
+ m.ChunkOffset = iter.ReadInt64()
+ case "chunkDigest":
+ m.ChunkDigest = getString(iter.ReadStringAsSlice())
+ case "chunkType":
+ m.ChunkType = getString(iter.ReadStringAsSlice())
+ case "xattrs":
+ m.Xattrs = make(map[string]string)
+ for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+ value := iter.ReadStringAsSlice()
+ m.Xattrs[key] = getString(value)
+ }
+ default:
+ iter.Skip()
+ }
+ }
+ toc.Entries = append(toc.Entries, m)
+ }
+ break
+ }
+ toc.StringsBuf = buf
+ return &toc, nil
+}
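
The cache introduced above is essentially two byte slices: fixed-width tags (digest plus zero-padded offset@length) kept sorted so sort.Search can binary-search them, and a variable-length blob the tags point into. A self-contained toy version of that lookup scheme (digestLen shortened for readability):

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    const digestLen = 8 // stand-in; the real cache stores full digest strings

    // tag layout: DIGEST + "%.20d@%.20d" (offset@length into vdata)
    func makeTag(digest string, off, l uint64) string {
        return fmt.Sprintf("%s%.20d@%.20d", digest, off, l)
    }

    func main() {
        var vdata strings.Builder
        var tags []string
        for _, f := range []struct{ digest, loc string }{
            {"aaaaaaaa", "0@usr/bin/true"},
            {"bbbbbbbb", "0@etc/passwd"},
        } {
            off := uint64(vdata.Len())
            vdata.WriteString(f.loc)
            tags = append(tags, makeTag(f.digest, off, uint64(len(f.loc))))
        }
        sort.Strings(tags)

        want := "bbbbbbbb"
        i := sort.Search(len(tags), func(i int) bool {
            return tags[i][:digestLen] >= want
        })
        if i < len(tags) && strings.HasPrefix(tags[i], want) {
            var off, l uint64
            fmt.Sscanf(tags[i][digestLen:], "%d@%d", &off, &l)
            fmt.Println("found:", vdata.String()[off:off+l]) // found: 0@etc/passwd
        }
    }
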
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go
index 96254bc4e54..8d4d3c4a74e 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go
@@ -4,6 +4,7 @@ import (
archivetar "archive/tar"
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"io"
"strconv"
@@ -14,7 +15,6 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/vbatts/tar-split/archive/tar"
)
@@ -92,7 +92,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
*/
tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64)
if err != nil {
- return nil, 0, errors.Wrap(err, "parse ToC offset")
+ return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
}
size := int64(blobSize - footerSize - tocOffset)
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
index 092cf584af3..362c168d02a 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -5,9 +5,9 @@ package compressor
// larger software like the graph drivers.
import (
+ "bufio"
"encoding/base64"
"io"
- "io/ioutil"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/ioutils"
@@ -15,6 +15,187 @@ import (
"github.com/vbatts/tar-split/archive/tar"
)
+const RollsumBits = 16
+const holesThreshold = int64(1 << 10)
+
+type holesFinder struct {
+ reader *bufio.Reader
+ zeros int64
+ threshold int64
+
+ state int
+}
+
+const (
+ holesFinderStateRead = iota
+ holesFinderStateAccumulate
+ holesFinderStateFound
+ holesFinderStateEOF
+)
+
+// ReadByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.threshold consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, 0, nil).
+func (f *holesFinder) ReadByte() (int64, byte, error) {
+ for {
+ switch f.state {
+ // reading the file stream
+ case holesFinderStateRead:
+ if f.zeros > 0 {
+ f.zeros--
+ return 0, 0, nil
+ }
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ return 0, b, err
+ }
+
+ if b != 0 {
+ return 0, b, err
+ }
+
+ f.zeros = 1
+ if f.zeros == f.threshold {
+ f.state = holesFinderStateFound
+ } else {
+ f.state = holesFinderStateAccumulate
+ }
+ // accumulating zeros, but still didn't reach the threshold
+ case holesFinderStateAccumulate:
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ f.state = holesFinderStateEOF
+ continue
+ }
+ return 0, b, err
+ }
+
+ if b == 0 {
+ f.zeros++
+ if f.zeros == f.threshold {
+ f.state = holesFinderStateFound
+ }
+ } else {
+ if err := f.reader.UnreadByte(); err != nil {
+ return 0, 0, err
+ }
+ f.state = holesFinderStateRead
+ }
+ // found a hole. Number of zeros >= threshold
+ case holesFinderStateFound:
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ f.state = holesFinderStateEOF
+ }
+ holeLen := f.zeros
+ f.zeros = 0
+ return holeLen, 0, nil
+ }
+ if b != 0 {
+ if err := f.reader.UnreadByte(); err != nil {
+ return 0, 0, err
+ }
+ f.state = holesFinderStateRead
+
+ holeLen := f.zeros
+ f.zeros = 0
+ return holeLen, 0, nil
+ }
+ f.zeros++
+ // reached EOF. Flush pending zeros if any.
+ case holesFinderStateEOF:
+ if f.zeros > 0 {
+ f.zeros--
+ return 0, 0, nil
+ }
+ return 0, 0, io.EOF
+ }
+ }
+}
+
+type rollingChecksumReader struct {
+ reader *holesFinder
+ closed bool
+ rollsum *RollSum
+ pendingHole int64
+
+ // WrittenOut is the total number of bytes read from
+ // the stream.
+ WrittenOut int64
+
+ // IsLastChunkZeros tells whether the last generated
+ // chunk is a hole (made of consecutive zeros). If it
+ // is false, then the last chunk is a data chunk
+ // generated by the rolling checksum.
+ IsLastChunkZeros bool
+}
+
+func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
+ rc.IsLastChunkZeros = false
+
+ if rc.pendingHole > 0 {
+ toCopy := int64(len(b))
+ if rc.pendingHole < toCopy {
+ toCopy = rc.pendingHole
+ }
+ rc.pendingHole -= toCopy
+ for i := int64(0); i < toCopy; i++ {
+ b[i] = 0
+ }
+
+ rc.WrittenOut += toCopy
+
+ rc.IsLastChunkZeros = true
+
+ // if there are no other zeros left, terminate the chunk
+ return rc.pendingHole == 0, int(toCopy), nil
+ }
+
+ if rc.closed {
+ return false, 0, io.EOF
+ }
+
+ for i := 0; i < len(b); i++ {
+ holeLen, n, err := rc.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ rc.closed = true
+ if i == 0 {
+ return false, 0, err
+ }
+ return false, i, nil
+ }
+ // Report any other error type
+ return false, -1, err
+ }
+ if holeLen > 0 {
+ for j := int64(0); j < holeLen; j++ {
+ rc.rollsum.Roll(0)
+ }
+ rc.pendingHole = holeLen
+ return true, i, nil
+ }
+ b[i] = n
+ rc.WrittenOut++
+ rc.rollsum.Roll(n)
+ if rc.rollsum.OnSplitWithBits(RollsumBits) {
+ return true, i + 1, nil
+ }
+ }
+ return false, len(b), nil
+}
+
+type chunk struct {
+ ChunkOffset int64
+ Offset int64
+ Checksum string
+ ChunkSize int64
+ ChunkType string
+}
+
func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
// total written so far. Used to retrieve partial offsets in the file
dest := ioutils.NewWriteCounter(destFile)
@@ -64,40 +245,78 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
if _, err := zstdWriter.Write(rawBytes); err != nil {
return err
}
- payloadDigester := digest.Canonical.Digester()
- payloadChecksum := payloadDigester.Hash()
- payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
+ payloadDigester := digest.Canonical.Digester()
+ chunkDigester := digest.Canonical.Digester()
// Now handle the payload, if any
- var startOffset, endOffset int64
+ startOffset := int64(0)
+ lastOffset := int64(0)
+ lastChunkOffset := int64(0)
+
checksum := ""
+
+ chunks := []chunk{}
+
+ hf := &holesFinder{
+ threshold: holesThreshold,
+ reader: bufio.NewReader(tr),
+ }
+
+ rcReader := &rollingChecksumReader{
+ reader: hf,
+ rollsum: NewRollSum(),
+ }
+
+ payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
for {
- read, errRead := tr.Read(buf)
+ mustSplit, read, errRead := rcReader.Read(buf)
if errRead != nil && errRead != io.EOF {
return err
}
-
- // restart the compression only if there is
- // a payload.
+ // restart the compression only if there is a payload.
if read > 0 {
if startOffset == 0 {
startOffset, err = restartCompression()
if err != nil {
return err
}
+ lastOffset = startOffset
+ }
+
+ if _, err := payloadDest.Write(buf[:read]); err != nil {
+ return err
}
- _, err := payloadDest.Write(buf[:read])
+ }
+ if (mustSplit || errRead == io.EOF) && startOffset > 0 {
+ off, err := restartCompression()
if err != nil {
return err
}
+
+ chunkSize := rcReader.WrittenOut - lastChunkOffset
+ if chunkSize > 0 {
+ chunkType := internal.ChunkTypeData
+ if rcReader.IsLastChunkZeros {
+ chunkType = internal.ChunkTypeZeros
+ }
+
+ chunks = append(chunks, chunk{
+ ChunkOffset: lastChunkOffset,
+ Offset: lastOffset,
+ Checksum: chunkDigester.Digest().String(),
+ ChunkSize: chunkSize,
+ ChunkType: chunkType,
+ })
+ }
+
+ lastOffset = off
+ lastChunkOffset = rcReader.WrittenOut
+ chunkDigester = digest.Canonical.Digester()
+ payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
}
if errRead == io.EOF {
if startOffset > 0 {
- endOffset, err = restartCompression()
- if err != nil {
- return err
- }
checksum = payloadDigester.Digest().String()
}
break
@@ -112,30 +331,42 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
for k, v := range hdr.Xattrs {
xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
}
- m := internal.FileMetadata{
- Type: typ,
- Name: hdr.Name,
- Linkname: hdr.Linkname,
- Mode: hdr.Mode,
- Size: hdr.Size,
- UID: hdr.Uid,
- GID: hdr.Gid,
- ModTime: hdr.ModTime,
- AccessTime: hdr.AccessTime,
- ChangeTime: hdr.ChangeTime,
- Devmajor: hdr.Devmajor,
- Devminor: hdr.Devminor,
- Xattrs: xattrs,
- Digest: checksum,
- Offset: startOffset,
- EndOffset: endOffset,
-
- // ChunkSize is 0 for the last chunk
- ChunkSize: 0,
- ChunkOffset: 0,
- ChunkDigest: checksum,
- }
- metadata = append(metadata, m)
+ entries := []internal.FileMetadata{
+ {
+ Type: typ,
+ Name: hdr.Name,
+ Linkname: hdr.Linkname,
+ Mode: hdr.Mode,
+ Size: hdr.Size,
+ UID: hdr.Uid,
+ GID: hdr.Gid,
+ ModTime: &hdr.ModTime,
+ AccessTime: &hdr.AccessTime,
+ ChangeTime: &hdr.ChangeTime,
+ Devmajor: hdr.Devmajor,
+ Devminor: hdr.Devminor,
+ Xattrs: xattrs,
+ Digest: checksum,
+ Offset: startOffset,
+ EndOffset: lastOffset,
+ },
+ }
+ for i := 1; i < len(chunks); i++ {
+ entries = append(entries, internal.FileMetadata{
+ Type: internal.TypeChunk,
+ Name: hdr.Name,
+ ChunkOffset: chunks[i].ChunkOffset,
+ })
+ }
+ if len(chunks) > 1 {
+ for i := range chunks {
+ entries[i].ChunkSize = chunks[i].ChunkSize
+ entries[i].Offset = chunks[i].Offset
+ entries[i].ChunkDigest = chunks[i].Checksum
+ entries[i].ChunkType = chunks[i].ChunkType
+ }
+ }
+ metadata = append(metadata, entries...)
}
rawBytes := tr.RawBytes()
@@ -198,7 +429,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level
go func() {
ch <- writeZstdChunkedStream(out, metadata, r, level)
- io.Copy(ioutil.Discard, r)
+ io.Copy(io.Discard, r)
r.Close()
close(ch)
}()
@@ -212,7 +443,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
if level == nil {
- l := 3
+ l := 10
level = &l
}
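
The new holesFinder/rollingChecksumReader pair splits each payload into content-defined chunks and records long runs of zeros as "zeros" chunks so sparse files deduplicate well. A toy, in-memory version of just the hole detection, assuming the same idea of a zero-run threshold:

    package main

    import "fmt"

    type segment struct {
        len  int
        hole bool
    }

    // splitHoles partitions data into alternating segments, marking any
    // run of zeros at least threshold bytes long as a hole.
    func splitHoles(data []byte, threshold int) []segment {
        var segs []segment
        add := func(n int, hole bool) {
            if n == 0 {
                return
            }
            if len(segs) > 0 && segs[len(segs)-1].hole == hole {
                segs[len(segs)-1].len += n
                return
            }
            segs = append(segs, segment{n, hole})
        }
        i := 0
        for i < len(data) {
            j := i
            for j < len(data) && data[j] == 0 {
                j++
            }
            // long zero run: record as a hole; short runs stay data
            add(j-i, j-i >= threshold)
            if j < len(data) {
                add(1, false) // the non-zero byte that ended the run
                j++
            }
            i = j
        }
        return segs
    }

    func main() {
        data := append([]byte("abc"), make([]byte, 2048)...)
        data = append(data, 'x')
        fmt.Println(splitHoles(data, 1024)) // [{3 false} {2048 true} {1 false}]
    }
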
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
new file mode 100644
index 00000000000..f4dfad822e9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2011 The Perkeep Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file implements rolling checksums similar to apenwarr's bup, which
+// is similar to librsync.
+//
+// The bup project is at https://github.com/apenwarr/bup and its splitting in
+// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c
+package compressor
+
+import (
+ "math/bits"
+)
+
+const windowSize = 64 // Roll assumes windowSize is a power of 2
+const charOffset = 31
+
+const blobBits = 13
+const blobSize = 1 << blobBits // 8k
+
+type RollSum struct {
+ s1, s2 uint32
+ window [windowSize]uint8
+ wofs int
+}
+
+func NewRollSum() *RollSum {
+ return &RollSum{
+ s1: windowSize * charOffset,
+ s2: windowSize * (windowSize - 1) * charOffset,
+ }
+}
+
+func (rs *RollSum) add(drop, add uint32) {
+ s1 := rs.s1 + add - drop
+ rs.s1 = s1
+ rs.s2 += s1 - uint32(windowSize)*(drop+charOffset)
+}
+
+// Roll adds ch to the rolling sum.
+func (rs *RollSum) Roll(ch byte) {
+ wp := &rs.window[rs.wofs]
+ rs.add(uint32(*wp), uint32(ch))
+ *wp = ch
+ rs.wofs = (rs.wofs + 1) & (windowSize - 1)
+}
+
+// OnSplit reports whether the blobBits (13) trailing bits of
+// the current checksum are all set, which marks a chunk boundary.
+func (rs *RollSum) OnSplit() bool {
+ return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1))
+}
+
+// OnSplitWithBits reports whether the n trailing bits of the
+// current checksum are all set, which marks a chunk boundary.
+func (rs *RollSum) OnSplitWithBits(n uint32) bool {
+ mask := (uint32(1) << n) - 1
+ return rs.s2&mask == (^uint32(0))&mask
+}
+
+func (rs *RollSum) Bits() int {
+ rsum := rs.Digest() >> (blobBits + 1)
+ return blobBits + bits.TrailingZeros32(^rsum)
+}
+
+func (rs *RollSum) Digest() uint32 {
+ return (rs.s1 << 16) | (rs.s2 & 0xffff)
+}
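
A usage sketch for the rolling checksum just vendored: feed bytes through Roll and cut a chunk whenever OnSplitWithBits fires. With 16 bits (the compressor's RollsumBits) the expected chunk size on random input is about 64 KiB. Import path as vendored here:

    package main

    import (
        "crypto/rand"
        "fmt"

        "github.com/containers/storage/pkg/chunked/compressor"
    )

    func main() {
        data := make([]byte, 1<<20)
        rand.Read(data)

        rs := compressor.NewRollSum()
        last := 0
        var sizes []int
        for i, b := range data {
            rs.Roll(b)
            // A boundary fires where the low 16 bits of the checksum are
            // all set, so chunks average roughly 2^16 bytes.
            if rs.OnSplitWithBits(16) {
                sizes = append(sizes, i+1-last)
                last = i + 1
            }
        }
        fmt.Println("chunks:", len(sizes))
    }
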
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
index c91c43d85cd..3bb5286d92d 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
@@ -8,11 +8,11 @@ import (
"archive/tar"
"bytes"
"encoding/binary"
- "encoding/json"
"fmt"
"io"
"time"
+ jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zstd"
"github.com/opencontainers/go-digest"
)
@@ -20,6 +20,9 @@ import (
type TOC struct {
Version int `json:"version"`
Entries []FileMetadata `json:"entries"`
+
+ // internal: used by unmarshalToc
+ StringsBuf bytes.Buffer `json:"-"`
}
type FileMetadata struct {
@@ -27,25 +30,33 @@ type FileMetadata struct {
Name string `json:"name"`
Linkname string `json:"linkName,omitempty"`
Mode int64 `json:"mode,omitempty"`
- Size int64 `json:"size"`
- UID int `json:"uid"`
- GID int `json:"gid"`
- ModTime time.Time `json:"modtime"`
- AccessTime time.Time `json:"accesstime"`
- ChangeTime time.Time `json:"changetime"`
- Devmajor int64 `json:"devMajor"`
- Devminor int64 `json:"devMinor"`
+ Size int64 `json:"size,omitempty"`
+ UID int `json:"uid,omitempty"`
+ GID int `json:"gid,omitempty"`
+ ModTime *time.Time `json:"modtime,omitempty"`
+ AccessTime *time.Time `json:"accesstime,omitempty"`
+ ChangeTime *time.Time `json:"changetime,omitempty"`
+ Devmajor int64 `json:"devMajor,omitempty"`
+ Devminor int64 `json:"devMinor,omitempty"`
Xattrs map[string]string `json:"xattrs,omitempty"`
Digest string `json:"digest,omitempty"`
Offset int64 `json:"offset,omitempty"`
EndOffset int64 `json:"endOffset,omitempty"`
- // Currently chunking is not supported.
ChunkSize int64 `json:"chunkSize,omitempty"`
ChunkOffset int64 `json:"chunkOffset,omitempty"`
ChunkDigest string `json:"chunkDigest,omitempty"`
+ ChunkType string `json:"chunkType,omitempty"`
+
+ // internal: computed by mergeTOCEntries.
+ Chunks []*FileMetadata `json:"-"`
}
+const (
+ ChunkTypeData = ""
+ ChunkTypeZeros = "zeros"
+)
+
const (
TypeReg = "reg"
TypeChunk = "chunk"
@@ -123,6 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
Entries: metadata,
}
+ var json = jsoniter.ConfigCompatibleWithStandardLibrary
// Generate the manifest
manifest, err := json.Marshal(toc)
if err != nil {
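
The switch from time.Time to *time.Time above is what makes omitempty effective: encoding/json never treats a non-pointer struct value as empty, so zero timestamps were previously serialized into every TOC entry. A small demonstration:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    type byValue struct {
        ModTime time.Time `json:"modtime,omitempty"`
    }

    type byPointer struct {
        ModTime *time.Time `json:"modtime,omitempty"`
    }

    func main() {
        a, _ := json.Marshal(byValue{})   // omitempty ignored for struct values
        b, _ := json.Marshal(byPointer{}) // nil pointer is omitted
        fmt.Println(string(a))            // {"modtime":"0001-01-01T00:00:00Z"}
        fmt.Println(string(b))            // {}
    }
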
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage.go b/vendor/github.com/containers/storage/pkg/chunked/storage.go
index 9212cbbcff8..f0bd36273e6 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage.go
@@ -1,7 +1,6 @@
package chunked
import (
- "fmt"
"io"
)
@@ -22,5 +21,5 @@ type ErrBadRequest struct {
}
func (e ErrBadRequest) Error() string {
- return fmt.Sprintf("bad request")
+ return "bad request"
}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 7bd804c4484..83d6e2f88c9 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -4,15 +4,17 @@ import (
archivetar "archive/tar"
"context"
"encoding/base64"
- "encoding/json"
+ "errors"
"fmt"
+ "hash"
"io"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
+ "sync"
+ "sync/atomic"
"syscall"
"time"
@@ -25,10 +27,10 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
+ securejoin "github.com/cyphar/filepath-securejoin"
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/archive/tar"
"golang.org/x/sys/unix"
@@ -41,24 +43,35 @@ const (
bigDataKey = "zstd-chunked-manifest"
fileTypeZstdChunked = iota
- fileTypeEstargz = iota
+ fileTypeEstargz
+ fileTypeNoCompression
+ fileTypeHole
+
+ copyGoRoutines = 32
)
type compressedFileType int
type chunkedDiffer struct {
- stream ImageSourceSeekable
- manifest []byte
- layersMetadata map[string][]internal.FileMetadata
- layersTarget map[string]string
- tocOffset int64
- fileType compressedFileType
+ stream ImageSourceSeekable
+ manifest []byte
+ layersCache *layersCache
+ tocOffset int64
+ fileType compressedFileType
+
+ copyBuffer []byte
gzipReader *pgzip.Reader
+ zstdReader *zstd.Decoder
+ rawReader io.Reader
+}
+
+var xattrsToIgnore = map[string]interface{}{
+ "security.selinux": true,
}
-func timeToTimespec(time time.Time) (ts unix.Timespec) {
- if time.IsZero() {
+func timeToTimespec(time *time.Time) (ts unix.Timespec) {
+ if time == nil || time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
@@ -67,11 +80,29 @@ func timeToTimespec(time time.Time) (ts unix.Timespec) {
return unix.NsecToTimespec(time.UnixNano())
}
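An aside on the special value above: ((1 << 30) - 2) is the kernel's UTIME_OMIT, which tells utimensat(2) to leave that timestamp untouched. A minimal sketch, assuming fd is an open descriptor:

    // keep atime as-is, set mtime to now
    ts := []unix.Timespec{
        {Sec: 0, Nsec: (1 << 30) - 2}, // UTIME_OMIT
        unix.NsecToTimespec(time.Now().UnixNano()),
    }
    err := unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0)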
+func doHardLink(srcFd int, destDirFd int, destBase string) error {
+ doLink := func() error {
+		// Using unix.AT_EMPTY_PATH would require CAP_DAC_READ_SEARCH, while this variant,
+		// which goes through /proc/self/fd, doesn't, so it also works rootless.
+ srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd)
+ return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)
+ }
+
+ err := doLink()
+
+ // if the destination exists, unlink it first and try again
+ if err != nil && os.IsExist(err) {
+ unix.Unlinkat(destDirFd, destBase, 0)
+ return doLink()
+ }
+ return err
+}
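For context, the two linkat(2) forms mentioned in the comment look like this; the first requires CAP_DAC_READ_SEARCH, the second also works rootless:

    // privileged form, avoided above:
    err := unix.Linkat(srcFd, "", destDirFd, destBase, unix.AT_EMPTY_PATH)
    // rootless-friendly form used by doHardLink:
    err = unix.Linkat(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", srcFd), destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)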
+
func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
src := fmt.Sprintf("/proc/self/fd/%d", srcFd)
st, err := os.Stat(src)
if err != nil {
- return nil, -1, err
+ return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err)
}
copyWithFileRange, copyWithFileClone := true, true
@@ -83,20 +114,7 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us
if err == nil {
defer destDir.Close()
- doLink := func() error {
- // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses
- // /proc/self/fd doesn't and can be used with rootless.
- srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd)
- return unix.Linkat(unix.AT_FDCWD, srcPath, int(destDir.Fd()), destBase, unix.AT_SYMLINK_FOLLOW)
- }
-
- err := doLink()
-
- // if the destination exists, unlink it first and try again
- if err != nil && os.IsExist(err) {
- unix.Unlinkat(int(destDir.Fd()), destBase, 0)
- err = doLink()
- }
+ err := doHardLink(srcFd, int(destDir.Fd()), destBase)
if err == nil {
return nil, st.Size(), nil
}
@@ -106,63 +124,15 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us
// If the destination file already exists, we shouldn't blow it away
dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode)
if err != nil {
- return nil, -1, err
+ return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err)
}
err = driversCopy.CopyRegularToFile(src, dstFile, st, &copyWithFileRange, &copyWithFileClone)
if err != nil {
dstFile.Close()
- return nil, -1, err
+ return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err)
}
- return dstFile, st.Size(), err
-}
-
-func prepareOtherLayersCache(layersMetadata map[string][]internal.FileMetadata) map[string]map[string][]*internal.FileMetadata {
- maps := make(map[string]map[string][]*internal.FileMetadata)
-
- for layerID, v := range layersMetadata {
- r := make(map[string][]*internal.FileMetadata)
- for i := range v {
- if v[i].Digest != "" {
- r[v[i].Digest] = append(r[v[i].Digest], &v[i])
- }
- }
- maps[layerID] = r
- }
- return maps
-}
-
-func getLayersCache(store storage.Store) (map[string][]internal.FileMetadata, map[string]string, error) {
- allLayers, err := store.Layers()
- if err != nil {
- return nil, nil, err
- }
-
- layersMetadata := make(map[string][]internal.FileMetadata)
- layersTarget := make(map[string]string)
- for _, r := range allLayers {
- manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
- if err != nil {
- continue
- }
- defer manifestReader.Close()
- manifest, err := ioutil.ReadAll(manifestReader)
- if err != nil {
- return nil, nil, err
- }
- var toc internal.TOC
- if err := json.Unmarshal(manifest, &toc); err != nil {
- continue
- }
- layersMetadata[r.ID] = toc.Entries
- target, err := store.DifferTarget(r.ID)
- if err != nil {
- return nil, nil, err
- }
- layersTarget[r.ID] = target
- }
-
- return layersMetadata, layersTarget, nil
+ return dstFile, st.Size(), nil
}
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
@@ -179,67 +149,71 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
- layersMetadata, layersTarget, err := getLayersCache(store)
+ layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
}
return &chunkedDiffer{
- stream: iss,
- manifest: manifest,
- layersMetadata: layersMetadata,
- layersTarget: layersTarget,
- tocOffset: tocOffset,
- fileType: fileTypeZstdChunked,
+ copyBuffer: makeCopyBuffer(),
+ stream: iss,
+ manifest: manifest,
+ layersCache: layersCache,
+ tocOffset: tocOffset,
+ fileType: fileTypeZstdChunked,
}, nil
}
func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
- layersMetadata, layersTarget, err := getLayersCache(store)
+ layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
}
return &chunkedDiffer{
- stream: iss,
- manifest: manifest,
- layersMetadata: layersMetadata,
- layersTarget: layersTarget,
- tocOffset: tocOffset,
- fileType: fileTypeEstargz,
+ copyBuffer: makeCopyBuffer(),
+ stream: iss,
+ manifest: manifest,
+ layersCache: layersCache,
+ tocOffset: tocOffset,
+ fileType: fileTypeEstargz,
}, nil
}
+func makeCopyBuffer() []byte {
+ return make([]byte, 2<<20)
+}
+
// copyFileFromOtherLayer copies a file from another layer
// file is the file to look for.
// source is the path to the source layer checkout.
-// otherFile contains the metadata for the file.
+// name is the path to the file to copy in source.
// dirfd is an open file descriptor to the destination root directory.
// useHardLinks defines whether the deduplication can be performed using hard links.
-func copyFileFromOtherLayer(file *internal.FileMetadata, source string, otherFile *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
if err != nil {
- return false, nil, 0, err
+ return false, nil, 0, fmt.Errorf("open source file: %w", err)
}
defer unix.Close(srcDirfd)
- srcFile, err := openFileUnderRoot(otherFile.Name, srcDirfd, unix.O_RDONLY, 0)
+ srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0)
if err != nil {
- return false, nil, 0, err
+ return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err)
}
defer srcFile.Close()
dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks)
if err != nil {
- return false, nil, 0, err
+ return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err)
}
- return true, dstFile, written, err
+ return true, dstFile, written, nil
}
// canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile.
@@ -275,10 +249,6 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo
return false
}
- xattrsToIgnore := map[string]interface{}{
- "security.selinux": true,
- }
-
xattrs := make(map[string]string)
for _, x := range listXattrs {
v, err := system.Lgetxattr(path, x)
@@ -301,117 +271,67 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo
return canDedupMetadataWithHardLink(file, &otherFile)
}
-// findFileInOtherLayers finds the specified file in other layers.
+// findFileInOSTreeRepos checks whether the requested file already exists in one of the OSTree repos and copies the file content from there if possible.
// file is the file to look for.
-// dirfd is an open file descriptor to the checkout root directory.
-// layersMetadata contains the metadata for each layer in the storage.
-// layersTarget maps each layer to its checkout on disk.
+// ostreeRepos is a list of OSTree repos.
+// dirfd is an open fd to the destination checkout.
// useHardLinks defines whether the deduplication can be performed using hard links.
-func findFileInOtherLayers(file *internal.FileMetadata, dirfd int, layersMetadata map[string]map[string][]*internal.FileMetadata, layersTarget map[string]string, useHardLinks bool) (bool, *os.File, int64, error) {
- // this is ugly, needs to be indexed
- for layerID, checksums := range layersMetadata {
- source, ok := layersTarget[layerID]
- if !ok {
+func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+ digest, err := digest.Parse(file.Digest)
+ if err != nil {
+ return false, nil, 0, nil
+ }
+ payloadLink := digest.Encoded() + ".payload-link"
+ if len(payloadLink) < 2 {
+ return false, nil, 0, nil
+ }
+
+ for _, repo := range ostreeRepos {
+ sourceFile := filepath.Join(repo, "objects", payloadLink[:2], payloadLink[2:])
+ st, err := os.Stat(sourceFile)
+ if err != nil || !st.Mode().IsRegular() {
continue
}
- files, found := checksums[file.Digest]
- if !found {
+ if st.Size() != file.Size {
continue
}
- for _, candidate := range files {
- // check if it is a valid candidate to dedup file
- if useHardLinks && !canDedupMetadataWithHardLink(file, candidate) {
- continue
- }
+ fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
+ if err != nil {
+ return false, nil, 0, nil
+ }
+ f := os.NewFile(uintptr(fd), "fd")
+ defer f.Close()
- found, dstFile, written, err := copyFileFromOtherLayer(file, source, candidate, dirfd, useHardLinks)
- if found && err == nil {
- return found, dstFile, written, err
- }
+ // check if the open file can be deduplicated with hard links
+ if useHardLinks && !canDedupFileWithHardLink(file, fd, st) {
+ continue
}
+
+ dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
+ if err != nil {
+ return false, nil, 0, nil
+ }
+ return true, dstFile, written, nil
}
// If hard links deduplication was used and it has failed, try again without hard links.
if useHardLinks {
- return findFileInOtherLayers(file, dirfd, layersMetadata, layersTarget, false)
+ return findFileInOSTreeRepos(file, ostreeRepos, dirfd, false)
}
- return false, nil, 0, nil
-}
-func getFileDigest(f *os.File) (digest.Digest, error) {
- digester := digest.Canonical.Digester()
- if _, err := io.Copy(digester.Hash(), f); err != nil {
- return "", err
- }
- return digester.Digest(), nil
+ return false, nil, 0, nil
}
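A sketch of the repository layout assumed above (digest value hypothetical): the payload object for a file is fanned out under objects/ by the first two characters of its encoded digest:

    d, _ := digest.Parse("sha256:ab12...") // 64 hex digits in practice
    payloadLink := d.Encoded() + ".payload-link"
    // e.g. <repo>/objects/ab/12....payload-link
    sourceFile := filepath.Join(repo, "objects", payloadLink[:2], payloadLink[2:])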
-// findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible.
-// It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different
-// paths.
+// findFileInOtherLayers finds the specified file in other layers.
+// cache is the layers cache to use.
// file is the file to look for.
-// dirfd is an open fd to the destination checkout.
+// dirfd is an open file descriptor to the checkout root directory.
// useHardLinks defines whether the deduplication can be performed using hard links.
-func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
- sourceFile := filepath.Clean(filepath.Join("/", file.Name))
- if !strings.HasPrefix(sourceFile, "/usr/") {
- // limit host deduplication to files under /usr.
- return false, nil, 0, nil
- }
-
- st, err := os.Stat(sourceFile)
- if err != nil || !st.Mode().IsRegular() {
- return false, nil, 0, nil
- }
-
- if st.Size() != file.Size {
- return false, nil, 0, nil
- }
-
- fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
- if err != nil {
- return false, nil, 0, nil
- }
-
- f := os.NewFile(uintptr(fd), "fd")
- defer f.Close()
-
- manifestChecksum, err := digest.Parse(file.Digest)
- if err != nil {
+func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+ target, name, err := cache.findFileInOtherLayers(file, useHardLinks)
+ if err != nil || name == "" {
return false, nil, 0, err
}
-
- checksum, err := getFileDigest(f)
- if err != nil {
- return false, nil, 0, err
- }
-
- if checksum != manifestChecksum {
- return false, nil, 0, nil
- }
-
- // check if the open file can be deduplicated with hard links
- useHardLinks = useHardLinks && canDedupFileWithHardLink(file, fd, st)
-
- dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
- if err != nil {
- return false, nil, 0, nil
- }
-
- // calculate the checksum again to make sure the file wasn't modified while it was copied
- if _, err := f.Seek(0, 0); err != nil {
- dstFile.Close()
- return false, nil, 0, err
- }
- checksum, err = getFileDigest(f)
- if err != nil {
- dstFile.Close()
- return false, nil, 0, err
- }
- if checksum != manifestChecksum {
- dstFile.Close()
- return false, nil, 0, nil
- }
- return true, dstFile, written, nil
+ return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks)
}
func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error {
@@ -440,24 +360,52 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption
return nil
}
-type missingFile struct {
- File *internal.FileMetadata
+type originFile struct {
+ Root string
+ Path string
+ Offset int64
+}
+
+type missingFileChunk struct {
Gap int64
+ Hole bool
+
+ File *internal.FileMetadata
+
+ CompressedSize int64
+ UncompressedSize int64
}
-func (m missingFile) Length() int64 {
- return m.File.EndOffset - m.File.Offset
+type missingPart struct {
+ Hole bool
+ SourceChunk *ImageSourceChunk
+ OriginFile *originFile
+ Chunks []missingFileChunk
}
-type missingChunk struct {
- RawChunk ImageSourceChunk
- Files []missingFile
+func (o *originFile) OpenFile() (io.ReadCloser, error) {
+ srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0)
+ if err != nil {
+ return nil, fmt.Errorf("open source file: %w", err)
+ }
+ defer unix.Close(srcDirfd)
+
+ srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0)
+ if err != nil {
+ return nil, fmt.Errorf("open source file under target rootfs: %w", err)
+ }
+
+ if _, err := srcFile.Seek(o.Offset, 0); err != nil {
+ srcFile.Close()
+ return nil, err
+ }
+ return srcFile, nil
}
// setFileAttrs sets the file attributes for file given metadata
-func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
+func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error {
if file == nil || file.Fd() < 0 {
- return errors.Errorf("invalid file")
+ return errors.New("invalid file")
}
fd := int(file.Fd())
@@ -465,211 +413,611 @@ func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetada
if err != nil {
return err
}
+
+ // If it is a symlink, force to use the path
if t == tar.TypeSymlink {
- return nil
+ usePath = true
+ }
+
+ baseName := ""
+ if usePath {
+ dirName := filepath.Dir(metadata.Name)
+ if dirName != "" {
+ parentFd, err := openFileUnderRoot(dirName, dirfd, unix.O_PATH|unix.O_DIRECTORY, 0)
+ if err != nil {
+ return err
+ }
+ defer parentFd.Close()
+
+ dirfd = int(parentFd.Fd())
+ }
+ baseName = filepath.Base(metadata.Name)
+ }
+
+ doChown := func() error {
+ if usePath {
+ return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW)
+ }
+ return unix.Fchown(fd, metadata.UID, metadata.GID)
+ }
+
+ doSetXattr := func(k string, v []byte) error {
+ return unix.Fsetxattr(fd, k, v, 0)
+ }
+
+ doUtimes := func() error {
+ ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)}
+ if usePath {
+ return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW)
+ }
+ return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0)
+ }
+
+ doChmod := func() error {
+ if usePath {
+ return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
+ }
+ return unix.Fchmod(fd, uint32(mode))
}
- if err := unix.Fchown(fd, metadata.UID, metadata.GID); err != nil {
+ if err := doChown(); err != nil {
if !options.IgnoreChownErrors {
- return err
+ return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err)
}
}
+ canIgnore := func(err error) bool {
+ return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP)
+ }
+
for k, v := range metadata.Xattrs {
+ if _, found := xattrsToIgnore[k]; found {
+ continue
+ }
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
- return err
+ return fmt.Errorf("decode xattr %q: %w", v, err)
}
- if err := unix.Fsetxattr(fd, k, data, 0); err != nil {
- return err
+ if err := doSetXattr(k, data); !canIgnore(err) {
+ return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err)
}
}
- ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)}
- if err := unix.UtimesNanoAt(fd, "", ts, 0); err != nil && errors.Is(err, unix.ENOSYS) {
- return err
+ if err := doUtimes(); !canIgnore(err) {
+ return fmt.Errorf("set utimes for %q: %w", metadata.Name, err)
}
- if err := unix.Fchmod(fd, uint32(mode)); err != nil {
- return err
+ if err := doChmod(); !canIgnore(err) {
+ return fmt.Errorf("chmod %q: %w", metadata.Name, err)
}
return nil
}
-// openFileUnderRoot safely opens a file under the specified root directory using openat2
-// name is the path to open relative to dirfd.
-// dirfd is an open file descriptor to the target checkout directory.
-// flags are the flags top pass to the open syscall.
-// mode specifies the mode to use for newly created files.
-func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) {
+func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+ root := fmt.Sprintf("/proc/self/fd/%d", dirfd)
+
+ targetRoot, err := os.Readlink(root)
+ if err != nil {
+ return -1, err
+ }
+
+ hasNoFollow := (flags & unix.O_NOFOLLOW) != 0
+
+ fd := -1
+ // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
+ // last component as the path to openat().
+ if hasNoFollow {
+ dirName := filepath.Dir(name)
+ if dirName != "" {
+ newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name))
+ if err != nil {
+ return -1, err
+ }
+ root = newRoot
+ }
+
+ parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
+ if err != nil {
+ return -1, err
+ }
+ defer unix.Close(parentDirfd)
+
+ fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode))
+ if err != nil {
+ return -1, err
+ }
+ } else {
+ newPath, err := securejoin.SecureJoin(root, name)
+ if err != nil {
+ return -1, err
+ }
+ fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
+ if err != nil {
+ return -1, err
+ }
+ }
+
+ target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd))
+ if err != nil {
+ unix.Close(fd)
+ return -1, err
+ }
+
+ // Add an additional check to make sure the opened fd is inside the rootfs
+ if !strings.HasPrefix(target, targetRoot) {
+ unix.Close(fd)
+ return -1, fmt.Errorf("while resolving %q. It resolves outside the root directory", name)
+ }
+
+ return fd, err
+}
+
+func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
how := unix.OpenHow{
Flags: flags,
Mode: uint64(mode & 07777),
Resolve: unix.RESOLVE_IN_ROOT,
}
+ return unix.Openat2(dirfd, name, &how)
+}
- fd, err := unix.Openat2(dirfd, name, &how)
- if err != nil {
- return nil, err
+// skipOpenat2 is set when openat2 is not supported by the underlying kernel, so that
+// we avoid trying it again.
+var skipOpenat2 int32
+
+// openFileUnderRootRaw tries to open a file using openat2 and, if that is not supported,
+// falls back to a userspace lookup.
+func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+ var fd int
+ var err error
+ if atomic.LoadInt32(&skipOpenat2) > 0 {
+ fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+ } else {
+ fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode)
+ // If the function failed with ENOSYS, switch off the support for openat2
+		// and fall back to the securejoin-based lookup.
+ if err != nil && errors.Is(err, unix.ENOSYS) {
+ atomic.StoreInt32(&skipOpenat2, 1)
+ fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+ }
}
- return os.NewFile(uintptr(fd), name), nil
+ return fd, err
}
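The probe-once fallback above, generalized (fastPath/slowPath are hypothetical names; this is a sketch of the pattern, not library API):

    var skipFast int32

    func openPreferFast() (int, error) {
        if atomic.LoadInt32(&skipFast) == 0 {
            fd, err := fastPath() // e.g. the openat2 variant
            if err == nil || !errors.Is(err, unix.ENOSYS) {
                return fd, err
            }
            atomic.StoreInt32(&skipFast, 1) // the kernel lacks the syscall; remember it
        }
        return slowPath() // e.g. the securejoin-based lookup
    }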
-func (c *chunkedDiffer) createFileFromCompressedStream(dest string, dirfd int, reader io.Reader, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) (err error) {
- file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
- if err != nil {
- return err
- }
- defer func() {
- err2 := file.Close()
- if err == nil {
- err = err2
+// openFileUnderRoot safely opens a file under the specified root directory, using openat2 when available
+// name is the path to open relative to dirfd.
+// dirfd is an open file descriptor to the target checkout directory.
+// flags are the flags to pass to the open syscall.
+// mode specifies the mode to use for newly created files.
+func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) {
+ fd, err := openFileUnderRootRaw(dirfd, name, flags, mode)
+ if err == nil {
+ return os.NewFile(uintptr(fd), name), nil
+ }
+
+ hasCreate := (flags & unix.O_CREAT) != 0
+ if errors.Is(err, unix.ENOENT) && hasCreate {
+ parent := filepath.Dir(name)
+ if parent != "" {
+ newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0)
+ if err2 == nil {
+ defer newDirfd.Close()
+ fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode)
+ if err == nil {
+ return os.NewFile(uintptr(fd), name), nil
+ }
+ }
}
- }()
+ }
+ return nil, fmt.Errorf("open %q under the rootfs: %w", name, err)
+}
- digester := digest.Canonical.Digester()
- checksum := digester.Hash()
- to := io.MultiWriter(file, checksum)
+// openOrCreateDirUnderRoot safely opens a directory, or creates it if it is missing.
+// name is the path to open relative to dirfd.
+// dirfd is an open file descriptor to the target checkout directory.
+// mode specifies the mode to use for newly created files.
+func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) {
+ fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode)
+ if err == nil {
+ return os.NewFile(uintptr(fd), name), nil
+ }
+
+ if errors.Is(err, unix.ENOENT) {
+ parent := filepath.Dir(name)
+ if parent != "" {
+ pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode)
+ if err2 != nil {
+ return nil, err
+ }
+ defer pDir.Close()
- switch c.fileType {
- case fileTypeZstdChunked:
- z, err := zstd.NewReader(reader)
- if err != nil {
- return err
- }
- defer z.Close()
+ baseName := filepath.Base(name)
- if _, err := io.Copy(to, io.LimitReader(z, metadata.Size)); err != nil {
- return err
+ if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil {
+ return nil, err
+ }
+
+ fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode)
+ if err == nil {
+ return os.NewFile(uintptr(fd), name), nil
+ }
}
- if _, err := io.Copy(ioutil.Discard, reader); err != nil {
- return err
+ }
+ return nil, err
+}
+
+func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) {
+ switch {
+ case partCompression == fileTypeHole:
+		// The entire part is a hole. There is no need to read from a file.
+ c.rawReader = nil
+ return fileTypeHole, nil
+ case mf.Hole:
+ // Only the missing chunk in the requested part refers to a hole.
+ // The received data must be discarded.
+ limitReader := io.LimitReader(from, mf.CompressedSize)
+ _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer)
+ return fileTypeHole, err
+ case partCompression == fileTypeZstdChunked:
+ c.rawReader = io.LimitReader(from, mf.CompressedSize)
+ if c.zstdReader == nil {
+ r, err := zstd.NewReader(c.rawReader)
+ if err != nil {
+ return partCompression, err
+ }
+ c.zstdReader = r
+ } else {
+ if err := c.zstdReader.Reset(c.rawReader); err != nil {
+ return partCompression, err
+ }
}
- case fileTypeEstargz:
+ case partCompression == fileTypeEstargz:
+ c.rawReader = io.LimitReader(from, mf.CompressedSize)
if c.gzipReader == nil {
- r, err := pgzip.NewReader(reader)
+ r, err := pgzip.NewReader(c.rawReader)
if err != nil {
- return err
+ return partCompression, err
}
c.gzipReader = r
} else {
- if err := c.gzipReader.Reset(reader); err != nil {
- return err
+ if err := c.gzipReader.Reset(c.rawReader); err != nil {
+ return partCompression, err
}
}
- defer c.gzipReader.Close()
+ case partCompression == fileTypeNoCompression:
+ c.rawReader = io.LimitReader(from, mf.UncompressedSize)
+ default:
+		return partCompression, fmt.Errorf("unknown file type %v", partCompression)
+ }
+ return partCompression, nil
+}
+
+// hashHole writes size zeros to the specified hasher
+func hashHole(h hash.Hash, size int64, copyBuffer []byte) error {
+ count := int64(len(copyBuffer))
+ if size < count {
+ count = size
+ }
+ for i := int64(0); i < count; i++ {
+ copyBuffer[i] = 0
+ }
+ for size > 0 {
+ count = int64(len(copyBuffer))
+ if size < count {
+ count = size
+ }
+ if _, err := h.Write(copyBuffer[:count]); err != nil {
+ return err
+ }
+ size -= count
+ }
+ return nil
+}
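A quick usage sketch (buffer size arbitrary): computing the digest of a 1 MiB hole without ever materializing the zeros as file data:

    digester := digest.Canonical.Digester()
    if err := hashHole(digester.Hash(), 1<<20, make([]byte, 32*1024)); err == nil {
        fmt.Println(digester.Digest()) // digest of 1 MiB of zeros
    }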
+
+// appendHole creates a hole with the specified size at the open fd.
+func appendHole(fd int, size int64) error {
+ off, err := unix.Seek(fd, size, unix.SEEK_CUR)
+ if err != nil {
+ return err
+ }
+	// Make sure the file size is changed: this might be the last hole, with no other data written afterwards.
+ if err := unix.Ftruncate(fd, off); err != nil {
+ return err
+ }
+ return nil
+}
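appendHole relies on standard sparse-file semantics: seeking past the end and truncating up leaves a region that reads back as zeros without allocating blocks. A small check, assuming fd is a regular file opened for writing:

    if err := appendHole(fd, 4<<20); err != nil {
        return err
    }
    var st unix.Stat_t
    if err := unix.Fstat(fd, &st); err == nil {
        // on sparse-capable filesystems, st.Blocks*512 stays well below st.Size
        logrus.Debugf("size=%d allocated=%d", st.Size, st.Blocks*512)
    }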
- if _, err := io.Copy(to, io.LimitReader(c.gzipReader, metadata.Size)); err != nil {
+func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error {
+ switch compression {
+ case fileTypeZstdChunked:
+ defer c.zstdReader.Reset(nil)
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil {
+ return err
+ }
+ case fileTypeEstargz:
+ defer c.gzipReader.Close()
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil {
+ return err
+ }
+ case fileTypeNoCompression:
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil {
+ return err
+ }
+ case fileTypeHole:
+ if err := appendHole(int(destFile.file.Fd()), size); err != nil {
return err
}
- if _, err := io.Copy(ioutil.Discard, reader); err != nil {
+ if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil {
return err
}
default:
return fmt.Errorf("unknown file type %q", c.fileType)
}
+ return nil
+}
+
+type destinationFile struct {
+ dirfd int
+ file *os.File
+ digester digest.Digester
+ hash hash.Hash
+ to io.Writer
+ metadata *internal.FileMetadata
+ options *archive.TarOptions
+}
+
+func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) {
+ file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ digester := digest.Canonical.Digester()
+ hash := digester.Hash()
+ to := io.MultiWriter(file, hash)
+
+ return &destinationFile{
+ file: file,
+ digester: digester,
+ hash: hash,
+ to: to,
+ metadata: metadata,
+ options: options,
+ dirfd: dirfd,
+ }, nil
+}
- manifestChecksum, err := digest.Parse(metadata.Digest)
+func (d *destinationFile) Close() error {
+ manifestChecksum, err := digest.Parse(d.metadata.Digest)
if err != nil {
return err
}
- if digester.Digest() != manifestChecksum {
- return fmt.Errorf("checksum mismatch for %q", dest)
+ if d.digester.Digest() != manifestChecksum {
+ return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum)
}
- return setFileAttrs(file, mode, metadata, options)
+
+ return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
}
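The write-then-verify pattern used by destinationFile, in isolation: every byte written to the file also feeds the digester, so Close can check the TOC digest without re-reading the file (file, src, and expected are assumed to be in scope):

    digester := digest.Canonical.Digester()
    to := io.MultiWriter(file, digester.Hash())
    if _, err := io.Copy(to, src); err != nil {
        return err
    }
    if digester.Digest() != expected {
        return fmt.Errorf("checksum mismatch for %q", file.Name())
    }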
-func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
- for mc := 0; ; mc++ {
- var part io.ReadCloser
- select {
- case p := <-streams:
- part = p
- case err := <-errs:
- return err
- }
- if part == nil {
- if mc == len(missingChunks) {
- break
+func closeDestinationFiles(files chan *destinationFile, errors chan error) {
+ for f := range files {
+ errors <- f.Close()
+ }
+ close(errors)
+}
+
+func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
+ var destFile *destinationFile
+
+ filesToClose := make(chan *destinationFile, 3)
+ closeFilesErrors := make(chan error, 2)
+
+ go closeDestinationFiles(filesToClose, closeFilesErrors)
+ defer func() {
+ close(filesToClose)
+ for e := range closeFilesErrors {
+ if e != nil && Err == nil {
+ Err = e
}
- return errors.Errorf("invalid stream returned")
}
- if mc == len(missingChunks) {
- part.Close()
- return errors.Errorf("too many chunks returned")
+ }()
+
+ for _, missingPart := range missingParts {
+ var part io.ReadCloser
+ partCompression := c.fileType
+ switch {
+ case missingPart.Hole:
+ partCompression = fileTypeHole
+ case missingPart.OriginFile != nil:
+ var err error
+ part, err = missingPart.OriginFile.OpenFile()
+ if err != nil {
+ return err
+ }
+ partCompression = fileTypeNoCompression
+ case missingPart.SourceChunk != nil:
+ select {
+ case p := <-streams:
+ part = p
+ case err := <-errs:
+ if err == nil {
+ return errors.New("not enough data returned from the server")
+ }
+ return err
+ }
+ if part == nil {
+ return errors.New("invalid stream returned")
+ }
+ default:
+ return errors.New("internal error: missing part misses both local and remote data stream")
}
- for _, mf := range missingChunks[mc].Files {
+ for _, mf := range missingPart.Chunks {
if mf.Gap > 0 {
limitReader := io.LimitReader(part, mf.Gap)
- _, err := io.Copy(ioutil.Discard, limitReader)
+ _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer)
if err != nil {
- part.Close()
- return err
+ Err = err
+ goto exit
}
continue
}
- limitReader := io.LimitReader(part, mf.Length())
+ if mf.File.Name == "" {
+ Err = errors.New("file name empty")
+ goto exit
+ }
- if err := c.createFileFromCompressedStream(dest, dirfd, limitReader, os.FileMode(mf.File.Mode), mf.File, options); err != nil {
- part.Close()
- return err
+ compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf)
+ if err != nil {
+ Err = err
+ goto exit
+ }
+
+			// Open the new file if it is different from what is already
+			// opened
+ if destFile == nil || destFile.metadata.Name != mf.File.Name {
+ var err error
+ if destFile != nil {
+ cleanup:
+ for {
+ select {
+ case err = <-closeFilesErrors:
+ if err != nil {
+ Err = err
+ goto exit
+ }
+ default:
+ break cleanup
+ }
+ }
+ filesToClose <- destFile
+ }
+ destFile, err = openDestinationFile(dirfd, mf.File, options)
+ if err != nil {
+ Err = err
+ goto exit
+ }
+ }
+
+ if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil {
+ Err = err
+ goto exit
+ }
+ if c.rawReader != nil {
+ if _, err := io.CopyBuffer(io.Discard, c.rawReader, c.copyBuffer); err != nil {
+ Err = err
+ goto exit
+ }
+ }
+ }
+ exit:
+ if part != nil {
+ part.Close()
+ if Err != nil {
+ break
}
}
- part.Close()
}
+
+ if destFile != nil {
+ return destFile.Close()
+ }
+
return nil
}
-func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk {
- if len(missingChunks) <= target {
- return missingChunks
+func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
+ getGap := func(missingParts []missingPart, i int) int {
+ prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
+ return int(missingParts[i].SourceChunk.Offset - prev)
}
+ getCost := func(missingParts []missingPart, i int) int {
+ cost := getGap(missingParts, i)
+ if missingParts[i-1].OriginFile != nil {
+ cost += int(missingParts[i-1].SourceChunk.Length)
+ }
+ if missingParts[i].OriginFile != nil {
+ cost += int(missingParts[i].SourceChunk.Length)
+ }
+ return cost
+ }
+
+ // simple case: merge chunks from the same file.
+ newMissingParts := missingParts[0:1]
+ prevIndex := 0
+ for i := 1; i < len(missingParts); i++ {
+ gap := getGap(missingParts, i)
+ if gap == 0 && missingParts[prevIndex].OriginFile == nil &&
+ missingParts[i].OriginFile == nil &&
+ !missingParts[prevIndex].Hole && !missingParts[i].Hole &&
+ len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 &&
+ missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
+ missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+ missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
+ missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
+ } else {
+ newMissingParts = append(newMissingParts, missingParts[i])
+ prevIndex++
+ }
+ }
+ missingParts = newMissingParts
- getGap := func(missingChunks []missingChunk, i int) int {
- prev := missingChunks[i-1].RawChunk.Offset + missingChunks[i-1].RawChunk.Length
- return int(missingChunks[i].RawChunk.Offset - prev)
+ if len(missingParts) <= target {
+ return missingParts
}
// this implementation doesn't account for duplicates, so it could merge
// more than necessary to reach the specified target. Since target itself
// is a heuristic value, it doesn't matter.
- var gaps []int
- for i := 1; i < len(missingChunks); i++ {
- gaps = append(gaps, getGap(missingChunks, i))
+ costs := make([]int, len(missingParts)-1)
+ for i := 1; i < len(missingParts); i++ {
+ costs[i-1] = getCost(missingParts, i)
}
- sort.Ints(gaps)
+ sort.Ints(costs)
- toShrink := len(missingChunks) - target
- targetValue := gaps[toShrink-1]
+ toShrink := len(missingParts) - target
+ if toShrink >= len(costs) {
+ toShrink = len(costs) - 1
+ }
+ targetValue := costs[toShrink]
- newMissingChunks := missingChunks[0:1]
- for i := 1; i < len(missingChunks); i++ {
- gap := getGap(missingChunks, i)
- if gap > targetValue {
- newMissingChunks = append(newMissingChunks, missingChunks[i])
+ newMissingParts = missingParts[0:1]
+ for i := 1; i < len(missingParts); i++ {
+ if getCost(missingParts, i) > targetValue {
+ newMissingParts = append(newMissingParts, missingParts[i])
} else {
- prev := &newMissingChunks[len(newMissingChunks)-1]
- prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length
+ gap := getGap(missingParts, i)
+ prev := &newMissingParts[len(newMissingParts)-1]
+ prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+ prev.Hole = false
+ prev.OriginFile = nil
if gap > 0 {
- gapFile := missingFile{
+ gapFile := missingFileChunk{
Gap: int64(gap),
}
- prev.Files = append(prev.Files, gapFile)
+ prev.Chunks = append(prev.Chunks, gapFile)
}
- prev.Files = append(prev.Files, missingChunks[i].Files...)
+ prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...)
}
}
- return newMissingChunks
+ return newMissingParts
}
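A worked example with invented numbers: parts covering compressed offsets [0,100) and [130,200) have a gap of 30 bytes, so the cost of merging them is 30. If 30 falls at or below the computed cutoff, the two become a single request for [0,200), and a filler chunk with Gap: 30 is appended to Chunks so that the 30 unwanted bytes are read from the stream and discarded on arrival.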
-func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
+func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
var chunksToRequest []ImageSourceChunk
- for _, c := range missingChunks {
- chunksToRequest = append(chunksToRequest, c.RawChunk)
+
+ calculateChunksToRequest := func() {
+ chunksToRequest = []ImageSourceChunk{}
+ for _, c := range missingParts {
+ if c.OriginFile == nil && !c.Hole {
+ chunksToRequest = append(chunksToRequest, *c.SourceChunk)
+ }
+ }
}
+ calculateChunksToRequest()
+
// There are some missing files. Prepare a multirange request for the missing chunks.
var streams chan io.ReadCloser
var err error
@@ -681,32 +1029,33 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChun
}
if _, ok := err.(ErrBadRequest); ok {
- requested := len(missingChunks)
+ requested := len(missingParts)
// If the server cannot handle at least 64 chunks in a single request, just give up.
if requested < 64 {
return err
}
// Merge more chunks to request
- missingChunks = mergeMissingChunks(missingChunks, requested/2)
+ missingParts = mergeMissingChunks(missingParts, requested/2)
+ calculateChunksToRequest()
continue
}
return err
}
- if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingChunks, options); err != nil {
+ if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil {
return err
}
return nil
}
-func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
- parent := filepath.Dir(metadata.Name)
- base := filepath.Base(metadata.Name)
+func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error {
+ parent := filepath.Dir(name)
+ base := filepath.Base(name)
parentFd := dirfd
if parent != "." {
- parentFile, err := openFileUnderRoot(parent, dirfd, unix.O_DIRECTORY|unix.O_PATH|unix.O_RDONLY, 0)
+ parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0)
if err != nil {
return err
}
@@ -716,21 +1065,21 @@ func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opt
if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil {
if !os.IsExist(err) {
- return err
+ return fmt.Errorf("mkdir %q: %w", name, err)
}
}
- file, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_RDONLY, 0)
+ file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0)
if err != nil {
return err
}
defer file.Close()
- return setFileAttrs(file, mode, metadata, options)
+ return setFileAttrs(dirfd, file, mode, metadata, options, false)
}
func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
- sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_RDONLY, 0)
+ sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0)
if err != nil {
return err
}
@@ -739,7 +1088,7 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
destDirFd := dirfd
if destDir != "." {
- f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0)
+ f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0)
if err != nil {
return err
}
@@ -747,25 +1096,35 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti
destDirFd = int(f.Fd())
}
- err = unix.Linkat(int(sourceFile.Fd()), "", destDirFd, destBase, unix.AT_EMPTY_PATH)
+ err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase)
if err != nil {
- return err
+ return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err)
}
- newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY, 0)
+ newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0)
if err != nil {
+ // If the target is a symlink, open the file with O_PATH.
+ if errors.Is(err, unix.ELOOP) {
+ newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0)
+ if err != nil {
+ return err
+ }
+ defer newFile.Close()
+
+ return setFileAttrs(dirfd, newFile, mode, metadata, options, true)
+ }
return err
}
defer newFile.Close()
- return setFileAttrs(newFile, mode, metadata, options)
+ return setFileAttrs(dirfd, newFile, mode, metadata, options, false)
}
func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
destDirFd := dirfd
if destDir != "." {
- f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0)
+ f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0)
if err != nil {
return err
}
@@ -773,7 +1132,10 @@ func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, o
destDirFd = int(f.Fd())
}
- return unix.Symlinkat(metadata.Linkname, destDirFd, destBase)
+ if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil {
+ return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err)
+ }
+ return nil
}
type whiteoutHandler struct {
@@ -782,13 +1144,16 @@ type whiteoutHandler struct {
}
func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
- file, err := openFileUnderRoot(path, d.Dirfd, unix.O_RDONLY, 0)
+ file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0)
if err != nil {
return err
}
defer file.Close()
- return unix.Fsetxattr(int(file.Fd()), name, value, 0)
+ if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil {
+ return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err)
+ }
+ return nil
}
func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
@@ -797,7 +1162,7 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
dirfd := d.Dirfd
if dir != "" {
- dir, err := openFileUnderRoot(dir, d.Dirfd, unix.O_RDONLY, 0)
+ dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0)
if err != nil {
return err
}
@@ -806,12 +1171,16 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
dirfd = int(dir.Fd())
}
- return unix.Mknodat(dirfd, base, mode, dev)
+ if err := unix.Mknodat(dirfd, base, mode, dev); err != nil {
+ return fmt.Errorf("mknod %q: %w", path, err)
+ }
+
+ return nil
}
func checkChownErr(err error, name string, uid, gid int) error {
if errors.Is(err, syscall.EINVAL) {
- return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name)
+ return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err)
}
return err
}
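For reference, a subordinate-ID range in /etc/subuid (or /etc/subgid) is one line of name:start:count; the following entry (user name illustrative) grants alice 65536 consecutive UIDs starting at 100000:

    alice:100000:65536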
@@ -849,7 +1218,56 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo
return def
}
+type findAndCopyFileOptions struct {
+ useHardLinks bool
+ ostreeRepos []string
+ options *archive.TarOptions
+}
+
+func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) {
+ finalizeFile := func(dstFile *os.File) error {
+ if dstFile != nil {
+ defer dstFile.Close()
+ if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks)
+ if err != nil {
+ return false, err
+ }
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+
+ found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks)
+ if err != nil {
+ return false, err
+ }
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+
+ return false, nil
+}
+
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
+ defer c.layersCache.release()
+ defer func() {
+ if c.zstdReader != nil {
+ c.zstdReader.Close()
+ }
+ }()
+
bigData := map[string][]byte{
bigDataKey: c.manifest,
}
@@ -867,21 +1285,22 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
return output, errors.New("enable_partial_images not configured")
}
- enableHostDedup := parseBooleanPullOption(&storeOpts, "enable_host_deduplication", false)
-
// When the hard links deduplication is used, file attributes are ignored because setting them
// modifies the source file as well.
useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false)
+ // List of OSTree repositories to use for deduplication
+ ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":")
+
// Generate the manifest
- var toc internal.TOC
- if err := json.Unmarshal(c.manifest, &toc); err != nil {
+ toc, err := unmarshalToc(c.manifest)
+ if err != nil {
return output, err
}
whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
- var missingChunks []missingChunk
+ var missingParts []missingPart
mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
if err != nil {
@@ -903,17 +1322,60 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0)
if err != nil {
- return output, err
+ return output, fmt.Errorf("cannot open %q: %w", dest, err)
}
defer unix.Close(dirfd)
- otherLayersCache := prepareOtherLayersCache(c.layersMetadata)
-
// hardlinks can point to missing files. So create them after all files
// are retrieved
var hardLinks []hardLinkToCreate
- missingChunksSize, totalChunksSize := int64(0), int64(0)
+ missingPartsSize, totalChunksSize := int64(0), int64(0)
+
+ copyOptions := findAndCopyFileOptions{
+ useHardLinks: useHardLinks,
+ ostreeRepos: ostreeRepos,
+ options: options,
+ }
+
+ type copyFileJob struct {
+ njob int
+ index int
+ mode os.FileMode
+ metadata *internal.FileMetadata
+
+ found bool
+ err error
+ }
+
+ var wg sync.WaitGroup
+
+ copyResults := make([]copyFileJob, len(mergedEntries))
+
+ copyFileJobs := make(chan copyFileJob)
+ defer func() {
+ if copyFileJobs != nil {
+ close(copyFileJobs)
+ }
+ wg.Wait()
+ }()
+
+ for i := 0; i < copyGoRoutines; i++ {
+ wg.Add(1)
+ jobs := copyFileJobs
+
+ go func() {
+ defer wg.Done()
+ for job := range jobs {
+			found, err := c.findAndCopyFile(dirfd, job.metadata, &copyOptions, job.mode)
+ job.err = err
+ job.found = found
+ copyResults[job.njob] = job
+ }
+ }()
+ }
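A note on the dispatch above: each worker writes its result into the preallocated copyResults slot indexed by job.njob, so exactly one goroutine ever touches a given slot. No mutex is needed, and after wg.Wait() the main loop can replay the results in submission order via copyResults[:filesToWaitFor].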
+
+ filesToWaitFor := 0
for i, r := range mergedEntries {
if options.ForceMask != nil {
value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777)
@@ -963,7 +1425,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
return err
}
defer file.Close()
- if err := setFileAttrs(file, mode, &r, options); err != nil {
+ if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil {
return err
}
return nil
@@ -975,7 +1437,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
case tar.TypeDir:
- if err := safeMkdir(dirfd, mode, &r, options); err != nil {
+ if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil {
return output, err
}
continue
@@ -1009,61 +1471,95 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
totalChunksSize += r.Size
- found, dstFile, _, err := findFileInOtherLayers(&r, dirfd, otherLayersCache, c.layersTarget, useHardLinks)
- if err != nil {
- return output, err
- }
- if dstFile != nil {
- if err := setFileAttrs(dstFile, mode, &r, options); err != nil {
- dstFile.Close()
- return output, err
+ if t == tar.TypeReg {
+ index := i
+ njob := filesToWaitFor
+ job := copyFileJob{
+ mode: mode,
+ metadata: &mergedEntries[index],
+ index: index,
+ njob: njob,
}
- dstFile.Close()
+ copyFileJobs <- job
+ filesToWaitFor++
+ }
+ }
+
+ close(copyFileJobs)
+ copyFileJobs = nil
+
+ wg.Wait()
+
+ for _, res := range copyResults[:filesToWaitFor] {
+ r := &mergedEntries[res.index]
+
+ if res.err != nil {
+ return output, res.err
}
- if found {
+ // the file was already copied to its destination
+ // so nothing left to do.
+ if res.found {
continue
}
- if enableHostDedup {
- found, dstFile, _, err = findFileOnTheHost(&r, dirfd, useHardLinks)
- if err != nil {
- return output, err
- }
- if dstFile != nil {
- if err := setFileAttrs(dstFile, mode, &r, options); err != nil {
- dstFile.Close()
- return output, err
- }
- dstFile.Close()
- }
- if found {
- continue
+ missingPartsSize += r.Size
+
+ remainingSize := r.Size
+
+		// The file is missing; attempt to find its individual chunks.
+ for _, chunk := range r.Chunks {
+ compressedSize := int64(chunk.EndOffset - chunk.Offset)
+ size := remainingSize
+ if chunk.ChunkSize > 0 {
+ size = chunk.ChunkSize
}
- }
+ remainingSize = remainingSize - size
- missingChunksSize += r.Size
- if t == tar.TypeReg {
rawChunk := ImageSourceChunk{
- Offset: uint64(r.Offset),
- Length: uint64(r.EndOffset - r.Offset),
+ Offset: uint64(chunk.Offset),
+ Length: uint64(compressedSize),
}
-
- file := missingFile{
- File: &mergedEntries[i],
+ file := missingFileChunk{
+ File: &mergedEntries[res.index],
+ CompressedSize: compressedSize,
+ UncompressedSize: size,
}
-
- missingChunks = append(missingChunks, missingChunk{
- RawChunk: rawChunk,
- Files: []missingFile{
+ mp := missingPart{
+ SourceChunk: &rawChunk,
+ Chunks: []missingFileChunk{
file,
},
- })
+ }
+
+ switch chunk.ChunkType {
+ case internal.ChunkTypeData:
+ root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk)
+ if err != nil {
+ return output, err
+ }
+ if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) {
+ missingPartsSize -= size
+ mp.OriginFile = &originFile{
+ Root: root,
+ Path: path,
+ Offset: offset,
+ }
+ }
+ case internal.ChunkTypeZeros:
+ missingPartsSize -= size
+ mp.Hole = true
+ // Mark all chunks belonging to the missing part as holes
+ for i := range mp.Chunks {
+ mp.Chunks[i].Hole = true
+ }
+ }
+ missingParts = append(missingParts, mp)
}
}
// There are some missing files. Prepare a multirange request for the missing chunks.
- if len(missingChunks) > 0 {
- missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks)
- if err := c.retrieveMissingFiles(dest, dirfd, missingChunks, options); err != nil {
+ if len(missingParts) > 0 {
+ missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
+ if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil {
return output, err
}
}
@@ -1075,31 +1571,69 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
if totalChunksSize > 0 {
- logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingChunksSize, totalChunksSize, float32(missingChunksSize*100.0)/float32(totalChunksSize))
+ logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
}
return output, nil
}
+func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
+ // ignore the metadata files for the estargz format.
+ if fileType != fileTypeEstargz {
+ return false
+ }
+ switch e.Name {
+ case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName:
+ return true
+ }
+ return false
+}
+
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) {
- var mergedEntries []internal.FileMetadata
- var prevEntry *internal.FileMetadata
- for _, entry := range entries {
- e := entry
+ countNextChunks := func(start int) int {
+ count := 0
+ for _, e := range entries[start:] {
+ if e.Type != TypeChunk {
+ return count
+ }
+ count++
+ }
+ return count
+ }
- // ignore the metadata files for the estargz format.
- if fileType == fileTypeEstargz && (e.Name == estargz.PrefetchLandmark || e.Name == estargz.NoPrefetchLandmark || e.Name == estargz.TOCTarName) {
+ size := 0
+ for _, entry := range entries {
+ if mustSkipFile(fileType, entry) {
continue
}
+ if entry.Type != TypeChunk {
+ size++
+ }
+ }
+ mergedEntries := make([]internal.FileMetadata, size)
+ m := 0
+ for i := 0; i < len(entries); i++ {
+ e := entries[i]
+ if mustSkipFile(fileType, e) {
+ continue
+ }
if e.Type == TypeChunk {
- if prevEntry == nil || prevEntry.Type != TypeReg {
- return nil, errors.New("chunk type without a regular file")
+ return nil, fmt.Errorf("chunk type without a regular file")
+ }
+
+ if e.Type == TypeReg {
+ nChunks := countNextChunks(i + 1)
+
+ e.Chunks = make([]*internal.FileMetadata, nChunks+1)
+ for j := 0; j <= nChunks; j++ {
+ e.Chunks[j] = &entries[i+j]
+ e.EndOffset = entries[i+j].EndOffset
}
- prevEntry.EndOffset = e.EndOffset
- continue
+ i += nChunks
}
- mergedEntries = append(mergedEntries, e)
- prevEntry = &e
+ mergedEntries[m] = e
+ m++
}
// stargz/estargz doesn't store EndOffset so let's calculate it here
lastOffset := c.tocOffset
@@ -1110,6 +1644,47 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
if mergedEntries[i].Offset != 0 {
lastOffset = mergedEntries[i].Offset
}
+
+ lastChunkOffset := mergedEntries[i].EndOffset
+ for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- {
+ mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset
+ mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset
+ lastChunkOffset = mergedEntries[i].Chunks[j].Offset
+ }
}
return mergedEntries, nil
}
+
+// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
+// same digest as chunk.ChunkDigest
+func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
+ parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
+ if err != nil {
+ return false
+ }
+ defer unix.Close(parentDirfd)
+
+ fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0)
+ if err != nil {
+ return false
+ }
+ defer fd.Close()
+
+ if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil {
+ return false
+ }
+
+ r := io.LimitReader(fd, chunk.ChunkSize)
+ digester := digest.Canonical.Digester()
+
+ if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil {
+ return false
+ }
+
+ digest, err := digest.Parse(chunk.ChunkDigest)
+ if err != nil {
+ return false
+ }
+
+ return digester.Digest() == digest
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
index 3a406ba786a..4d952aba3fa 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
@@ -1,13 +1,14 @@
+//go:build !linux
// +build !linux
package chunked
import (
"context"
+ "errors"
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
- "github.com/pkg/errors"
)
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
index e6622cf1466..f6e0cfcfe86 100644
--- a/vendor/github.com/containers/storage/pkg/config/config.go
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -12,109 +12,109 @@ type ThinpoolOptionsConfig struct {
// grown. This is specified in terms of % of pool size. So a value of
// 20 means that when threshold is hit, pool will be grown by 20% of
// existing pool size.
- AutoExtendPercent string `toml:"autoextend_percent"`
+ AutoExtendPercent string `toml:"autoextend_percent,omitempty"`
// AutoExtendThreshold determines the pool extension threshold in terms
// of percentage of pool size. For example, if threshold is 60, that
// means when pool is 60% full, threshold has been hit.
- AutoExtendThreshold string `toml:"autoextend_threshold"`
+ AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"`
// BaseSize specifies the size to use when creating the base device,
// which limits the size of images and containers.
- BaseSize string `toml:"basesize"`
+ BaseSize string `toml:"basesize,omitempty"`
// BlockSize specifies a custom blocksize to use for the thin pool.
- BlockSize string `toml:"blocksize"`
+ BlockSize string `toml:"blocksize,omitempty"`
// DirectLvmDevice specifies a custom block storage device to use for
// the thin pool.
- DirectLvmDevice string `toml:"directlvm_device"`
+ DirectLvmDevice string `toml:"directlvm_device,omitempty"`
// DirectLvmDeviceForcewipes device even if device already has a
// filesystem
- DirectLvmDeviceForce string `toml:"directlvm_device_force"`
+ DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"`
// Fs specifies the filesystem type to use for the base device.
- Fs string `toml:"fs"`
+ Fs string `toml:"fs,omitempty"`
// log_level sets the log level of devicemapper.
- LogLevel string `toml:"log_level"`
+ LogLevel string `toml:"log_level,omitempty"`
// MetadataSize specifies the size of the metadata for the thinpool
// It will be used with the `pvcreate --metadata` option.
- MetadataSize string `toml:"metadatasize"`
+ MetadataSize string `toml:"metadatasize,omitempty"`
// MinFreeSpace specifies the minimum free space percent required in a
// thin pool for new device creation to succeed.
- MinFreeSpace string `toml:"min_free_space"`
+ MinFreeSpace string `toml:"min_free_space,omitempty"`
// MkfsArg specifies extra mkfs arguments to be used when creating the
// basedevice.
- MkfsArg string `toml:"mkfsarg"`
+ MkfsArg string `toml:"mkfsarg,omitempty"`
// MountOpt specifies extra mount options used when mounting the thin
// devices.
- MountOpt string `toml:"mountopt"`
+ MountOpt string `toml:"mountopt,omitempty"`
// Size
- Size string `toml:"size"`
+ Size string `toml:"size,omitempty"`
// UseDeferredDeletion marks device for deferred deletion
- UseDeferredDeletion string `toml:"use_deferred_deletion"`
+ UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"`
// UseDeferredRemoval marks device for deferred removal
- UseDeferredRemoval string `toml:"use_deferred_removal"`
+ UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"`
// XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of
// retries XFS should attempt to complete IO when ENOSPC (no space)
// error is returned by underlying storage device.
- XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
+ XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"`
}
type AufsOptionsConfig struct {
// MountOpt specifies extra mount options used when mounting
- MountOpt string `toml:"mountopt"`
+ MountOpt string `toml:"mountopt,omitempty"`
}
type BtrfsOptionsConfig struct {
// MinSpace is the minimal spaces allocated to the device
- MinSpace string `toml:"min_space"`
+ MinSpace string `toml:"min_space,omitempty"`
// Size
- Size string `toml:"size"`
+ Size string `toml:"size,omitempty"`
}
type OverlayOptionsConfig struct {
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
- IgnoreChownErrors string `toml:"ignore_chown_errors"`
+ IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
// MountOpt specifies extra mount options used when mounting
- MountOpt string `toml:"mountopt"`
+ MountOpt string `toml:"mountopt,omitempty"`
// Alternative program to use for the mount of the file system
- MountProgram string `toml:"mount_program"`
+ MountProgram string `toml:"mount_program,omitempty"`
// Size
- Size string `toml:"size"`
+ Size string `toml:"size,omitempty"`
// Inodes is used to set a maximum inodes of the container image.
- Inodes string `toml:"inodes"`
+ Inodes string `toml:"inodes,omitempty"`
// Do not create a bind mount on the storage home
- SkipMountHome string `toml:"skip_mount_home"`
+ SkipMountHome string `toml:"skip_mount_home,omitempty"`
// ForceMask indicates the permissions mask (e.g. "0755") to use for new
// files and directories
- ForceMask string `toml:"force_mask"`
+ ForceMask string `toml:"force_mask,omitempty"`
}
type VfsOptionsConfig struct {
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
- IgnoreChownErrors string `toml:"ignore_chown_errors"`
+ IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
}
type ZfsOptionsConfig struct {
// MountOpt specifies extra mount options used when mounting
- MountOpt string `toml:"mountopt"`
+ MountOpt string `toml:"mountopt,omitempty"`
// Name is the File System name of the ZFS File system
- Name string `toml:"fsname"`
+ Name string `toml:"fsname,omitempty"`
// Size
- Size string `toml:"size"`
+ Size string `toml:"size,omitempty"`
}
// OptionsConfig represents the "storage.options" TOML config table.
@@ -122,82 +122,82 @@ type OptionsConfig struct {
// AdditionalImageStores is the location of additional read/only
// Image stores. Usually used to access Networked File System
// for shared image content
- AdditionalImageStores []string `toml:"additionalimagestores"`
+ AdditionalImageStores []string `toml:"additionalimagestores,omitempty"`
// AdditionalLayerStores is the location of additional read/only
// Layer stores. Usually used to access Networked File System
// for shared image content
// This API is experimental and can be changed without bumping the
// major version number.
- AdditionalLayerStores []string `toml:"additionallayerstores"`
+ AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"`
// Size
- Size string `toml:"size"`
+ Size string `toml:"size,omitempty"`
// RemapUIDs is a list of default UID mappings to use for layers.
- RemapUIDs string `toml:"remap-uids"`
+ RemapUIDs string `toml:"remap-uids,omitempty"`
// RemapGIDs is a list of default GID mappings to use for layers.
- RemapGIDs string `toml:"remap-gids"`
+ RemapGIDs string `toml:"remap-gids,omitempty"`
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
- IgnoreChownErrors string `toml:"ignore_chown_errors"`
+ IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
// ForceMask indicates the permissions mask (e.g. "0755") to use for new
// files and directories.
- ForceMask os.FileMode `toml:"force_mask"`
+ ForceMask os.FileMode `toml:"force_mask,omitempty"`
// RemapUser is the name of one or more entries in /etc/subuid which
// should be used to set up default UID mappings.
- RemapUser string `toml:"remap-user"`
+ RemapUser string `toml:"remap-user,omitempty"`
// RemapGroup is the name of one or more entries in /etc/subgid which
// should be used to set up default GID mappings.
- RemapGroup string `toml:"remap-group"`
+ RemapGroup string `toml:"remap-group,omitempty"`
// RootAutoUsernsUser is the name of one or more entries in /etc/subuid and
// /etc/subgid which should be used to set up automatically a userns.
- RootAutoUsernsUser string `toml:"root-auto-userns-user"`
+ RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"`
// AutoUsernsMinSize is the minimum size for a user namespace that is
// created automatically.
- AutoUsernsMinSize uint32 `toml:"auto-userns-min-size"`
+ AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"`
// AutoUsernsMaxSize is the maximum size for a user namespace that is
// created automatically.
- AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size"`
+ AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"`
// Aufs container options to be handed to aufs drivers
- Aufs struct{ AufsOptionsConfig } `toml:"aufs"`
+ Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"`
// Btrfs container options to be handed to btrfs drivers
- Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"`
+ Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"`
// Thinpool container options to be handed to thinpool drivers
- Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"`
+ Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"`
// Overlay container options to be handed to overlay drivers
- Overlay struct{ OverlayOptionsConfig } `toml:"overlay"`
+ Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"`
// Vfs container options to be handed to VFS drivers
- Vfs struct{ VfsOptionsConfig } `toml:"vfs"`
+ Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"`
// Zfs container options to be handed to ZFS drivers
- Zfs struct{ ZfsOptionsConfig } `toml:"zfs"`
+ Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"`
// Do not create a bind mount on the storage home
- SkipMountHome string `toml:"skip_mount_home"`
+ SkipMountHome string `toml:"skip_mount_home,omitempty"`
// Alternative program to use for the mount of the file system
- MountProgram string `toml:"mount_program"`
+ MountProgram string `toml:"mount_program,omitempty"`
// MountOpt specifies extra mount options used when mounting
- MountOpt string `toml:"mountopt"`
+ MountOpt string `toml:"mountopt,omitempty"`
// PullOptions specifies options to be handed to pull managers
// This API is experimental and can be changed without bumping the major version number.
- PullOptions map[string]string `toml:"pull_options"`
+ PullOptions map[string]string `toml:"pull_options,omitempty"`
// DisableVolatile doesn't allow volatile mounts when it is set.
- DisableVolatile bool `toml:"disable-volatile"`
+ DisableVolatile bool `toml:"disable-volatile,omitempty"`
}
// GetGraphDriverOptions returns the driver specific options
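
Reviewer note: the point of the blanket `,omitempty` additions is that zero-valued fields disappear when the config is re-serialized. A minimal sketch, assuming the BurntSushi/toml encoder this module uses; the struct here is illustrative, not the real OptionsConfig.

    package main

    import (
        "os"

        "github.com/BurntSushi/toml"
    )

    // opts is an illustrative stand-in for OptionsConfig.
    type opts struct {
        Size     string `toml:"size,omitempty"`
        MountOpt string `toml:"mountopt,omitempty"`
        Driver   string `toml:"driver"` // no omitempty: always emitted
    }

    func main() {
        // Prints only `driver = ""`; the empty omitempty fields are
        // dropped instead of being serialized as size = "".
        _ = toml.NewEncoder(os.Stdout).Encode(opts{})
    }
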
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
index 6a0ac246479..6e6852d4d78 100644
--- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
@@ -1,3 +1,4 @@
+//go:build linux && cgo
// +build linux,cgo
package devicemapper
@@ -805,7 +806,7 @@ func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDevice
if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
if doSuspend {
if err2 := ResumeDevice(baseName); err2 != nil {
- return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2)
+ return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: %w", err, err2)
}
}
return err
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go
index b0ce706e5ed..829fe59f3ae 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory.go
@@ -1,7 +1,6 @@
package directory
import (
- "io/ioutil"
"os"
"path/filepath"
)
@@ -15,7 +14,7 @@ type DiskUsage struct {
// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path
func MoveToSubdir(oldpath, subdir string) error {
- infos, err := ioutil.ReadDir(oldpath)
+ infos, err := os.ReadDir(oldpath)
if err != nil {
return err
}
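
Reviewer note: os.ReadDir returns []fs.DirEntry rather than []fs.FileInfo, so MoveToSubdir (which only needs names) is unchanged, while callers that need sizes or modes must call Info(). A small sketch of the pattern:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        entries, err := os.ReadDir("/tmp") // []fs.DirEntry, sorted by name
        if err != nil {
            panic(err)
        }
        for _, e := range entries {
            info, err := e.Info() // stat lazily, only when metadata is needed
            if err != nil {
                continue // the entry may have disappeared since ReadDir
            }
            fmt.Println(e.Name(), info.Size())
        }
    }
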
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
index 8d58d24cac8..36e1bdd5fc8 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
@@ -1,8 +1,10 @@
+//go:build linux || darwin || freebsd || solaris
// +build linux darwin freebsd solaris
package directory
import (
+ "io/fs"
"os"
"path/filepath"
"syscall"
@@ -21,7 +23,7 @@ func Size(dir string) (size int64, err error) {
func Usage(dir string) (usage *DiskUsage, err error) {
usage = &DiskUsage{}
data := make(map[uint64]struct{})
- err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+ err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error {
if err != nil {
// if dir does not exist, Usage() returns the error.
// if dir/x disappeared while walking, Usage() ignores dir/x.
@@ -31,8 +33,9 @@ func Usage(dir string) (usage *DiskUsage, err error) {
return err
}
- if fileInfo == nil {
- return nil
+ fileInfo, err := entry.Info()
+ if err != nil {
+ return err
}
// Check inode to only count the sizes of files with multiple hard links once.
@@ -44,9 +47,8 @@ func Usage(dir string) (usage *DiskUsage, err error) {
// inode is not a uint64 on all platforms. Cast it to avoid issues.
data[uint64(inode)] = struct{}{}
-
// Ignore directory sizes
- if fileInfo.IsDir() {
+ if entry.IsDir() {
return nil
}
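
Reviewer note: Usage records each inode once so hard links are not double-counted. A hedged, Linux-only sketch of the same inode check in isolation, using the syscall.Stat_t assertion as above:

    //go:build linux

    package main

    import (
        "fmt"
        "io/fs"
        "path/filepath"
        "syscall"
    )

    func diskUsage(dir string) (int64, error) {
        var size int64
        seen := map[uint64]struct{}{}
        err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
            if err != nil || d.IsDir() {
                return err // skip directory sizes; propagate walk errors
            }
            info, err := d.Info()
            if err != nil {
                return err
            }
            st, ok := info.Sys().(*syscall.Stat_t)
            if !ok {
                return nil
            }
            if _, dup := seen[uint64(st.Ino)]; dup {
                return nil // hard link: already counted
            }
            seen[uint64(st.Ino)] = struct{}{}
            size += info.Size()
            return nil
        })
        return size, err
    }

    func main() {
        n, err := diskUsage("/var/tmp")
        fmt.Println(n, err)
    }
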
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
index a7a81240bc2..482bc51a26e 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
@@ -1,8 +1,10 @@
+//go:build windows
// +build windows
package directory
import (
+ "io/fs"
"os"
"path/filepath"
)
@@ -19,11 +21,11 @@ func Size(dir string) (size int64, err error) {
// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
func Usage(dir string) (usage *DiskUsage, err error) {
usage = &DiskUsage{}
- err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+ err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
// if dir does not exist, Size() returns the error.
// if dir/x disappeared while walking, Size() ignores dir/x.
- if os.IsNotExist(err) && d != dir {
+ if os.IsNotExist(err) && path != dir {
return nil
}
return err
@@ -32,16 +34,15 @@ func Usage(dir string) (usage *DiskUsage, err error) {
usage.InodeCount++
// Ignore directory sizes
- if fileInfo == nil {
+ if d.IsDir() {
return nil
}
- s := fileInfo.Size()
- if fileInfo.IsDir() || s == 0 {
- return nil
+ fileInfo, err := d.Info()
+ if err != nil {
+ return err
}
-
- usage.Size += s
+ usage.Size += fileInfo.Size()
return nil
})
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
index 5be98165ef7..abf6e2f85ef 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
@@ -1,6 +1,7 @@
package fileutils
import (
+ "errors"
"fmt"
"io"
"os"
@@ -9,7 +10,6 @@ import (
"strings"
"text/scanner"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -340,13 +340,13 @@ func ReadSymlinkedDirectory(path string) (string, error) {
// The target of the symbolic link can be a file or a directory.
func ReadSymlinkedPath(path string) (realPath string, err error) {
if realPath, err = filepath.Abs(path); err != nil {
- return "", errors.Wrapf(err, "unable to get absolute path for %q", path)
+ return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
- return "", errors.Wrapf(err, "failed to canonicalise path for %q", path)
+ return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err)
}
if _, err := os.Stat(realPath); err != nil {
- return "", errors.Wrapf(err, "failed to stat target %q of %q", realPath, path)
+ return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err)
}
return realPath, nil
}
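
Reviewer note: the errors.Wrapf to fmt.Errorf("...: %w", ...) conversions throughout this bump keep the cause chain visible to the standard library. A quick sketch of why %w rather than %v matters to callers:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    func main() {
        _, err := os.Stat("/no/such/path")

        wrapped := fmt.Errorf("failed to stat target: %w", err)
        flattened := fmt.Errorf("failed to stat target: %v", err)

        fmt.Println(errors.Is(wrapped, os.ErrNotExist))   // true: chain preserved
        fmt.Println(errors.Is(flattened, os.ErrNotExist)) // false: only the text survives
    }
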
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
index 9e0e97bd64a..92e0263d818 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
@@ -1,10 +1,10 @@
+//go:build linux || freebsd
// +build linux freebsd
package fileutils
import (
"fmt"
- "io/ioutil"
"os"
"github.com/sirupsen/logrus"
@@ -13,8 +13,8 @@ import (
// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
- if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
- logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ logrus.Errorf("%v", err)
} else {
return len(fds)
}
diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
index e6094b55b71..9854cac1c29 100644
--- a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
+++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
@@ -1,10 +1,10 @@
+//go:build linux
// +build linux
package fsutils
import (
"fmt"
- "io/ioutil"
"os"
"unsafe"
@@ -12,14 +12,14 @@ import (
)
func locateDummyIfEmpty(path string) (string, error) {
- children, err := ioutil.ReadDir(path)
+ children, err := os.ReadDir(path)
if err != nil {
return "", err
}
if len(children) != 0 {
return "", nil
}
- dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
+ dummyFile, err := os.CreateTemp(path, "fsutils-dummy")
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go
new file mode 100644
index 00000000000..85c5e76c844
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go
@@ -0,0 +1,52 @@
+package homedir
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+)
+
+// GetConfigHome returns XDG_CONFIG_HOME.
+// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetConfigHome() (string, error) {
+ if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
+ return xdgConfigHome, nil
+ }
+ home := Get()
+ if home == "" {
+ return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
+ }
+ return filepath.Join(home, ".config"), nil
+}
+
+// GetDataHome returns XDG_DATA_HOME.
+// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetDataHome() (string, error) {
+ if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
+ return xdgDataHome, nil
+ }
+ home := Get()
+ if home == "" {
+ return "", errors.New("could not get either XDG_DATA_HOME or HOME")
+ }
+ return filepath.Join(home, ".local", "share"), nil
+}
+
+// GetCacheHome returns XDG_CACHE_HOME.
+// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetCacheHome() (string, error) {
+ if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" {
+ return xdgCacheHome, nil
+ }
+ home := Get()
+ if home == "" {
+ return "", errors.New("could not get either XDG_CACHE_HOME or HOME")
+ }
+ return filepath.Join(home, ".cache"), nil
+}
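
Reviewer note: a small sketch of the XDG fallback these relocated helpers implement; the environment values are illustrative.

    package main

    import (
        "fmt"
        "os"

        "github.com/containers/storage/pkg/homedir"
    )

    func main() {
        os.Unsetenv("XDG_CONFIG_HOME")
        cfg, _ := homedir.GetConfigHome() // falls back to $HOME/.config
        fmt.Println(cfg)

        os.Setenv("XDG_CONFIG_HOME", "/etc/xdg-alt") // illustrative path
        cfg, _ = homedir.GetConfigHome()             // the env var wins when set
        fmt.Println(cfg)                             // /etc/xdg-alt
    }
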
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
index 06b53854b93..027db259c19 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
@@ -18,18 +18,3 @@ func GetRuntimeDir() (string, error) {
func StickRuntimeDirContents(files []string) ([]string, error) {
return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
}
-
-// GetDataHome is unsupported on non-linux system.
-func GetDataHome() (string, error) {
- return "", errors.New("homedir.GetDataHome() is not supported on this system")
-}
-
-// GetConfigHome is unsupported on non-linux system.
-func GetConfigHome() (string, error) {
- return "", errors.New("homedir.GetConfigHome() is not supported on this system")
-}
-
-// GetCacheHome is unsupported on non-linux system.
-func GetCacheHome() (string, error) {
- return "", errors.New("homedir.GetCacheHome() is not supported on this system")
-}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
index 2475e351bb5..37dc9159fc7 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package homedir
@@ -46,7 +47,7 @@ func GetShortcutString() string {
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetRuntimeDir() (string, error) {
if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
- return xdgRuntimeDir, nil
+ return filepath.EvalSymlinks(xdgRuntimeDir)
}
return "", errors.New("could not get XDG_RUNTIME_DIR")
}
@@ -93,48 +94,3 @@ func stick(f string) error {
m |= os.ModeSticky
return os.Chmod(f, m)
}
-
-// GetDataHome returns XDG_DATA_HOME.
-// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetDataHome() (string, error) {
- if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
- return xdgDataHome, nil
- }
- home := Get()
- if home == "" {
- return "", errors.New("could not get either XDG_DATA_HOME or HOME")
- }
- return filepath.Join(home, ".local", "share"), nil
-}
-
-// GetConfigHome returns XDG_CONFIG_HOME.
-// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetConfigHome() (string, error) {
- if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
- return xdgConfigHome, nil
- }
- home := Get()
- if home == "" {
- return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
- }
- return filepath.Join(home, ".config"), nil
-}
-
-// GetCacheHome returns XDG_CACHE_HOME.
-// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetCacheHome() (string, error) {
- if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" {
- return xdgCacheHome, nil
- }
- home := Get()
- if home == "" {
- return "", errors.New("could not get either XDG_CACHE_HOME or HOME")
- }
- return filepath.Join(home, ".cache"), nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
index 4f2615ed32f..af65f2c03de 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
@@ -17,7 +17,12 @@ func Key() string {
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
func Get() string {
- return os.Getenv(Key())
+ home := os.Getenv(Key())
+ if home != "" {
+ return home
+ }
+ home, _ = os.UserHomeDir()
+ return home
}
// GetShortcutString returns the string that is shortcut to user's home directory
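
Reviewer note: Get now falls back to os.UserHomeDir when the env var is empty. A minimal sketch of the equivalent logic, assuming Key() resolves to USERPROFILE on Windows:

    package main

    import (
        "fmt"
        "os"
    )

    // get mirrors the Windows fallback above: prefer the platform env var,
    // then ask the runtime, which consults the OS account database.
    func get() string {
        if home := os.Getenv("USERPROFILE"); home != "" {
            return home
        }
        home, _ := os.UserHomeDir()
        return home
    }

    func main() { fmt.Println(get()) }
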
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index 83bc8c34ff4..a57609067b2 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -5,13 +5,15 @@ import (
"fmt"
"os"
"os/user"
+ "runtime"
"sort"
"strconv"
"strings"
+ "sync"
"syscall"
"github.com/containers/storage/pkg/system"
- "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// IDMap contains a single entry for user namespace range remapping. An array
@@ -35,8 +37,9 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
const (
- subuidFileName string = "/etc/subuid"
- subgidFileName string = "/etc/subgid"
+ subuidFileName string = "/etc/subuid"
+ subgidFileName string = "/etc/subgid"
+ ContainersOverrideXattr = "user.containers.override_stat"
)
// MkdirAllAs creates a directory (include any along the path) and then modifies
@@ -82,7 +85,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
if len(uidMap) == 1 && uidMap[0].Size == 1 {
uid = uidMap[0].HostID
} else {
- uid, err = toHost(0, uidMap)
+ uid, err = RawToHost(0, uidMap)
if err != nil {
return -1, -1, err
}
@@ -90,7 +93,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
if len(gidMap) == 1 && gidMap[0].Size == 1 {
gid = gidMap[0].HostID
} else {
- gid, err = toHost(0, gidMap)
+ gid, err = RawToHost(0, gidMap)
if err != nil {
return -1, -1, err
}
@@ -98,10 +101,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
return uid, gid, nil
}
-// toContainer takes an id mapping, and uses it to translate a
-// host ID to the remapped ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id
-func toContainer(hostID int, idMap []IDMap) (int, error) {
+// RawToContainer takes an id mapping, and uses it to translate a host ID to
+// the remapped ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
@@ -111,13 +118,17 @@ func toContainer(hostID int, idMap []IDMap) (int, error) {
return contID, nil
}
}
- return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+ return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID)
}
-// toHost takes an id mapping and a remapped ID, and translates the
-// ID to the mapped host ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id #
-func toHost(contID int, idMap []IDMap) (int, error) {
+// RawToHost takes an id mapping and a remapped ID, and translates the ID to
+// the mapped host ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
@@ -127,7 +138,7 @@ func toHost(contID int, idMap []IDMap) (int, error) {
return hostID, nil
}
}
- return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+ return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}
// IDPair is a UID and GID pair
@@ -155,10 +166,10 @@ func NewIDMappings(username, groupname string) (*IDMappings, error) {
return nil, err
}
if len(subuidRanges) == 0 {
- return nil, fmt.Errorf("No subuid ranges found for user %q in %s", username, subuidFileName)
+ return nil, fmt.Errorf("no subuid ranges found for user %q in %s", username, subuidFileName)
}
if len(subgidRanges) == 0 {
- return nil, fmt.Errorf("No subgid ranges found for group %q in %s", groupname, subgidFileName)
+ return nil, fmt.Errorf("no subgid ranges found for group %q in %s", groupname, subgidFileName)
}
return &IDMappings{
@@ -182,31 +193,87 @@ func (i *IDMappings) RootPair() IDPair {
}
// ToHost returns the host UID and GID for the container uid, gid.
-// Remapping is only performed if the ids aren't already the remapped root ids
func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+ var err error
+ var target IDPair
+
+ target.UID, err = RawToHost(pair.UID, i.uids)
+ if err != nil {
+ return target, err
+ }
+
+ target.GID, err = RawToHost(pair.GID, i.gids)
+ return target, err
+}
+
+var (
+ overflowUIDOnce sync.Once
+ overflowGIDOnce sync.Once
+ overflowUID int
+ overflowGID int
+)
+
+// getOverflowUID returns the UID mapped to the overflow user
+func getOverflowUID() int {
+ overflowUIDOnce.Do(func() {
+ // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present
+ overflowUID = 65534
+ if content, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
+ if tmp, err := strconv.Atoi(string(content)); err == nil {
+ overflowUID = tmp
+ }
+ }
+ })
+ return overflowUID
+}
+
+// getOverflowUID returns the GID mapped to the overflow user
+func getOverflowGID() int {
+ overflowGIDOnce.Do(func() {
+ // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
+ overflowGID = 65534
+ if content, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
+ if tmp, err := strconv.Atoi(string(content)); err == nil {
+ overflowGID = tmp
+ }
+ }
+ })
+ return overflowGID
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
+// If the mapping is not possible because the target ID is not mapped into
+// the namespace, then the overflow ID is used.
+func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) {
var err error
target := i.RootPair()
if pair.UID != target.UID {
- target.UID, err = toHost(pair.UID, i.uids)
+ target.UID, err = RawToHost(pair.UID, i.uids)
if err != nil {
- return target, err
+ target.UID = getOverflowUID()
+ logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID)
}
}
if pair.GID != target.GID {
- target.GID, err = toHost(pair.GID, i.gids)
+ target.GID, err = RawToHost(pair.GID, i.gids)
+ if err != nil {
+ target.GID = getOverflowGID()
+ logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID)
+ }
}
- return target, err
+ return target, nil
}
// ToContainer returns the container UID and GID for the host uid and gid
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
- uid, err := toContainer(pair.UID, i.uids)
+ uid, err := RawToContainer(pair.UID, i.uids)
if err != nil {
return -1, -1, err
}
- gid, err := toContainer(pair.GID, i.gids)
+ gid, err := RawToContainer(pair.GID, i.gids)
return uid, gid, err
}
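
Reviewer note: a sketch of the newly exported mapping helpers with one illustrative range (container IDs 0-65535 mapped to host IDs starting at 100000):

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/idtools"
    )

    func main() {
        uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

        host, _ := idtools.RawToHost(0, uidMap)           // 100000: container root on the host
        cont, _ := idtools.RawToContainer(100001, uidMap) // 1
        fmt.Println(host, cont)

        _, err := idtools.RawToHost(70000, uidMap) // outside every range: error
        fmt.Println(err)
    }
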
@@ -274,16 +341,16 @@ func parseSubidFile(path, username string) (ranges, error) {
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
- return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+ return rangeList, fmt.Errorf("cannot parse subuid/gid information: Format not correct for %s file", path)
}
if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") {
startid, err := strconv.Atoi(parts[1])
if err != nil {
- return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err)
}
length, err := strconv.Atoi(parts[2])
if err != nil {
- return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err)
}
rangeList = append(rangeList, subIDRange{startid, length})
}
@@ -293,12 +360,31 @@ func parseSubidFile(path, username string) (ranges, error) {
func checkChownErr(err error, name string, uid, gid int) error {
if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
- return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name)
+ return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err)
}
return err
}
func SafeChown(name string, uid, gid int) error {
+ if runtime.GOOS == "darwin" {
+ var mode uint64 = 0o0700
+ xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
+ if err == nil {
+ attrs := strings.Split(string(xstat), ":")
+ if len(attrs) == 3 {
+ val, err := strconv.ParseUint(attrs[2], 8, 32)
+ if err == nil {
+ mode = val
+ }
+ }
+ }
+ value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
+ if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil {
+ return err
+ }
+ uid = os.Getuid()
+ gid = os.Getgid()
+ }
if stat, statErr := system.Stat(name); statErr == nil {
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
return nil
@@ -308,6 +394,25 @@ func SafeChown(name string, uid, gid int) error {
}
func SafeLchown(name string, uid, gid int) error {
+ if runtime.GOOS == "darwin" {
+ var mode uint64 = 0o0700
+ xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
+ if err == nil {
+ attrs := strings.Split(string(xstat), ":")
+ if len(attrs) == 3 {
+ val, err := strconv.ParseUint(attrs[2], 8, 32)
+ if err == nil {
+ mode = val
+ }
+ }
+ }
+ value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
+ if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil {
+ return err
+ }
+ uid = os.Getuid()
+ gid = os.Getgid()
+ }
if stat, statErr := system.Lstat(name); statErr == nil {
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
return nil
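
Reviewer note: on darwin, SafeChown/SafeLchown record the requested ownership in the user.containers.override_stat xattr as "uid:gid:0mode" and then fall through to chown as the invoking user. A sketch of composing and parsing that value (pure string handling, no syscalls):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // encode matches the format written above: "%d:%d:0%o".
    func encode(uid, gid int, mode uint64) string {
        return fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
    }

    func decode(v string) (uid, gid int, mode uint64, err error) {
        parts := strings.Split(v, ":")
        if len(parts) != 3 {
            return 0, 0, 0, fmt.Errorf("malformed override xattr %q", v)
        }
        if uid, err = strconv.Atoi(parts[0]); err != nil {
            return
        }
        if gid, err = strconv.Atoi(parts[1]); err != nil {
            return
        }
        mode, err = strconv.ParseUint(parts[2], 8, 32) // octal, as parsed above
        return
    }

    func main() {
        v := encode(0, 0, 0o700)
        fmt.Println(v) // 0:0:0700
        fmt.Println(decode(v))
    }
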
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
index db50a62e4c0..03e78737631 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
@@ -1,40 +1,66 @@
+//go:build linux && cgo && libsubid
// +build linux,cgo,libsubid
package idtools
import (
+ "errors"
+ "os/user"
"unsafe"
-
- "github.com/pkg/errors"
)
/*
#cgo LDFLAGS: -l subid
#include <shadow/subid.h>
#include <stdlib.h>
+#include <stdio.h>
const char *Prog = "storage";
+FILE *shadow_logfd = NULL;
+
struct subid_range get_range(struct subid_range *ranges, int i)
{
- return ranges[i];
+ shadow_logfd = stderr;
+ return ranges[i];
}
+
+#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
+# define subid_get_uid_ranges get_subuid_ranges
+# define subid_get_gid_ranges get_subgid_ranges
+#endif
+
*/
import "C"
func readSubid(username string, isUser bool) (ranges, error) {
var ret ranges
+ uidstr := ""
+
if username == "ALL" {
return nil, errors.New("username ALL not supported")
}
+ if u, err := user.Lookup(username); err == nil {
+ uidstr = u.Uid
+ }
+
cUsername := C.CString(username)
defer C.free(unsafe.Pointer(cUsername))
+ cuidstr := C.CString(uidstr)
+ defer C.free(unsafe.Pointer(cuidstr))
+
var nRanges C.int
var cRanges *C.struct_subid_range
if isUser {
- nRanges = C.get_subuid_ranges(cUsername, &cRanges)
+ nRanges = C.subid_get_uid_ranges(cUsername, &cRanges)
+ if nRanges <= 0 {
+ nRanges = C.subid_get_uid_ranges(cuidstr, &cRanges)
+ }
} else {
- nRanges = C.get_subgid_ranges(cUsername, &cRanges)
+ nRanges = C.subid_get_gid_ranges(cUsername, &cRanges)
+ if nRanges <= 0 {
+ nRanges = C.subid_get_gid_ranges(cuidstr, &cRanges)
+ }
}
if nRanges < 0 {
return nil, errors.New("cannot read subids")
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index 9776b2a1287..daff1e4a99d 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package idtools
@@ -46,6 +47,9 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
+ if !filepath.IsAbs(dirPath) {
+ return fmt.Errorf("path: %s should be absolute", dirPath)
+ }
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go
index 1c819a1f971..042d0ea9577 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/parser.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go
@@ -11,22 +11,22 @@ import (
func parseTriple(spec []string) (container, host, size uint32, err error) {
cid, err := strconv.ParseUint(spec[0], 10, 32)
if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
+ return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[0], err)
}
hid, err := strconv.ParseUint(spec[1], 10, 32)
if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
+ return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[1], err)
}
sz, err := strconv.ParseUint(spec[2], 10, 32)
if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
+ return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[2], err)
}
return uint32(cid), uint32(hid), uint32(sz), nil
}
// ParseIDMap parses idmap triples from string.
func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) {
- stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec)
+ stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec)
for _, idMapSpec := range mapSpec {
if idMapSpec == "" {
continue
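
Reviewer note: ParseIDMap consumes container:host:size triples; a sketch with one illustrative mapping:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/idtools"
    )

    func main() {
        m, err := idtools.ParseIDMap([]string{"0:100000:65536"}, "uidmap")
        if err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", m) // [{ContainerID:0 HostID:100000 Size:65536}]

        _, err = idtools.ParseIDMap([]string{"0:100000"}, "uidmap")
        fmt.Println(err) // malformed triple: "initializing ID mappings: ..."
    }
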
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
index 3dd7bf21057..a467f41c356 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
@@ -37,32 +37,32 @@ var (
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
if err := addUser(name); err != nil {
- return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+ return -1, -1, fmt.Errorf("adding user %q: %w", name, err)
}
// Query the system for the created uid and gid pair
out, err := execCmd("id", name)
if err != nil {
- return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+ return -1, -1, fmt.Errorf("trying to find uid/gid for new user %q: %w", name, err)
}
matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
if len(matches) != 3 {
- return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+ return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out))
}
uid, err := strconv.Atoi(matches[1])
if err != nil {
- return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+ return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %w", matches[1], err)
}
gid, err := strconv.Atoi(matches[2])
if err != nil {
- return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+ return -1, -1, fmt.Errorf("can't convert found gid (%s) to int: %w", matches[2], err)
}
// Now we need to create the subuid/subgid ranges for our new user/group (system users
// do not get auto-created ranges in subuid/subgid)
if err := createSubordinateRanges(name); err != nil {
- return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+ return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %w", err)
}
return uid, gid, nil
}
@@ -77,12 +77,12 @@ func addUser(userName string) error {
}
})
if userCommand == "" {
- return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+ return fmt.Errorf("cannot add user; no useradd/adduser binary found")
}
args := fmt.Sprintf(cmdTemplates[userCommand], userName)
out, err := execCmd(userCommand, args)
if err != nil {
- return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+ return fmt.Errorf("failed to add user with error: %w; output: %q", err, string(out))
}
return nil
}
@@ -93,33 +93,33 @@ func createSubordinateRanges(name string) error {
// by the distro tooling
ranges, err := readSubuid(name)
if err != nil {
- return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+ return fmt.Errorf("while looking for subuid ranges for user %q: %w", name, err)
}
if len(ranges) == 0 {
// no UID ranges; let's create one
startID, err := findNextUIDRange()
if err != nil {
- return fmt.Errorf("Can't find available subuid range: %v", err)
+ return fmt.Errorf("can't find available subuid range: %w", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
if err != nil {
- return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+ return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %w", name, out, err)
}
}
ranges, err = readSubgid(name)
if err != nil {
- return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+ return fmt.Errorf("while looking for subgid ranges for user %q: %w", name, err)
}
if len(ranges) == 0 {
// no GID ranges; let's create one
startID, err := findNextGIDRange()
if err != nil {
- return fmt.Errorf("Can't find available subgid range: %v", err)
+ return fmt.Errorf("can't find available subgid range: %w", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
if err != nil {
- return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+ return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %w", name, out, err)
}
}
return nil
@@ -128,7 +128,7 @@ func createSubordinateRanges(name string) error {
func findNextUIDRange() (int, error) {
ranges, err := readSubuid("ALL")
if err != nil {
- return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+ return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %w", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
@@ -137,7 +137,7 @@ func findNextUIDRange() (int, error) {
func findNextGIDRange() (int, error) {
ranges, err := readSubgid("ALL")
if err != nil {
- return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+ return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %w", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
index d98b354cbd8..15bd98edefe 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package idtools
@@ -8,5 +9,5 @@ import "fmt"
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
- return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+ return -1, -1, fmt.Errorf("no support for adding users or groups on this OS")
}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
index 9703ecbd9d6..33a7dee6c59 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package idtools
@@ -23,7 +24,7 @@ func resolveBinary(binname string) (string, error) {
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
- return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+ return "", fmt.Errorf("binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
func execCmd(cmd, args string) ([]byte, error) {
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
index cd12470f9da..a74893e816e 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
@@ -2,7 +2,6 @@ package ioutils
import (
"io"
- "io/ioutil"
"os"
"path/filepath"
)
@@ -28,7 +27,7 @@ func SetDefaultOptions(opts AtomicFileWriterOptions) {
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (io.WriteCloser, error) {
- f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+ f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return nil, err
}
@@ -124,7 +123,7 @@ type AtomicWriteSet struct {
// commit. If no temporary directory is given the system
// default is used.
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
- td, err := ioutil.TempDir(tmpDir, "write-set-")
+ td, err := os.MkdirTemp(tmpDir, "write-set-")
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
index 1539ad21b57..9d5af610e09 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
@@ -1,10 +1,11 @@
+//go:build !windows
// +build !windows
package ioutils
-import "io/ioutil"
+import "os"
-// TempDir on Unix systems is equivalent to ioutil.TempDir.
+// TempDir on Unix systems is equivalent to os.MkdirTemp.
func TempDir(dir, prefix string) (string, error) {
- return ioutil.TempDir(dir, prefix)
+ return os.MkdirTemp(dir, prefix)
}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
index c719c120b5f..2c2242d69d4 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
@@ -1,16 +1,17 @@
+//go:build windows
// +build windows
package ioutils
import (
- "io/ioutil"
+ "os"
"github.com/containers/storage/pkg/longpath"
)
-// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
- tempDir, err := ioutil.TempDir(dir, prefix)
+ tempDir, err := os.MkdirTemp(dir, prefix)
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
index 6a00141c3de..d3f4df0985c 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
@@ -1,11 +1,10 @@
package lockfile
import (
+ "fmt"
"path/filepath"
"sync"
"time"
-
- "github.com/pkg/errors"
)
// A Locker represents a file lock where the file is used to cache an
@@ -87,14 +86,14 @@ func getLockfile(path string, ro bool) (Locker, error) {
}
cleanPath, err := filepath.Abs(path)
if err != nil {
- return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path)
+ return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err)
}
if locker, ok := lockfiles[cleanPath]; ok {
if ro && locker.IsReadWrite() {
- return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)
+ return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath)
}
if !ro && !locker.IsReadWrite() {
- return nil, errors.Errorf("lock %q is not a read-write lock", cleanPath)
+ return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath)
}
return locker, nil
}
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
index b224e7b5cf4..3c242016d1d 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
@@ -1,17 +1,20 @@
+//go:build linux || solaris || darwin || freebsd
// +build linux solaris darwin freebsd
package lockfile
import (
+ "bytes"
+ cryptorand "crypto/rand"
+ "encoding/binary"
"fmt"
"os"
"path/filepath"
"sync"
+ "sync/atomic"
"time"
- "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/system"
- "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -23,27 +26,58 @@ type lockfile struct {
counter int64
file string
fd uintptr
- lw string
+ lw []byte // "last writer"-unique value valid as of the last .Touch() or .Modified(), generated by newLastWriterID()
locktype int16
locked bool
ro bool
recursive bool
}
+const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID())
+var lastWriterIDCounter uint64 // Private state for newLastWriterID
+
+// newLastWriterID returns a new "last writer" ID.
+// The value must be different on every call, and also differ from values
+// generated by other processes.
+func newLastWriterID() []byte {
+ // The ID is (PID, time, per-process counter, random)
+ // PID + time represents both a unique process across reboots,
+ // and a specific time within the process; the per-process counter
+ // is an extra safeguard for in-process concurrency.
+ // The random part disambiguates across process namespaces
+ // (where PID values might collide), serves as a general-purpose
+ // extra safety, _and_ is used to pad the output to lastWriterIDSize,
+ // because other versions of this code exist and they don't work
+ // efficiently if the size of the value changes.
+ pid := os.Getpid()
+ tm := time.Now().UnixNano()
+ counter := atomic.AddUint64(&lastWriterIDCounter, 1)
+
+ res := make([]byte, lastWriterIDSize)
+ binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
+ binary.LittleEndian.PutUint64(res[8:16], counter)
+ binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
+ if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
+ panic(err) // This shouldn't happen
+ }
+
+ return res
+}
+
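Reviewer note: the 64-byte last-writer value packs time, a per-process counter, the PID, and random padding at fixed offsets. A sketch that decodes those fields back out, for illustration only; the lock code itself only compares the value bytewise.

    package main

    import (
        "encoding/binary"
        "fmt"
        "time"
    )

    func decodeLastWriter(id []byte) {
        if len(id) != 64 {
            return
        }
        tm := int64(binary.LittleEndian.Uint64(id[0:8]))
        counter := binary.LittleEndian.Uint64(id[8:16])
        pid := binary.LittleEndian.Uint32(id[16:20])
        fmt.Printf("written at %s by pid %d (touch #%d); %d random pad bytes\n",
            time.Unix(0, tm), pid, counter, len(id)-20)
    }

    func main() {
        // newLastWriterID is unexported; fake a value with the same layout.
        id := make([]byte, 64)
        binary.LittleEndian.PutUint64(id[0:8], uint64(time.Now().UnixNano()))
        binary.LittleEndian.PutUint64(id[8:16], 1)
        binary.LittleEndian.PutUint32(id[16:20], 4242)
        decodeLastWriter(id)
    }
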
// openLock opens the file at path and returns the corresponding file
-// descriptor. Note that the path is opened read-only when ro is set. If ro
-// is unset, openLock will open the path read-write and create the file if
-// necessary.
+// descriptor. The path is opened either read-only or read-write,
+// depending on the value of the ro argument.
+//
+// openLock will create the file and its parent directories,
+// if necessary.
func openLock(path string, ro bool) (fd int, err error) {
+ flags := unix.O_CLOEXEC | os.O_CREATE
if ro {
- fd, err = unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC, 0)
+ flags |= os.O_RDONLY
} else {
- fd, err = unix.Open(path,
- os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE,
- unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH,
- )
+ flags |= os.O_RDWR
}
-
+ fd, err = unix.Open(path, flags, 0o644)
if err == nil {
return
}
@@ -51,13 +85,13 @@ func openLock(path string, ro bool) (fd int, err error) {
// the directory of the lockfile seems to be removed, try to create it
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
- return fd, errors.Wrap(err, "creating locker directory")
+ return fd, fmt.Errorf("creating locker directory: %w", err)
}
return openLock(path, ro)
}
- return
+ return fd, &os.PathError{Op: "open", Path: path, Err: err}
}
// createLockerForPath returns a Locker object, possibly (depending on the platform)
@@ -77,7 +111,7 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
- return nil, errors.Wrapf(err, "error opening %q", path)
+ return nil, err
}
unix.Close(fd)
@@ -89,7 +123,7 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
stateMutex: &sync.Mutex{},
rwMutex: &sync.RWMutex{},
file: path,
- lw: stringid.GenerateRandomID(),
+ lw: newLastWriterID(),
locktype: int16(locktype),
locked: false,
ro: ro}, nil
@@ -124,7 +158,7 @@ func (l *lockfile) lock(lType int16, recursive bool) {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
- panic(fmt.Sprintf("error opening %q: %v", l.file, err))
+ panic(err)
}
l.fd = uintptr(fd)
@@ -212,13 +246,12 @@ func (l *lockfile) Touch() error {
panic("attempted to update last-writer in lockfile without the write lock")
}
defer l.stateMutex.Unlock()
- l.lw = stringid.GenerateRandomID()
- id := []byte(l.lw)
- n, err := unix.Pwrite(int(l.fd), id, 0)
+ l.lw = newLastWriterID()
+ n, err := unix.Pwrite(int(l.fd), l.lw, 0)
if err != nil {
return err
}
- if n != len(id) {
+ if n != len(l.lw) {
return unix.ENOSPC
}
return nil
@@ -228,21 +261,21 @@ func (l *lockfile) Touch() error {
// was loaded.
func (l *lockfile) Modified() (bool, error) {
l.stateMutex.Lock()
- id := []byte(l.lw)
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
defer l.stateMutex.Unlock()
- n, err := unix.Pread(int(l.fd), id, 0)
+ currentLW := make([]byte, len(l.lw))
+ n, err := unix.Pread(int(l.fd), currentLW, 0)
if err != nil {
return true, err
}
- if n != len(id) {
+ if n != len(l.lw) {
return true, nil
}
- lw := l.lw
- l.lw = string(id)
- return l.lw != lw, nil
+ oldLW := l.lw
+ l.lw = currentLW
+ return !bytes.Equal(currentLW, oldLW), nil
}
// IsReadWriteLock indicates if the lock file is a read-write lock.
diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
index e2cf30b41ba..6f072650537 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
@@ -43,7 +43,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
var st syscall.Stat_t
err = syscall.Fstat(int(sparseFile.Fd()), &st)
if err != nil {
- logrus.Errorf("Error reading information about loopback file %s: %v", sparseName, err)
+ logrus.Errorf("Reading information about loopback file %s: %v", sparseName, err)
return nil, ErrAttachLoopbackDevice
}
@@ -68,7 +68,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
// OpenFile adds O_CLOEXEC
loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
if err != nil {
- logrus.Errorf("Error opening loopback device: %s", err)
+ logrus.Errorf("Opening loopback device: %s", err)
return nil, ErrAttachLoopbackDevice
}
@@ -90,7 +90,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
// device and inode numbers.
dev, ino, err := getLoopbackBackingFile(loopFile)
if err != nil {
- logrus.Errorf("Error getting loopback backing file: %s", err)
+ logrus.Errorf("Getting loopback backing file: %s", err)
return nil, ErrGetLoopbackBackingFile
}
if dev != uint64(st.Dev) || ino != st.Ino {
@@ -125,7 +125,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
// OpenFile adds O_CLOEXEC
sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
if err != nil {
- logrus.Errorf("Error opening sparse file %s: %s", sparseName, err)
+ logrus.Errorf("Opening sparse file: %v", err)
return nil, ErrAttachLoopbackDevice
}
defer sparseFile.Close()
@@ -147,7 +147,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
// If the call failed, then free the loopback device
if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
- logrus.Error("Error while cleaning up the loopback device")
+ logrus.Error("While cleaning up the loopback device")
}
loopFile.Close()
return nil, ErrAttachLoopbackDevice
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
index f4cf2826e93..c9be05776d1 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
@@ -13,7 +13,7 @@ import (
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
loopInfo, err := ioctlLoopGetStatus64(file.Fd())
if err != nil {
- logrus.Errorf("Error get loopback backing file: %s", err)
+ logrus.Errorf("Get loopback backing file: %v", err)
return 0, 0, ErrGetLoopbackBackingFile
}
return loopInfo.loDevice, loopInfo.loInode, nil
@@ -22,7 +22,7 @@ func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
// SetCapacity reloads the size for the loopback device.
func SetCapacity(file *os.File) error {
if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
- logrus.Errorf("Error loopbackSetCapacity: %s", err)
+ logrus.Errorf("loopbackSetCapacity: %s", err)
return ErrSetCapacity
}
return nil
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go
index 07a0f4847c1..5de3a671dd3 100644
--- a/vendor/github.com/containers/storage/pkg/mount/flags.go
+++ b/vendor/github.com/containers/storage/pkg/mount/flags.go
@@ -99,7 +99,7 @@ func MergeTmpfsOptions(options []string) ([]string, error) {
}
opt := strings.SplitN(option, "=", 2)
if len(opt) != 2 || !validFlags[opt[0]] {
- return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+ return nil, fmt.Errorf("invalid tmpfs option %q", opt)
}
if !dataCollisions[opt[0]] {
// We prepend the option and add to collision map
@@ -142,7 +142,7 @@ func ParseTmpfsOptions(options string) (int, string, error) {
for _, o := range strings.Split(data, ",") {
opt := strings.SplitN(o, "=", 2)
if !validFlags[opt[0]] {
- return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+ return 0, "", fmt.Errorf("invalid tmpfs option %q", opt)
}
}
return flags, data, nil
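
For context, both helpers reject anything outside a whitelist of tmpfs data options, which is what the lowercased errors above report. A hedged usage sketch, assuming "size" and "mode" are among the package's validFlags as in the upstream file:

    package main

    import (
    	"fmt"

    	"github.com/containers/storage/pkg/mount"
    )

    func main() {
    	// Flag words (noexec, nosuid, ...) become bits in flags; key=value
    	// pairs are returned as the data string for the mount call.
    	flags, data, err := mount.ParseTmpfsOptions("noexec,nosuid,size=64m,mode=1777")
    	if err != nil {
    		fmt.Println(err) // e.g. invalid tmpfs option ["foo"]
    		return
    	}
    	fmt.Printf("flags=%#x data=%q\n", flags, data)
    }
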
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
new file mode 100644
index 00000000000..3ba99cf9351
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
@@ -0,0 +1,48 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // RDONLY will mount the file system read-only.
+ RDONLY = unix.MNT_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = unix.MNT_NOSUID
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = unix.MNT_NOEXEC
+
+ // SYNCHRONOUS will cause I/O to the file system to be done synchronously.
+ SYNCHRONOUS = unix.MNT_SYNCHRONOUS
+
+ // REMOUNT will attempt to remount an already-mounted file system. This is
+ // commonly used to change the mount flags for a file system, especially to
+ // make a readonly file system writeable. It does not change device or mount
+ // point.
+ REMOUNT = unix.MNT_UPDATE
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = unix.MNT_NOATIME
+
+ mntDetach = unix.MNT_FORCE
+
+ NODIRATIME = 0
+ NODEV = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ BIND = 0
+ RBIND = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SLAVE = 0
+ RSLAVE = 0
+ SHARED = 0
+ RSHARED = 0
+ RELATIME = 0
+ STRICTATIME = 0
+)
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
index 9afd26d4c06..ee0f593a50a 100644
--- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
@@ -1,4 +1,5 @@
-// +build !linux
+//go:build !linux && !freebsd
+// +build !linux,!freebsd
package mount
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
index b31cf99d0ff..c70b0bf9916 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -1,3 +1,6 @@
+//go:build freebsd && cgo
+// +build freebsd,cgo
+
package mount
/*
@@ -28,14 +31,25 @@ func allocateIOVecs(options []string) []C.struct_iovec {
func mount(device, target, mType string, flag uintptr, data string) error {
isNullFS := false
- xs := strings.Split(data, ",")
- for _, x := range xs {
- if x == "bind" {
- isNullFS = true
+ options := []string{"fspath", target}
+
+ if data != "" {
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ continue
+ }
+ opt := strings.SplitN(x, "=", 2)
+ options = append(options, opt[0])
+ if len(opt) == 2 {
+ options = append(options, opt[1])
+ } else {
+ options = append(options, "")
+ }
}
}
- options := []string{"fspath", target}
if isNullFS {
options = append(options, "fstype", "nullfs", "target", device)
} else {
@@ -48,7 +62,7 @@ func mount(device, target, mType string, flag uintptr, data string) error {
if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
reason := C.GoString(C.strerror(*C.__error()))
- return fmt.Errorf("Failed to call nmount: %s", reason)
+ return fmt.Errorf("failed to call nmount: %s", reason)
}
return nil
}
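
The reworked loop above turns the comma-separated data string into nmount(2) name/value pairs instead of only scanning for "bind". A pure-Go re-statement of that translation, as a sketch (the non-nullfs branch is assumed to append fstype/from, as in the full upstream file):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // nmountOptions mirrors the option-building logic from mounter_freebsd.go.
    func nmountOptions(device, target, mType, data string) []string {
    	isNullFS := false
    	options := []string{"fspath", target}
    	if data != "" {
    		for _, x := range strings.Split(data, ",") {
    			if x == "bind" {
    				isNullFS = true
    				continue
    			}
    			opt := strings.SplitN(x, "=", 2)
    			options = append(options, opt[0])
    			if len(opt) == 2 {
    				options = append(options, opt[1])
    			} else {
    				options = append(options, "") // flag-style options get an empty value
    			}
    		}
    	}
    	if isNullFS {
    		options = append(options, "fstype", "nullfs", "target", device)
    	} else {
    		options = append(options, "fstype", mType, "from", device)
    	}
    	return options
    }

    func main() {
    	fmt.Println(nmountOptions("/src", "/mnt", "nullfs", "bind,ro"))
    	// [fspath /mnt ro  fstype nullfs target /src] (empty value after "ro")
    }
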
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
index 9d20cfbf869..74fe666090f 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
@@ -1,4 +1,6 @@
-// +build !linux,!freebsd
+//go:build !linux && !(freebsd && cgo)
+// +build !linux
+// +build !freebsd !cgo
package mount
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
index 71f205b2852..20d67f78071 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
@@ -1,3 +1,4 @@
+//go:build darwin
// +build darwin
// Package kernel provides helper function to get, parse and compare kernel
@@ -37,16 +38,16 @@ func getRelease() (string, error) {
// It has the format like ' Kernel Version: Darwin 14.5.0'
content := strings.SplitN(line, ":", 2)
if len(content) != 2 {
- return "", fmt.Errorf("Kernel Version is invalid")
+ return "", fmt.Errorf("kernel version is invalid")
}
prettyNames, err := shellwords.Parse(content[1])
if err != nil {
- return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error())
+ return "", fmt.Errorf("kernel version is invalid: %s", err.Error())
}
if len(prettyNames) != 2 {
- return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ")
+ return "", fmt.Errorf("kernel version needs to be 'Darwin x.x.x' ")
}
release = prettyNames[1]
}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
index 76e1e499f37..7a68bc39bf4 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
@@ -35,7 +35,7 @@ func GetKernelVersion() (*VersionInfo, error) {
// the given version.
func CheckKernelVersion(k, major, minor int) bool {
if v, err := GetKernelVersion(); err != nil {
- logrus.Warnf("error getting kernel version: %s", err)
+ logrus.Warnf("Error getting kernel version: %s", err)
} else {
if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
return false
diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
index acc897168f3..85c23381d91 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/parsers.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
@@ -13,7 +13,7 @@ import (
func ParseKeyValueOpt(opt string) (string, string, error) {
parts := strings.SplitN(opt, "=", 2)
if len(parts) != 2 {
- return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
+ return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
}
return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}
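
A quick illustration of the (unchanged) behavior behind the lowercased error:

    package main

    import (
    	"fmt"

    	"github.com/containers/storage/pkg/parsers"
    )

    func main() {
    	k, v, err := parsers.ParseKeyValueOpt(" size = 120G ")
    	fmt.Println(k, v, err) // "size" "120G" <nil>; both sides are TrimSpace'd

    	_, _, err = parsers.ParseKeyValueOpt("no-equals-sign")
    	fmt.Println(err) // unable to parse key/value option: no-equals-sign
    }
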
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
new file mode 100644
index 00000000000..6f63ae99170
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
@@ -0,0 +1,37 @@
+// +build freebsd
+
+package reexec
+
+import (
+ "context"
+ "os"
+ "os/exec"
+
+ "golang.org/x/sys/unix"
+)
+
+// Self returns the path to the current process's binary.
+// Uses sysctl.
+func Self() string {
+ path, err := unix.SysctlArgs("kern.proc.pathname", -1)
+ if err == nil {
+ return path
+ }
+ return os.Args[0]
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+ cmd := exec.Command(Self())
+ cmd.Args = args
+ return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ cmd := exec.CommandContext(ctx, Self())
+ cmd.Args = args
+ return cmd
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
index 372bee7321f..d3dd86d349f 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -17,6 +17,7 @@ func Self() string {
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.Command(Self())
cmd.Args = args
return cmd
@@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd {
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
index 1ecaa906fed..a56ada2161e 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
@@ -1,4 +1,5 @@
-// +build freebsd solaris darwin
+//go:build solaris || darwin
+// +build solaris darwin
package reexec
@@ -17,6 +18,7 @@ func Self() string {
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.Command(Self())
cmd.Args = args
return cmd
@@ -24,6 +26,7 @@ func Command(args ...string) *exec.Cmd {
// CommandContext returns *exec.Cmd which has Path as current binary.
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
index 9d937426854..5b3605f319c 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
@@ -9,10 +9,12 @@ import (
// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
func Command(args ...string) *exec.Cmd {
+ panicIfNotInitialized()
return nil
}
// CommandContext is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ panicIfNotInitialized()
return nil
}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
index 673ab476abd..d868564767f 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
@@ -17,6 +17,7 @@ func Self() string {
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func Command(args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.Command(Self())
cmd.Args = args
return cmd
@@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd {
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ panicIfNotInitialized()
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go
index c56671d9192..a1938cd4f34 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go
@@ -7,7 +7,10 @@ import (
"path/filepath"
)
-var registeredInitializers = make(map[string]func())
+var (
+ registeredInitializers = make(map[string]func())
+ initWasCalled = false
+)
// Register adds an initialization func under the specified name
func Register(name string, initializer func()) {
@@ -22,6 +25,7 @@ func Register(name string, initializer func()) {
// initialization function was called.
func Init() bool {
initializer, exists := registeredInitializers[os.Args[0]]
+ initWasCalled = true
if exists {
initializer()
@@ -30,6 +34,21 @@ func Init() bool {
return false
}
+func panicIfNotInitialized() {
+ if !initWasCalled {
+ // The reexec package is used to run subroutines in
+ // subprocesses which would otherwise have unacceptable side
+ // effects on the main thread. If you found this error, then
+ // your program uses a package which needs to do this. In
+ // order for that to work, main() should start with this
+ // boilerplate, or an equivalent:
+ // if reexec.Init() {
+ // return
+ // }
+ panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()")
+ }
+}
+
func naiveSelf() string {
name := os.Args[0]
if filepath.Base(name) == name {
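
The new panicIfNotInitialized guard enforces the boilerplate spelled out in its comment. A minimal program that satisfies it, with a hypothetical initializer name:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/containers/storage/pkg/reexec"
    )

    func init() {
    	// "storage-helper" is a made-up name; Register panics on duplicates.
    	reexec.Register("storage-helper", func() {
    		fmt.Println("running in the re-exec'd child")
    		os.Exit(0)
    	})
    }

    func main() {
    	// The boilerplate the panic message asks for: when the child is
    	// re-exec'd with os.Args[0] == "storage-helper", Init runs the
    	// initializer and returns true, so main exits early.
    	if reexec.Init() {
    		return
    	}
    	cmd := reexec.Command("storage-helper")
    	cmd.Stdout = os.Stdout
    	cmd.Stderr = os.Stderr
    	if err := cmd.Run(); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    }
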
diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
index a0c7c42a050..4c434f0e583 100644
--- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go
+++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
@@ -12,6 +12,7 @@ import (
"regexp"
"strconv"
"strings"
+ "sync"
"time"
)
@@ -20,6 +21,9 @@ const shortLen = 12
var (
validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
+
+ rngLock sync.Mutex
+ rng *rand.Rand // A RNG with seeding properties we control. It can only be accessed with rngLock held.
)
// IsShortID determines if an arbitrary string *looks like* a short ID.
@@ -67,7 +71,9 @@ func GenerateRandomID() string {
// secure sources of random.
// It helps you to save entropy.
func GenerateNonCryptoID() string {
- return generateID(readerFunc(rand.Read))
+ rngLock.Lock()
+ defer rngLock.Unlock()
+ return generateID(readerFunc(rng.Read))
}
// ValidateID checks whether an ID string is a valid image ID.
@@ -79,7 +85,7 @@ func ValidateID(id string) error {
}
func init() {
- // safely set the seed globally so we generate random ids. Tries to use a
+ // Initialize a private RNG so we generate random ids. Tries to use a
// crypto seed before falling back to time.
var seed int64
if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
@@ -89,7 +95,7 @@ func init() {
seed = cryptoseed.Int64()
}
- rand.Seed(seed)
+ rng = rand.New(rand.NewSource(seed))
}
type readerFunc func(p []byte) (int, error)
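
The switch from the package-global math/rand source to a private *rand.Rand keeps the crypto-derived seed from being clobbered by (or clobbering) rand.Seed calls elsewhere in the process. The pattern in isolation, as a sketch:

    package main

    import (
    	cryptorand "crypto/rand"
    	"encoding/hex"
    	"fmt"
    	"math"
    	"math/big"
    	"math/rand"
    	"sync"
    	"time"
    )

    var (
    	rngLock sync.Mutex
    	rng     *rand.Rand // only accessed with rngLock held
    )

    func init() {
    	// Prefer a crypto-quality seed; fall back to the clock.
    	seed := time.Now().UnixNano()
    	if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err == nil {
    		seed = cryptoseed.Int64()
    	}
    	rng = rand.New(rand.NewSource(seed))
    }

    func randomHex(n int) string {
    	b := make([]byte, n)
    	rngLock.Lock()
    	rng.Read(b) // (*rand.Rand).Read never fails
    	rngLock.Unlock()
    	return hex.EncodeToString(b)
    }

    func main() {
    	fmt.Println(randomHex(32)) // 64 hex chars, like a full image ID
    }
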
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
new file mode 100644
index 00000000000..46cb40291f6
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
@@ -0,0 +1,85 @@
+//go:build freebsd && cgo
+// +build freebsd,cgo
+
+package system
+
+import (
+ "errors"
+ "fmt"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// #include <unistd.h>
+// #include <sys/types.h>
+// #include <sys/vmmeter.h>
+// #include <vm/vm_param.h>
+import "C"
+
+func getMemInfo() (int64, int64, error) {
+ data, err := unix.SysctlRaw("vm.vmtotal")
+ if err != nil {
+ return -1, -1, fmt.Errorf("can't get kernel info: %w", err)
+ }
+ if len(data) != C.sizeof_struct_vmtotal {
+ return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data))
+ }
+
+ total := (*C.struct_vmtotal)(unsafe.Pointer(&data[0]))
+
+ pagesize := int64(C.sysconf(C._SC_PAGESIZE))
+ npages := int64(C.sysconf(C._SC_PHYS_PAGES))
+ return pagesize * npages, pagesize * int64(total.t_free), nil
+}
+
+func getSwapInfo() (int64, int64, error) {
+ var (
+ total int64 = 0
+ used int64 = 0
+ )
+ swapCount, err := unix.SysctlUint32("vm.nswapdev")
+ if err != nil {
+ return -1, -1, fmt.Errorf("reading vm.nswapdev: %w", err)
+ }
+ for i := 0; i < int(swapCount); i++ {
+ data, err := unix.SysctlRaw("vm.swap_info", i)
+ if err != nil {
+ return -1, -1, fmt.Errorf("reading vm.swap_info.%d: %w", i, err)
+ }
+ if len(data) != C.sizeof_struct_xswdev {
+ return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data))
+ }
+ xsw := (*C.struct_xswdev)(unsafe.Pointer(&data[0]))
+ total += int64(xsw.xsw_nblks)
+ used += int64(xsw.xsw_used)
+ }
+ pagesize := int64(C.sysconf(C._SC_PAGESIZE))
+ return pagesize * total, pagesize * (total - used), nil
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ MemTotal, MemFree, err := getMemInfo()
+ if err != nil {
+ return nil, fmt.Errorf("getting memory totals %w", err)
+ }
+ SwapTotal, SwapFree, err := getSwapInfo()
+ if err != nil {
+ return nil, fmt.Errorf("getting swap totals %w", err)
+ }
+
+ if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 {
+ return nil, errors.New("getting system memory info")
+ }
+
+ meminfo := &MemInfo{}
+ // Total memory is total physical memory less memory locked by the kernel
+ meminfo.MemTotal = MemTotal
+ meminfo.MemFree = MemFree
+ meminfo.SwapTotal = SwapTotal
+ meminfo.SwapFree = SwapFree
+
+ return meminfo, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
index 925776e789b..d727b545c49 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -1,3 +1,4 @@
+//go:build solaris && cgo
// +build solaris,cgo
package system
@@ -90,7 +91,7 @@ func ReadMemInfo() (*MemInfo, error) {
if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
SwapFree < 0 {
- return nil, fmt.Errorf("error getting system memory info %v\n", err)
+ return nil, fmt.Errorf("getting system memory info %w", err)
}
meminfo := &MemInfo{}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
index 3ce019dffdd..0f9feb1d228 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
@@ -1,4 +1,8 @@
-// +build !linux,!windows,!solaris
+//go:build !linux && !windows && !solaris && !(freebsd && cgo)
+// +build !linux
+// +build !windows
+// +build !solaris
+// +build !freebsd !cgo
package system
diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go
index c276ce8e80d..d3d0ed8a128 100644
--- a/vendor/github.com/containers/storage/pkg/system/mknod.go
+++ b/vendor/github.com/containers/storage/pkg/system/mknod.go
@@ -1,3 +1,4 @@
+//go:build !windows && !freebsd
// +build !windows,!freebsd
package system
@@ -8,8 +9,8 @@ import (
// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev.
-func Mknod(path string, mode uint32, dev int) error {
- return unix.Mknod(path, mode, dev)
+func Mknod(path string, mode uint32, dev uint32) error {
+ return unix.Mknod(path, mode, int(dev))
}
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
index d09005589af..53c3f2837e2 100644
--- a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
@@ -1,3 +1,4 @@
+//go:build freebsd
// +build freebsd
package system
@@ -17,6 +18,6 @@ func Mknod(path string, mode uint32, dev uint64) error {
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor.
-func Mkdev(major int64, minor int64) uint32 {
- return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+func Mkdev(major int64, minor int64) uint64 {
+ return uint64(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
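
The signature changes above (dev widened to uint64, Mkdev returning uint64) track FreeBSD's 64-bit dev_t. A hedged caller sketch for a FreeBSD build, with made-up major/minor numbers:

    //go:build freebsd

    package main

    import (
    	"fmt"

    	"github.com/containers/storage/pkg/system"
    	"golang.org/x/sys/unix"
    )

    func main() {
    	dev := system.Mkdev(8, 1) // hypothetical major/minor pair
    	if err := system.Mknod("/tmp/test-node", unix.S_IFCHR|0600, dev); err != nil {
    		fmt.Println("mknod:", err) // needs root (or equivalent privilege)
    	}
    }
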
diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go
index aab891522db..9f250973843 100644
--- a/vendor/github.com/containers/storage/pkg/system/path_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package system
@@ -21,13 +22,13 @@ import (
// d:\ --> Fail
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
if len(path) == 2 && string(path[1]) == ":" {
- return "", fmt.Errorf("No relative path specified in %q", path)
+ return "", fmt.Errorf("relative path not specified in %q", path)
}
if !filepath.IsAbs(path) || len(path) < 2 {
return filepath.FromSlash(path), nil
}
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
- return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ return "", fmt.Errorf("specified path is not on the system drive (C:)")
}
return filepath.FromSlash(path[2:]), nil
}
diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go
index 510e714283d..5d63dc7418e 100644
--- a/vendor/github.com/containers/storage/pkg/system/rm.go
+++ b/vendor/github.com/containers/storage/pkg/system/rm.go
@@ -1,12 +1,12 @@
package system
import (
+ "fmt"
"os"
"syscall"
"time"
"github.com/containers/storage/pkg/mount"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -35,6 +35,9 @@ func EnsureRemoveAll(dir string) error {
}
for {
+ if err := resetFileFlags(dir); err != nil {
+ return fmt.Errorf("resetting file flags: %w", err)
+ }
err := os.RemoveAll(dir)
if err == nil {
return nil
@@ -67,7 +70,7 @@ func EnsureRemoveAll(dir string) error {
}
if e := mount.Unmount(pe.Path); e != nil {
- return errors.Wrapf(e, "error while removing %s", dir)
+ return fmt.Errorf("while removing %s: %w", dir, e)
}
if exitOnErr[pe.Path] == maxRetry {
diff --git a/vendor/github.com/containers/storage/pkg/system/rm_common.go b/vendor/github.com/containers/storage/pkg/system/rm_common.go
new file mode 100644
index 00000000000..117eb1d6dc5
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/rm_common.go
@@ -0,0 +1,10 @@
+//go:build !freebsd
+// +build !freebsd
+
+package system
+
+// Reset file flags in a directory tree. This allows EnsureRemoveAll
+// to delete trees which have the immutable flag set.
+func resetFileFlags(dir string) error {
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go
new file mode 100644
index 00000000000..35896c11d21
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go
@@ -0,0 +1,32 @@
+package system
+
+import (
+ "io/fs"
+ "path/filepath"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func lchflags(path string, flags int) (err error) {
+ p, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ _, _, e1 := unix.Syscall(unix.SYS_LCHFLAGS, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+ if e1 != 0 {
+ return e1
+ }
+ return nil
+}
+
+// Reset file flags in a directory tree. This allows EnsureRemoveAll
+// to delete trees which have the immutable flag set.
+func resetFileFlags(dir string) error {
+ return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+ if err := lchflags(path, 0); err != nil {
+ return err
+ }
+ return nil
+ })
+}
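
Why this matters: on FreeBSD, a file flag such as UF_IMMUTABLE makes unlink fail with EPERM, so os.RemoveAll alone can leave layer trees undeletable. A hedged illustration of what resetFileFlags undoes (FreeBSD-only; unix.Chflags and unix.UF_IMMUTABLE are assumed to be available in this build of golang.org/x/sys/unix):

    //go:build freebsd

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	const path = "/tmp/immutable-demo"
    	if f, err := os.Create(path); err == nil {
    		f.Close()
    	}
    	if err := unix.Chflags(path, unix.UF_IMMUTABLE); err != nil {
    		fmt.Println("chflags:", err)
    		return
    	}
    	fmt.Println("remove while immutable:", os.Remove(path)) // expected: operation not permitted

    	// Clear the flags, as resetFileFlags does for every entry in the tree.
    	unix.Chflags(path, 0)
    	fmt.Println("remove after clearing:", os.Remove(path)) // expected: <nil>
    }
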
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
index 49dbdd37817..c4816c133e7 100644
--- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
@@ -1,8 +1,13 @@
-// +build linux freebsd
+//go:build linux || freebsd || darwin
+// +build linux freebsd darwin
package system
-import "golang.org/x/sys/unix"
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
// Unmount is a platform-specific helper function to call
// the unmount syscall.
@@ -15,3 +20,8 @@ func Unmount(dest string) error {
func CommandLineToArgv(commandLine string) ([]string, error) {
return []string{commandLine}, nil
}
+
+// IsEBUSY checks if the specified error is EBUSY.
+func IsEBUSY(err error) bool {
+ return errors.Is(err, unix.EBUSY)
+}
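
Using errors.Is here (rather than err == unix.EBUSY) matters because this vendoring pass wraps errors with fmt.Errorf's %w verb throughout; the sentinel survives wrapping:

    package main

    import (
    	"fmt"

    	"github.com/containers/storage/pkg/system"
    	"golang.org/x/sys/unix"
    )

    func main() {
    	wrapped := fmt.Errorf("while removing %s: %w", "/var/lib/containers", unix.EBUSY)
    	fmt.Println(system.IsEBUSY(wrapped)) // true: errors.Is walks the %w chain
    	fmt.Println(wrapped == unix.EBUSY)   // false: a plain comparison misses it
    }
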
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go
index 23e9b207c75..f4d8692cdb8 100644
--- a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go
@@ -120,3 +120,8 @@ func HasWin32KSupport() bool {
// APIs.
return ntuserApiset.Load() == nil
}
+
+// IsEBUSY checks if the specified error is EBUSY.
+func IsEBUSY(err error) bool {
+ return false
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
index 6a77524376d..edc588a63f3 100644
--- a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
@@ -10,13 +10,14 @@ import (
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbolic link files because unix.UtimesNano doesn't support a NOFOLLOW flag.
func LUtimesNano(path string, ts []syscall.Timespec) error {
+ atFdCwd := unix.AT_FDCWD
+
var _path *byte
_path, err := unix.BytePtrFromString(path)
if err != nil {
return err
}
-
- if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+ if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
new file mode 100644
index 00000000000..75275b964ea
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
@@ -0,0 +1,84 @@
+package system
+
+import (
+ "bytes"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // Value is larger than the maximum size allowed
+ E2BIG unix.Errno = unix.E2BIG
+
+ // Operation not supported
+ EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
+)
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// Returns a []byte slice if the xattr is set and nil otherwise.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ // Start with a 128 length byte array
+ dest := make([]byte, 128)
+ sz, errno := unix.Lgetxattr(path, attr, dest)
+
+ for errno == unix.ERANGE {
+ // Buffer too small, use zero-sized buffer to get the actual size
+ sz, errno = unix.Lgetxattr(path, attr, []byte{})
+ if errno != nil {
+ return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno}
+ }
+ dest = make([]byte, sz)
+ sz, errno = unix.Lgetxattr(path, attr, dest)
+ }
+
+ switch {
+ case errno == unix.ENOATTR:
+ return nil, nil
+ case errno != nil:
+ return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno}
+ }
+
+ return dest[:sz], nil
+}
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ if err := unix.Lsetxattr(path, attr, data, flags); err != nil {
+ return &os.PathError{Op: "lsetxattr", Path: path, Err: err}
+ }
+
+ return nil
+}
+
+// Llistxattr lists extended attributes associated with the given path
+// in the file system.
+func Llistxattr(path string) ([]string, error) {
+ dest := make([]byte, 128)
+ sz, errno := unix.Llistxattr(path, dest)
+
+ for errno == unix.ERANGE {
+ // Buffer too small, use zero-sized buffer to get the actual size
+ sz, errno = unix.Llistxattr(path, []byte{})
+ if errno != nil {
+ return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno}
+ }
+
+ dest = make([]byte, sz)
+ sz, errno = unix.Llistxattr(path, dest)
+ }
+ if errno != nil {
+ return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno}
+ }
+
+ var attrs []string
+ for _, token := range bytes.Split(dest[:sz], []byte{0}) {
+ if len(token) > 0 {
+ attrs = append(attrs, string(token))
+ }
+ }
+
+ return attrs, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
index 10355848bdb..6b47c4e717f 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -13,6 +13,9 @@ const (
// Operation not supported
EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
+
+ // Value is too small or too large for the maximum size allowed
+ EOVERFLOW unix.Errno = unix.EOVERFLOW
)
// Lgetxattr retrieves the value of the extended attribute identified by attr
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
index bc8b8e3a5fe..221eb78bc22 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !linux,!darwin
package system
@@ -10,6 +10,9 @@ const (
// Operation not supported
EOPNOTSUPP syscall.Errno = syscall.Errno(0)
+
+ // Value is too small or too large for the maximum size allowed
+ EOVERFLOW syscall.Errno = syscall.Errno(0)
)
// Lgetxattr is not supported on platforms other than linux.
diff --git a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
index 26cd8504cf9..674e0a0baed 100644
--- a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
+++ b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
@@ -34,7 +34,7 @@ func NewLogger(logger func(*tar.Header)) (io.WriteCloser, error) {
}
// Make sure to avoid writes after the reader has been closed.
if err := reader.Close(); err != nil {
- logrus.Errorf("error closing tarlogger reader: %v", err)
+ logrus.Errorf("Closing tarlogger reader: %v", err)
}
// Unblock the Close().
t.closeMutex.Unlock()
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
index c0e359b2761..f5a7c3a259b 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.c
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
@@ -1,4 +1,4 @@
-#ifndef UNSHARE_NO_CODE_AT_ALL
+#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__linux__)
#define _GNU_SOURCE
#include
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
index 53cfeb0ec45..c854fdf5e47 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
@@ -6,8 +6,7 @@ import (
"os/user"
"sync"
- "github.com/pkg/errors"
- "github.com/syndtr/gocapability/capability"
+ "github.com/sirupsen/logrus"
)
var (
@@ -27,7 +26,7 @@ func HomeDir() (string, error) {
if home == "" {
usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID()))
if err != nil {
- homeDir, homeDirErr = "", errors.Wrapf(err, "unable to resolve HOME directory")
+ homeDir, homeDirErr = "", fmt.Errorf("unable to resolve HOME directory: %w", err)
return
}
homeDir, homeDirErr = usr.HomeDir, nil
@@ -38,19 +37,13 @@ func HomeDir() (string, error) {
return homeDir, homeDirErr
}
-// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
-func HasCapSysAdmin() (bool, error) {
- hasCapSysAdminOnce.Do(func() {
- currentCaps, err := capability.NewPid2(0)
- if err != nil {
- hasCapSysAdminErr = err
- return
- }
- if err = currentCaps.Load(); err != nil {
- hasCapSysAdminErr = err
- return
+func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname
+ if err != nil {
+ if format != "" {
+ logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
+ } else {
+ logrus.Errorf("%v", err)
}
- hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
- })
- return hasCapSysAdminRet, hasCapSysAdminErr
+ os.Exit(1)
+ }
}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
index b3f8099f6a0..fbfb90d599a 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
@@ -1,10 +1,11 @@
-// +build linux,cgo,!gccgo
+//go:build (linux && cgo && !gccgo) || (freebsd && cgo)
+// +build linux,cgo,!gccgo freebsd,cgo
package unshare
// #cgo CFLAGS: -Wall
// extern void _containers_unshare(void);
-// void __attribute__((constructor)) init(void) {
+// static void __attribute__((constructor)) init(void) {
// _containers_unshare();
// }
import "C"
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
new file mode 100644
index 00000000000..01cf33bde73
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
@@ -0,0 +1,53 @@
+// +build darwin
+
+package unshare
+
+import (
+ "os"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+ // UsernsEnvName is the environment variable that, if set, indicates that we are running in rootless mode
+ UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
+)
+
+// IsRootless tells us if we are running in rootless mode
+func IsRootless() bool {
+ return true
+}
+
+// GetRootlessUID returns the UID of the user in the parent userNS
+func GetRootlessUID() int {
+ return os.Getuid()
+}
+
+// RootlessEnv returns the environment settings for the rootless containers
+func RootlessEnv() []string {
+ return append(os.Environ(), UsernsEnvName+"=")
+}
+
+// MaybeReexecUsingUserNamespace re-execs the process in a new namespace
+func MaybeReexecUsingUserNamespace(evenForRoot bool) {
+}
+
+// GetHostIDMappings reads mappings for the specified process (or the current
+// process if pid is "self" or an empty string) from the kernel.
+func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
+ return nil, nil, nil
+}
+
+// ParseIDMappings parses mapping triples.
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
+ uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
+ if err != nil {
+ return nil, nil, err
+ }
+ gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
+ if err != nil {
+ return nil, nil, err
+ }
+ return uid, gid, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c
new file mode 100644
index 00000000000..0b2f1788696
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c
@@ -0,0 +1,76 @@
+#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__FreeBSD__)
+
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static int _containers_unshare_parse_envint(const char *envname) {
+ char *p, *q;
+ long l;
+
+ p = getenv(envname);
+ if (p == NULL) {
+ return -1;
+ }
+ q = NULL;
+ l = strtol(p, &q, 10);
+ if ((q == NULL) || (*q != '\0')) {
+ fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
+ _exit(1);
+ }
+ unsetenv(envname);
+ return l;
+}
+
+void _containers_unshare(void)
+{
+ int pidfd, continuefd, n, pgrp, sid, ctty;
+ char buf[2048];
+
+ pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe");
+ if (pidfd != -1) {
+ snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
+ size_t size = write(pidfd, buf, strlen(buf));
+ if (size != strlen(buf)) {
+ fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
+ _exit(1);
+ }
+ close(pidfd);
+ }
+ continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe");
+ if (continuefd != -1) {
+ n = read(continuefd, buf, sizeof(buf));
+ if (n > 0) {
+ fprintf(stderr, "Error: %.*s\n", n, buf);
+ _exit(1);
+ }
+ close(continuefd);
+ }
+ sid = _containers_unshare_parse_envint("_Containers-setsid");
+ if (sid == 1) {
+ if (setsid() == -1) {
+ fprintf(stderr, "Error during setsid: %m\n");
+ _exit(1);
+ }
+ }
+ pgrp = _containers_unshare_parse_envint("_Containers-setpgrp");
+ if (pgrp == 1) {
+ if (setpgrp(0, 0) == -1) {
+ fprintf(stderr, "Error during setpgrp: %m\n");
+ _exit(1);
+ }
+ }
+ ctty = _containers_unshare_parse_envint("_Containers-ctty");
+ if (ctty != -1) {
+ if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
+ fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
+ _exit(1);
+ }
+ }
+}
+
+#endif
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
new file mode 100644
index 00000000000..f52760abba2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
@@ -0,0 +1,179 @@
+//go:build freebsd
+// +build freebsd
+
+package unshare
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "syscall"
+
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/sirupsen/logrus"
+)
+
+// Cmd wraps an exec.Cmd created by the reexec package in unshare(),
+// and one day might handle setting ID maps and other related settings
+// by triggering initialization code in the child.
+type Cmd struct {
+ *exec.Cmd
+ Setsid bool
+ Setpgrp bool
+ Ctty *os.File
+ Hook func(pid int) error
+}
+
+// Command creates a new Cmd which can be customized.
+func Command(args ...string) *Cmd {
+ cmd := reexec.Command(args...)
+ return &Cmd{
+ Cmd: cmd,
+ }
+}
+
+func (c *Cmd) Start() error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ // Set environment variables to tell the child to synchronize its startup.
+ if c.Env == nil {
+ c.Env = os.Environ()
+ }
+
+ // Create the pipe for reading the child's PID.
+ pidRead, pidWrite, err := os.Pipe()
+ if err != nil {
+ return fmt.Errorf("creating pid pipe: %w", err)
+ }
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
+ c.ExtraFiles = append(c.ExtraFiles, pidWrite)
+
+ // Create the pipe for letting the child know to proceed.
+ continueRead, continueWrite, err := os.Pipe()
+ if err != nil {
+ pidRead.Close()
+ pidWrite.Close()
+ return fmt.Errorf("creating pid pipe: %w", err)
+ }
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
+ c.ExtraFiles = append(c.ExtraFiles, continueRead)
+
+ // Pass along other instructions.
+ if c.Setsid {
+ c.Env = append(c.Env, "_Containers-setsid=1")
+ }
+ if c.Setpgrp {
+ c.Env = append(c.Env, "_Containers-setpgrp=1")
+ }
+ if c.Ctty != nil {
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3))
+ c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
+ }
+
+ // Make sure we clean up our pipes.
+ defer func() {
+ if pidRead != nil {
+ pidRead.Close()
+ }
+ if pidWrite != nil {
+ pidWrite.Close()
+ }
+ if continueRead != nil {
+ continueRead.Close()
+ }
+ if continueWrite != nil {
+ continueWrite.Close()
+ }
+ }()
+
+ // Start the new process.
+ err = c.Cmd.Start()
+ if err != nil {
+ return err
+ }
+
+ // Close the ends of the pipes that the parent doesn't need.
+ continueRead.Close()
+ continueRead = nil
+ pidWrite.Close()
+ pidWrite = nil
+
+ // Read the child's PID from the pipe.
+ pidString := ""
+ b := new(bytes.Buffer)
+ if _, err := io.Copy(b, pidRead); err != nil {
+ return fmt.Errorf("reading child PID: %w", err)
+ }
+ pidString = b.String()
+ pid, err := strconv.Atoi(pidString)
+ if err != nil {
+ fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
+ return fmt.Errorf("parsing PID %q: %w", pidString, err)
+ }
+
+ // Run any additional setup that we want to do before the child starts running proper.
+ if c.Hook != nil {
+ if err = c.Hook(pid); err != nil {
+ fmt.Fprintf(continueWrite, "hook error: %v", err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *Cmd) Run() error {
+ if err := c.Start(); err != nil {
+ return err
+ }
+ return c.Wait()
+}
+
+func (c *Cmd) CombinedOutput() ([]byte, error) {
+ return nil, errors.New("unshare: CombinedOutput() not implemented")
+}
+
+func (c *Cmd) Output() ([]byte, error) {
+ return nil, errors.New("unshare: Output() not implemented")
+}
+
+type Runnable interface {
+ Run() error
+}
+
+// ExecRunnable runs the specified unshare command, captures its exit status,
+// and exits with the same status.
+func ExecRunnable(cmd Runnable, cleanup func()) {
+ exit := func(status int) {
+ if cleanup != nil {
+ cleanup()
+ }
+ os.Exit(status)
+ }
+ if err := cmd.Run(); err != nil {
+ if exitError, ok := err.(*exec.ExitError); ok {
+ if exitError.ProcessState.Exited() {
+ if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
+ if waitStatus.Exited() {
+ logrus.Debugf("%v", exitError)
+ exit(waitStatus.ExitStatus())
+ }
+ if waitStatus.Signaled() {
+ logrus.Debugf("%v", exitError)
+ exit(int(waitStatus.Signal()) + 128)
+ }
+ }
+ }
+ }
+ logrus.Errorf("%v", err)
+ logrus.Errorf("(Unable to determine exit status)")
+ exit(1)
+ }
+ exit(0)
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
index 2f95da7d8e9..39bff651ea9 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
@@ -4,7 +4,7 @@ package unshare
// #cgo CFLAGS: -Wall -Wextra
// extern void _containers_unshare(void);
-// void __attribute__((constructor)) init(void) {
+// static void __attribute__((constructor)) init(void) {
// _containers_unshare();
// }
import "C"
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
index 96b85754310..3fc36201c5a 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package unshare
@@ -5,10 +6,12 @@ package unshare
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io"
"os"
"os/exec"
+ "os/signal"
"os/user"
"runtime"
"strconv"
@@ -19,7 +22,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
"github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/syndtr/gocapability/capability"
)
@@ -75,6 +77,28 @@ func getRootlessGID() int {
return os.Getegid()
}
+// IsSetID checks if the specified path has the given mode bit (setuid or setgid)
+// set, or the matching file capability
+func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) {
+ info, err := os.Stat(path)
+ if err != nil {
+ return false, err
+ }
+
+ mode := info.Mode()
+ if mode&modeid == modeid {
+ return true, nil
+ }
+ cap, err := capability.NewFile2(path)
+ if err != nil {
+ return false, err
+ }
+ if err := cap.Load(); err != nil {
+ return false, err
+ }
+ return cap.Get(capability.EFFECTIVE, capid), nil
+}
+
func (c *Cmd) Start() error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
@@ -95,7 +119,7 @@ func (c *Cmd) Start() error {
// Create the pipe for reading the child's PID.
pidRead, pidWrite, err := os.Pipe()
if err != nil {
- return errors.Wrapf(err, "error creating pid pipe")
+ return fmt.Errorf("creating pid pipe: %w", err)
}
c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, pidWrite)
@@ -105,7 +129,7 @@ func (c *Cmd) Start() error {
if err != nil {
pidRead.Close()
pidWrite.Close()
- return errors.Wrapf(err, "error creating pid pipe")
+ return fmt.Errorf("creating pid pipe: %w", err)
}
c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, continueRead)
@@ -154,13 +178,13 @@ func (c *Cmd) Start() error {
pidString := ""
b := new(bytes.Buffer)
if _, err := io.Copy(b, pidRead); err != nil {
- return errors.Wrapf(err, "error reading child PID")
+ return fmt.Errorf("reading child PID: %w", err)
}
pidString = b.String()
pid, err := strconv.Atoi(pidString)
if err != nil {
fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
- return errors.Wrapf(err, "error parsing PID %q", pidString)
+ return fmt.Errorf("parsing PID %q: %w", pidString, err)
}
pidString = fmt.Sprintf("%d", pid)
@@ -170,18 +194,18 @@ func (c *Cmd) Start() error {
setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
- return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+ return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err)
}
defer setgroups.Close()
if c.GidMappingsEnableSetgroups {
if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
- return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString)
+ return fmt.Errorf("opening \"allow\" to /proc/%s/setgroups: %w", pidString, err)
}
} else {
if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
- return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
+ return fmt.Errorf("writing \"deny\" to /proc/%s/setgroups: %w", pidString, err)
}
}
@@ -189,7 +213,7 @@ func (c *Cmd) Start() error {
uidmap, gidmap, err := GetHostIDMappings("")
if err != nil {
fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
- return errors.Wrapf(err, "error reading ID mappings in parent")
+ return fmt.Errorf("reading ID mappings in parent: %w", err)
}
if len(c.UidMappings) == 0 {
c.UidMappings = uidmap
@@ -214,16 +238,27 @@ func (c *Cmd) Start() error {
gidmapSet := false
// Set the GID map.
if c.UseNewgidmap {
- cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
+ path, err := exec.LookPath("newgidmap")
+ if err != nil {
+ return fmt.Errorf("finding newgidmap: %w", err)
+ }
+ cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
g.Reset()
cmd.Stdout = g
cmd.Stderr = g
- err := cmd.Run()
- if err == nil {
+ if err := cmd.Run(); err == nil {
gidmapSet = true
} else {
- logrus.Warnf("error running newgidmap: %v: %s", err, g.String())
- logrus.Warnf("falling back to single mapping")
+ logrus.Warnf("running newgidmap: %v: %s", err, g.String())
+ isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID)
+ if err != nil {
+ logrus.Warnf("Failed to check for setgid on %s: %v", path, err)
+ } else {
+ if !isSetgid {
+ logrus.Warnf("%s should be setgid or have filecaps setgid", path)
+ }
+ }
+ logrus.Warnf("Falling back to single mapping")
g.Reset()
g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
}
@@ -233,23 +268,23 @@ func (c *Cmd) Start() error {
setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err)
- return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+ return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err)
}
defer setgroups.Close()
if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err)
- return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString)
+ return fmt.Errorf("writing 'deny' to /proc/%s/setgroups: %w", pidString, err)
}
}
gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
- fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
- return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
+ fmt.Fprintf(continueWrite, "opening /proc/%s/gid_map: %v", pidString, err)
+ return fmt.Errorf("opening /proc/%s/gid_map: %w", pidString, err)
}
defer gidmap.Close()
if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
- fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
- return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
+ fmt.Fprintf(continueWrite, "writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
+ return fmt.Errorf("writing %q to /proc/%s/gid_map: %w", g.String(), pidString, err)
}
}
}
@@ -261,18 +296,30 @@ func (c *Cmd) Start() error {
fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
}
uidmapSet := false
- // Set the GID map.
+ // Set the UID map.
if c.UseNewuidmap {
- cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
+ path, err := exec.LookPath("newuidmap")
+ if err != nil {
+ return fmt.Errorf("finding newuidmap: %w", err)
+ }
+ cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
u.Reset()
cmd.Stdout = u
cmd.Stderr = u
- err := cmd.Run()
- if err == nil {
+ if err := cmd.Run(); err == nil {
uidmapSet = true
} else {
- logrus.Warnf("error running newuidmap: %v: %s", err, u.String())
- logrus.Warnf("falling back to single mapping")
+ logrus.Warnf("Error running newuidmap: %v: %s", err, u.String())
+ isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID)
+ if err != nil {
+ logrus.Warnf("Failed to check for setuid on %s: %v", path, err)
+ } else {
+ if !isSetuid {
+ logrus.Warnf("%s should be setuid or have filecaps setuid", path)
+ }
+ }
+
+ logrus.Warnf("Falling back to single mapping")
u.Reset()
u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
}
@@ -281,12 +328,12 @@ func (c *Cmd) Start() error {
uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
- return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
+ return fmt.Errorf("opening /proc/%s/uid_map: %w", pidString, err)
}
defer uidmap.Close()
if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err)
- return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString)
+ return fmt.Errorf("writing %q to /proc/%s/uid_map: %w", u.String(), pidString, err)
}
}
}
@@ -296,12 +343,12 @@ func (c *Cmd) Start() error {
oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
if err != nil {
fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
- return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
+ return fmt.Errorf("opening /proc/%s/oom_score_adj: %w", pidString, err)
}
defer oomScoreAdj.Close()
if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err)
- return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString)
+ return fmt.Errorf("writing \"%d\" to /proc/%s/oom_score_adj: %w", c.OOMScoreAdj, pidString, err)
}
}
// Run any additional setup that we want to do before the child starts running proper.
@@ -340,10 +387,41 @@ const (
UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)
+// hasFullUsersMappings checks whether the current user namespace has all the IDs mapped.
+func hasFullUsersMappings() (bool, error) {
+ content, err := os.ReadFile("/proc/self/uid_map")
+ if err != nil {
+ return false, err
+ }
+ // The kernel rejects attempts to create mappings where either starting
+ // point is (u32)-1: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c?id=af3e9579ecfb#n1006 .
+ // So, if the uid_map contains 4294967295, the entire ID space is available in the
+ // user namespace, so it is likely the initial user namespace.
+ return bytes.Contains(content, []byte("4294967295")), nil
+}
+
// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
isRootlessOnce.Do(func() {
isRootless = getRootlessUID() != 0 || getenv(UsernsEnvName) != ""
+ if !isRootless {
+ hasCapSysAdmin, err := HasCapSysAdmin()
+ if err != nil {
+ logrus.Warnf("Failed to read CAP_SYS_ADMIN presence for the current process")
+ }
+ if err == nil && !hasCapSysAdmin {
+ isRootless = true
+ }
+ }
+ if !isRootless {
+ hasMappings, err := hasFullUsersMappings()
+ if err != nil {
+ logrus.Warnf("Failed to read current user namespace mappings")
+ }
+ if err == nil && !hasMappings {
+ isRootless = true
+ }
+ }
})
return isRootless
}
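
The heuristic above can be checked by hand: in the initial user namespace, /proc/self/uid_map holds the full-range entry "0 0 4294967295". A small standalone probe following the same logic (Linux only):

    package main

    import (
    	"bytes"
    	"fmt"
    	"os"
    )

    func main() {
    	content, err := os.ReadFile("/proc/self/uid_map")
    	if err != nil {
    		fmt.Println("read uid_map:", err)
    		return
    	}
    	fmt.Printf("uid_map: %s", content)
    	if bytes.Contains(content, []byte("4294967295")) {
    		fmt.Println("full ID range mapped: likely the initial user namespace")
    	} else {
    		fmt.Println("partial mapping: treated as rootless")
    	}
    }
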
@@ -367,17 +445,6 @@ type Runnable interface {
Run() error
}
-func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname
- if err != nil {
- if format != "" {
- logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
- } else {
- logrus.Errorf("%v", err)
- }
- os.Exit(1)
- }
-}
-
// MaybeReexecUsingUserNamespace re-execs the process in a new namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// If we've already been through this once, no need to try again.
@@ -407,7 +474,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// ID and a range size.
uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
if err != nil {
- logrus.Warnf("error reading allowed ID mappings: %v", err)
+ logrus.Warnf("Reading allowed ID mappings: %v", err)
}
if len(uidmap) == 0 {
logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
@@ -434,13 +501,13 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
// to use unshare(), so don't bother creating a new user namespace at this point.
capabilities, err := capability.NewPid(0)
- bailOnError(err, "error reading the current capabilities sets")
+ bailOnError(err, "Reading the current capabilities sets")
if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
return
}
// Read the set of ID mappings that we're currently using.
uidmap, gidmap, err = GetHostIDMappings("")
- bailOnError(err, "error reading current ID mappings")
+ bailOnError(err, "Reading current ID mappings")
// Just reuse them.
for i := range uidmap {
uidmap[i].HostID = uidmap[i].ContainerID
@@ -463,7 +530,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
- logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
+ logrus.Errorf("Setting BUILDAH_ISOLATION=rootless in environment: %v", err)
os.Exit(1)
}
}
@@ -483,7 +550,31 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
cmd.GidMappingsEnableSetgroups = true
// Finish up.
- logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
+ logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
+
+ // Forward SIGHUP, SIGINT, and SIGTERM to our child process.
+ interrupted := make(chan os.Signal, 100)
+ defer func() {
+ signal.Stop(interrupted)
+ close(interrupted)
+ }()
+ cmd.Hook = func(int) error {
+ go func() {
+ for receivedSignal := range interrupted {
+ cmd.Cmd.Process.Signal(receivedSignal)
+ }
+ }()
+ return nil
+ }
+ signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+
+ // Make sure our child process gets SIGKILLed if we exit, for whatever
+ // reason, before it does.
+ if cmd.Cmd.SysProcAttr == nil {
+ cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{}
+ }
+ cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
+
ExecRunnable(cmd, nil)
}
@@ -497,22 +588,22 @@ func ExecRunnable(cmd Runnable, cleanup func()) {
os.Exit(status)
}
if err := cmd.Run(); err != nil {
- if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
+ if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ProcessState.Exited() {
if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
if waitStatus.Exited() {
- logrus.Errorf("%v", exitError)
+ logrus.Debugf("%v", exitError)
exit(waitStatus.ExitStatus())
}
if waitStatus.Signaled() {
- logrus.Errorf("%v", exitError)
+ logrus.Debugf("%v", exitError)
exit(int(waitStatus.Signal()) + 128)
}
}
}
}
logrus.Errorf("%v", err)
- logrus.Errorf("(unable to determine exit status)")
+ logrus.Errorf("(Unable to determine exit status)")
exit(1)
}
exit(0)
@@ -523,7 +614,7 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
var mappings []specs.LinuxIDMapping
f, err := os.Open(path)
if err != nil {
- return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
+ return nil, fmt.Errorf("reading ID mappings from %q: %w", path, err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
@@ -531,19 +622,19 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
line := scanner.Text()
fields := strings.Fields(line)
if len(fields) != 3 {
- return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
+ return nil, fmt.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
}
cid, err := strconv.ParseUint(fields[0], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
+ return nil, fmt.Errorf("parsing container ID value %q from line %q in %q: %w", fields[0], line, path, err)
}
hid, err := strconv.ParseUint(fields[1], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
+ return nil, fmt.Errorf("parsing host ID value %q from line %q in %q: %w", fields[1], line, path, err)
}
size, err := strconv.ParseUint(fields[2], 10, 32)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
+ return nil, fmt.Errorf("parsing size value %q from line %q in %q: %w", fields[2], line, path, err)
}
mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
}
@@ -571,7 +662,7 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
mappings, err := idtools.NewIDMappings(user, group)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
+ return nil, nil, fmt.Errorf("reading subuid mappings for user %q and subgid mappings for group %q: %w", user, group, err)
}
var uidmap, gidmap []specs.LinuxIDMapping
for _, m := range mappings.UIDs() {
@@ -603,3 +694,20 @@ func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap,
}
return uid, gid, nil
}
+
+// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
+func HasCapSysAdmin() (bool, error) {
+ hasCapSysAdminOnce.Do(func() {
+ currentCaps, err := capability.NewPid2(0)
+ if err != nil {
+ hasCapSysAdminErr = err
+ return
+ }
+ if err = currentCaps.Load(); err != nil {
+ hasCapSysAdminErr = err
+ return
+ }
+ hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
+ })
+ return hasCapSysAdminRet, hasCapSysAdminErr
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
index bf4d567b8af..66dd545966e 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
@@ -1,4 +1,5 @@
-// +build !linux
+//go:build !linux && !darwin
+// +build !linux,!darwin
package unshare
@@ -43,3 +44,8 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
return nil, nil, nil
}
+
+// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
+func HasCapSysAdmin() (bool, error) {
+ return os.Geteuid() == 0, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
index d5f2d22a801..a6b38eda8fd 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
@@ -1,4 +1,5 @@
-// +build !linux,cgo
+//go:build cgo && !(linux || freebsd)
+// +build cgo,!linux,!freebsd
package unshare
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index 722750c0cca..1294f6a9ab2 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -1,5 +1,14 @@
-# This file is is the configuration file for all tools
-# that use the containers/storage library.
+# This file is the configuration file for all tools
+# that use the containers/storage library. The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# containers/storage library do not inherit fields from other storage.conf
+# files.
+#
+# Note: The storage.conf file overrides other storage.conf files based on this precedence:
+# /usr/containers/storage.conf
+# /etc/containers/storage.conf
+# $HOME/.config/containers/storage.conf
+# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
# See man 5 containers-storage.conf for more information
# The "container storage" table contains all of the server options.
[storage]
@@ -11,8 +20,14 @@ driver = "overlay"
runroot = "/run/containers/storage"
# Primary Read/Write location of container storage
+# When changing the graphroot location on an SELINUX system, you must
+# ensure the labeling matches the default locations labels with the
+# following commands:
+# semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH
+# restorecon -R -v /NEWSTORAGEPATH
graphroot = "/var/lib/containers/storage"
+
# Storage path for rootless users
#
# rootless_storage_path = "$HOME/.local/share/containers/storage"
@@ -25,6 +40,28 @@ graphroot = "/var/lib/containers/storage"
additionalimagestores = [
]
+# Allows specification of how storage is populated when pulling images. This
+# option can speed the pulling process of images compressed with format
+# zstd:chunked. Containers/storage looks for files within images that are being
+# pulled from a container registry that were previously pulled to the host. It
+# can copy or create a hard link to the existing file when it finds them,
+# eliminating the need to pull them from the container registry. These options
+# can deduplicate pulling of content, disk storage of content and can allow the
+# kernel to use less memory when running containers.
+
+# containers/storage supports three keys
+# * enable_partial_images="true" | "false"
+# Tells containers/storage to look for files previously pulled in storage
+# rather than always pulling them from the container registry.
+# * use_hard_links = "false" | "true"
+# Tells containers/storage to use hard links rather than create new files in
+# the image, if an identical file already existed in storage.
+# * ostree_repos = ""
+# Tells containers/storage where an ostree repository exists that might have
+# previously pulled content which can be used when attempting to avoid
+# pulling content from the container registry.
+pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
+
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
@@ -113,7 +150,7 @@ mountopt = "nodev"
# future. When "force_mask" is set the original permission mask is stored in
# the "user.containers.override_stat" xattr and the "mount_program" option must
# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
-# extended attribute permissions to processes within containers rather then the
+# extended attribute permissions to processes within containers rather than the
# "force_mask" permissions.
#
# force_mask = ""
diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd
new file mode 100644
index 00000000000..34d80152c02
--- /dev/null
+++ b/vendor/github.com/containers/storage/storage.conf-freebsd
@@ -0,0 +1,205 @@
+# This file is the configuration file for all tools
+# that use the containers/storage library. The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# containers/storage library do not inherit fields from other storage.conf
+# files.
+#
+# Note: The storage.conf file overrides other storage.conf files based on this precedence:
+# /usr/local/share/containers/storage.conf
+# /usr/local/etc/containers/storage.conf
+# $HOME/.config/containers/storage.conf
+# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+[storage]
+
+# Default Storage Driver, Must be set for proper operation.
+driver = "zfs"
+
+# Temporary storage location
+runroot = "/var/run/containers/storage"
+
+# Primary Read/Write location of container storage
+graphroot = "/var/db/containers/storage"
+
+
+# Storage path for rootless users
+#
+# rootless_storage_path = "$HOME/.local/share/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers
+
+# AdditionalImageStores is used to pass paths to additional Read/Only image stores
+# Must be comma separated list.
+additionalimagestores = [
+]
+
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs. Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+# remap-uids = 0:1668442479:65536
+# remap-gids = 0:1668442479:65536
+
+# Remap-User/Group is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
+# with an in-container ID of 0 and then a host-level ID taken from the lowest
+# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the
+# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
+# until all of the entries have been used for maps.
+#
+# remap-user = "containers"
+# remap-group = "containers"
+
+# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
+# to containers configured to create automatically a user namespace. Containers
+# configured to automatically create a user namespace can still overlap with containers
+# having an explicit mapping set.
+# This setting is ignored when running as rootless.
+# root-auto-userns-user = "storage"
+#
+# Auto-userns-min-size is the minimum size for a user namespace created automatically.
+# auto-userns-min-size=1024
+#
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
+# auto-userns-max-size=65536
+
+[storage.options.overlay]
+# ignore_chown_errors can be set to allow a non-privileged user running with
+# a single UID within a user namespace to run containers. The user can pull
+# and use any image, even those with multiple UIDs. Note multiple UIDs will be
+# squashed down to the default uid in the container. These images will have no
+# separation between the users in the container. Only supported for the overlay
+# and vfs drivers.
+#ignore_chown_errors = "false"
+
+# Inodes is used to set a maximum inodes of the container image.
+# inodes = ""
+
+# Path to a helper program to use for mounting the file system instead of mounting it
+# directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies comma separated list of extra mount options
+mountopt = "nodev"
+
+# Set to skip a PRIVATE bind mount on the storage home directory.
+# skip_mount_home = "false"
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted.
+# Octal permission masks are also accepted.
+#
+# "": No value specified.
+# All files/directories, get set with the permissions identified within the
+# image.
+# "private": it is equivalent to 0700.
+# All files/directories get set with 0700 permissions. The owner has rwx
+# access to the files. No other users on the system can access the files.
+# This setting could be used with network-based home directories.
+# "shared": it is equivalent to 0755.
+# The owner has rwx access to the files and everyone else can read, access
+# and execute them. This setting is useful for sharing containers storage
+# with other users. For instance have a storage owned by root but shared
+# to rootless users as an additional store.
+# NOTE: All files within the image are made readable and executable by any
+# user on the system. Even /etc/shadow within your image is now readable by
+# any user.
+#
+# OCTAL: Users can experiment with other OCTAL Permissions.
+#
+# Note: The force_mask Flag is an experimental feature, it could change in the
+# future. When "force_mask" is set the original permission mask is stored in
+# the "user.containers.override_stat" xattr and the "mount_program" option must
+# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
+# extended attribute permissions to processes within containers rather than the
+# "force_mask" permissions.
+#
+# force_mask = ""
+
+[storage.options.thinpool]
+# Storage Options for thinpool
+
+# autoextend_percent determines the amount by which pool needs to be
+# grown. This is specified in terms of % of pool size. So a value of 20 means
+# that when threshold is hit, pool will be grown by 20% of existing
+# pool size.
+# autoextend_percent = "20"
+
+# autoextend_threshold determines the pool extension threshold in terms
+# of percentage of pool size. For example, if threshold is 60, that means when
+# pool is 60% full, threshold has been hit.
+# autoextend_threshold = "80"
+
+# basesize specifies the size to use when creating the base device, which
+# limits the size of images and containers.
+# basesize = "10G"
+
+# blocksize specifies a custom blocksize to use for the thin pool.
+# blocksize="64k"
+
+# directlvm_device specifies a custom block storage device to use for the
+# thin pool. Required if you setup devicemapper.
+# directlvm_device = ""
+
+# directlvm_device_force wipes device even if device already has a filesystem.
+# directlvm_device_force = "True"
+
+# fs specifies the filesystem type to use for the base device.
+# fs="xfs"
+
+# log_level sets the log level of devicemapper.
+# 0: LogLevelSuppress 0 (Default)
+# 2: LogLevelFatal
+# 3: LogLevelErr
+# 4: LogLevelWarn
+# 5: LogLevelNotice
+# 6: LogLevelInfo
+# 7: LogLevelDebug
+# log_level = "7"
+
+# min_free_space specifies the minimum free space percent in a thin pool required
+# for new device creation to succeed. Valid values are from 0% - 99%.
+# A value of 0% disables this check.
+# min_free_space = "10%"
+
+# mkfsarg specifies extra mkfs arguments to be used when creating the base
+# device.
+# mkfsarg = ""
+
+# metadata_size is used to set the `pvcreate --metadatasize` options when
+# creating thin devices. Default is 128k
+# metadata_size = ""
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# use_deferred_removal marks devicemapper block device for deferred removal.
+# If the thinpool is in use when the driver attempts to remove it, the driver
+# tells the kernel to remove it as soon as possible. Note this does not free
+# up the disk space; use deferred deletion to fully remove the thinpool.
+# use_deferred_removal = "True"
+
+# use_deferred_deletion marks thinpool device for deferred deletion.
+# If the device is busy when the driver attempts to delete it, the driver
+# will attempt to delete device every 30 seconds until successful.
+# If the program using the driver exits, the driver will continue attempting
+# to cleanup the next time the driver is used. Deferred deletion permanently
+# deletes the device and all data stored in device will be lost.
+# use_deferred_deletion = "True"
+
+# xfs_nospace_max_retries specifies the maximum number of retries XFS should
+# attempt to complete IO when ENOSPC (no space) error is returned by
+# underlying storage device.
+# xfs_nospace_max_retries = "0"
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 8d6f2c4d779..fb1faaa13af 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -2,9 +2,9 @@ package storage
import (
"encoding/base64"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -21,13 +21,27 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/parsers"
- "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
+ "github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
+)
+
+type updateNameOperation int
+
+const (
+ setNames updateNameOperation = iota
+ addNames
+ removeNames
+)
+
+const (
+ volatileFlag = "Volatile"
+ mountLabelFlag = "MountLabel"
+ processLabelFlag = "ProcessLabel"
+ mountOptsFlag = "MountOpts"
)
var (
@@ -164,6 +178,7 @@ type Store interface {
GraphRoot() string
GraphDriverName() string
GraphOptions() []string
+ PullOptions() map[string]string
UIDMap() []idtools.IDMap
GIDMap() []idtools.IDMap
@@ -367,8 +382,17 @@ type Store interface {
// SetNames changes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
+ // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
SetNames(id string, names []string) error
+ // AddNames adds the list of names for a layer, image, or container.
+ // Duplicate names are removed from the list automatically.
+ AddNames(id string, names []string) error
+
+ // RemoveNames removes the list of names for a layer, image, or container.
+ // Duplicate names are removed from the list automatically.
+ RemoveNames(id string, names []string) error
+
// ListImageBigData retrieves a list of the (possibly large) chunks of
// named data associated with an image.
ListImageBigData(id string) ([]string, error)
@@ -392,7 +416,7 @@ type Store interface {
SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
// ListLayerBigData retrieves a list of the (possibly large) chunks of
- // named data associated with an layer.
+ // named data associated with a layer.
ListLayerBigData(id string) ([]string, error)
// LayerBigData retrieves a (possibly large) chunk of named data
@@ -574,10 +598,11 @@ type ContainerOptions struct {
// container's layer will inherit settings from the image's top layer
// or, if it is not being created based on an image, the Store object.
types.IDMappingOptions
- LabelOpts []string
- Flags map[string]interface{}
- MountOpts []string
- Volatile bool
+ LabelOpts []string
+ Flags map[string]interface{}
+ MountOpts []string
+ Volatile bool
+ StorageOpt map[string]string
}
type store struct {
@@ -588,6 +613,7 @@ type store struct {
graphRoot string
graphDriverName string
graphOptions []string
+ pullOptions map[string]string
uidMap []idtools.IDMap
gidMap []idtools.IDMap
autoUsernsUser string
@@ -612,19 +638,24 @@ type store struct {
// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
//
// These defaults observe environment variables:
-// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use
-// * `STORAGE_OPTS` for the string of options to pass to the driver
+// - `STORAGE_DRIVER` for the name of the storage driver to attempt to use
+// - `STORAGE_OPTS` for the string of options to pass to the driver
//
// Note that we do some of this work in a child process. The calling process's
// main() function needs to import our pkg/reexec package and should begin with
// something like this in order to allow us to properly start that child
// process:
-// if reexec.Init() {
-// return
-// }
+//
+// if reexec.Init() {
+// return
+// }
func GetStore(options types.StoreOptions) (Store, error) {
+ defaultOpts, err := types.Options()
+ if err != nil {
+ return nil, err
+ }
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
- options = types.Options()
+ options = defaultOpts
}
if options.GraphRoot != "" {
@@ -645,17 +676,21 @@ func GetStore(options types.StoreOptions) (Store, error) {
storesLock.Lock()
defer storesLock.Unlock()
+ // return only if BOTH run and graph root match; otherwise our run root can be overridden if the graph root is found first
for _, s := range stores {
- if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
+ if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
return s, nil
}
}
- if options.GraphRoot == "" {
- return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified")
- }
- if options.RunRoot == "" {
- return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
+ // if passed a run-root or graph-root alone, the other should be defaulted; only error if we have neither.
+ switch {
+ case options.RunRoot == "" && options.GraphRoot == "":
+ return nil, fmt.Errorf("no storage runroot or graphroot specified: %w", ErrIncompleteOptions)
+ case options.GraphRoot == "":
+ options.GraphRoot = defaultOpts.GraphRoot
+ case options.RunRoot == "":
+ options.RunRoot = defaultOpts.RunRoot
}
if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
@@ -703,6 +738,7 @@ func GetStore(options types.StoreOptions) (Store, error) {
additionalGIDs: nil,
usernsLock: usernsLock,
disableVolatile: options.DisableVolatile,
+ pullOptions: options.PullOptions,
}
if err := s.load(); err != nil {
return nil, err
@@ -753,6 +789,14 @@ func (s *store) GraphOptions() []string {
return s.graphOptions
}
+func (s *store) PullOptions() map[string]string {
+ cp := make(map[string]string, len(s.pullOptions))
+ for k, v := range s.pullOptions {
+ cp[k] = v
+ }
+ return cp
+}
+
func (s *store) UIDMap() []idtools.IDMap {
return copyIDMap(s.uidMap)
}
@@ -982,9 +1026,6 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
if err := rcstore.ReloadIfChanged(); err != nil {
return nil, -1, err
}
- if id == "" {
- id = stringid.GenerateRandomID()
- }
if options == nil {
options = &LayerOptions{}
}
@@ -1063,10 +1104,6 @@ func (s *store) CreateLayer(id, parent string, names []string, mountLabel string
}
func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) {
- if id == "" {
- id = stringid.GenerateRandomID()
- }
-
if layer != "" {
lstore, err := s.LayerStore()
if err != nil {
@@ -1131,10 +1168,6 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
if options.HostGIDMapping && len(layer.GIDMap) != 0 {
return false
}
- // If we don't care about the mapping, it's fine.
- if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 {
- return true
- }
// Compare the maps.
return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
}
@@ -1176,6 +1209,11 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
if layer == nil {
layer = cLayer
parentLayer = cParentLayer
+ if store != rlstore {
+ // The layer is in another store, so we cannot
+ // create a mapped version of it for the image.
+ createMappedLayer = false
+ }
}
}
}
@@ -1217,13 +1255,13 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
if err != nil {
- return nil, errors.Wrapf(err, "error creating an ID-mapped copy of layer %q", layer.ID)
+ return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
}
if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
- err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2))
+ err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err)
}
- return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID)
+ return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err)
}
layer = mappedLayer
}
@@ -1244,9 +1282,6 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if err != nil {
return nil, err
}
- if id == "" {
- id = stringid.GenerateRandomID()
- }
var imageTopLayer *Layer
imageID := ""
@@ -1302,14 +1337,14 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
}
}
if cimage == nil {
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", image, ErrImageUnknown)
}
imageID = cimage.ID
}
if options.AutoUserNs {
var err error
- options.UIDMap, options.GIDMap, err = s.getAutoUserNS(id, &options.AutoUserNsOpts, cimage)
+ options.UIDMap, options.GIDMap, err = s.getAutoUserNS(&options.AutoUserNsOpts, cimage)
if err != nil {
return nil, err
}
@@ -1371,11 +1406,10 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if options.Flags == nil {
options.Flags = make(map[string]interface{})
}
- plabel, _ := options.Flags["ProcessLabel"].(string)
- mlabel, _ := options.Flags["MountLabel"].(string)
- if (plabel == "" && mlabel != "") ||
- (plabel != "" && mlabel == "") {
- return nil, errors.Errorf("ProcessLabel and Mountlabel must either not be specified or both specified")
+ plabel, _ := options.Flags[processLabelFlag].(string)
+ mlabel, _ := options.Flags[mountLabelFlag].(string)
+ if (plabel == "" && mlabel != "") || (plabel != "" && mlabel == "") {
+ return nil, errors.New("ProcessLabel and Mountlabel must either not be specified or both specified")
}
if plabel == "" {
@@ -1383,11 +1417,12 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if err != nil {
return nil, err
}
- options.Flags["ProcessLabel"] = processLabel
- options.Flags["MountLabel"] = mountLabel
+ mlabel = mountLabel
+ options.Flags[processLabelFlag] = processLabel
+ options.Flags[mountLabelFlag] = mountLabel
}
- clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true)
+ clayer, err := rlstore.Create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true)
if err != nil {
return nil, err
}
@@ -1533,7 +1568,7 @@ func (s *store) ListImageBigData(id string) ([]string, error) {
return bigDataNames, err
}
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (s *store) ImageBigDataSize(id, key string) (int64, error) {
@@ -1611,9 +1646,9 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) {
}
}
if foundImage {
- return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q", key, id)
+ return nil, fmt.Errorf("locating item named %q for image with ID %q (consider removing the image to resolve the issue): %w", key, id, os.ErrNotExist)
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
// ListLayerBigData retrieves a list of the (possibly large) chunks of
@@ -1644,9 +1679,9 @@ func (s *store) ListLayerBigData(id string) ([]string, error) {
}
}
if foundLayer {
- return nil, errors.Wrapf(os.ErrNotExist, "error locating big data for layer with ID %q", id)
+ return nil, fmt.Errorf("locating big data for layer with ID %q: %w", id, os.ErrNotExist)
}
- return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
}
// LayerBigData retrieves a (possibly large) chunk of named data
@@ -1677,9 +1712,9 @@ func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) {
}
}
if foundLayer {
- return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for layer with ID %q", key, id)
+ return nil, fmt.Errorf("locating item named %q for layer with ID %q: %w", key, id, os.ErrNotExist)
}
- return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id)
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
}
// SetLayerBigData stores a (possibly large) chunk of named data
@@ -1718,11 +1753,11 @@ func (s *store) ImageSize(id string) (int64, error) {
lstore, err := s.LayerStore()
if err != nil {
- return -1, errors.Wrapf(err, "error loading primary layer store data")
+ return -1, fmt.Errorf("loading primary layer store data: %w", err)
}
lstores, err := s.ROLayerStores()
if err != nil {
- return -1, errors.Wrapf(err, "error loading additional layer stores")
+ return -1, fmt.Errorf("loading additional layer stores: %w", err)
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
@@ -1736,11 +1771,11 @@ func (s *store) ImageSize(id string) (int64, error) {
var imageStore ROBigDataStore
istore, err := s.ImageStore()
if err != nil {
- return -1, errors.Wrapf(err, "error loading primary image store data")
+ return -1, fmt.Errorf("loading primary image store data: %w", err)
}
istores, err := s.ROImageStores()
if err != nil {
- return -1, errors.Wrapf(err, "error loading additional image stores")
+ return -1, fmt.Errorf("loading additional image stores: %w", err)
}
// Look for the image's record.
@@ -1757,7 +1792,7 @@ func (s *store) ImageSize(id string) (int64, error) {
}
}
if image == nil {
- return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
// Start with a list of the image's top layers, if it has any.
@@ -1787,7 +1822,7 @@ func (s *store) ImageSize(id string) (int64, error) {
}
}
if layer == nil {
- return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID)
+ return -1, fmt.Errorf("locating layer with ID %q: %w", layerID, ErrLayerUnknown)
}
// The UncompressedSize is only valid if there's a digest to go with it.
n := layer.UncompressedSize
@@ -1795,7 +1830,7 @@ func (s *store) ImageSize(id string) (int64, error) {
// Compute the size.
n, err = layerStore.DiffSize("", layer.ID)
if err != nil {
- return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID)
+ return -1, fmt.Errorf("size/digest of layer with ID %q could not be calculated: %w", layerID, err)
}
}
// Count this layer.
@@ -1810,12 +1845,12 @@ func (s *store) ImageSize(id string) (int64, error) {
// Count big data items.
names, err := imageStore.BigDataNames(id)
if err != nil {
- return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id)
+ return -1, fmt.Errorf("reading list of big data items for image %q: %w", id, err)
}
for _, name := range names {
n, err := imageStore.BigDataSize(id, name)
if err != nil {
- return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id)
+ return -1, fmt.Errorf("reading size of big data item %q for image %q: %w", name, id, err)
}
size += n
}
@@ -1875,24 +1910,24 @@ func (s *store) ContainerSize(id string) (int64, error) {
if layer, err = store.Get(container.LayerID); err == nil {
size, err = store.DiffSize("", layer.ID)
if err != nil {
- return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID)
+ return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err)
}
break
}
}
if layer == nil {
- return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID)
+ return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown)
}
// Count big data items.
names, err := rcstore.BigDataNames(id)
if err != nil {
- return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID)
+ return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err)
}
for _, name := range names {
n, err := rcstore.BigDataSize(id, name)
if err != nil {
- return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id)
+ return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err)
}
size += n
}
@@ -2048,7 +2083,20 @@ func dedupeNames(names []string) []string {
return deduped
}
+// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
func (s *store) SetNames(id string, names []string) error {
+ return s.updateNames(id, names, setNames)
+}
+
+func (s *store) AddNames(id string, names []string) error {
+ return s.updateNames(id, names, addNames)
+}
+
+func (s *store) RemoveNames(id string, names []string) error {
+ return s.updateNames(id, names, removeNames)
+}
+
+func (s *store) updateNames(id string, names []string, op updateNameOperation) error {
deduped := dedupeNames(names)
rlstore, err := s.LayerStore()
@@ -2061,7 +2109,16 @@ func (s *store) SetNames(id string, names []string) error {
return err
}
if rlstore.Exists(id) {
- return rlstore.SetNames(id, deduped)
+ switch op {
+ case setNames:
+ return rlstore.SetNames(id, deduped)
+ case removeNames:
+ return rlstore.RemoveNames(id, deduped)
+ case addNames:
+ return rlstore.AddNames(id, deduped)
+ default:
+ return errInvalidUpdateNameOperation
+ }
}
ristore, err := s.ImageStore()
@@ -2074,7 +2131,16 @@ func (s *store) SetNames(id string, names []string) error {
return err
}
if ristore.Exists(id) {
- return ristore.SetNames(id, deduped)
+ switch op {
+ case setNames:
+ return ristore.SetNames(id, deduped)
+ case removeNames:
+ return ristore.RemoveNames(id, deduped)
+ case addNames:
+ return ristore.AddNames(id, deduped)
+ default:
+ return errInvalidUpdateNameOperation
+ }
}
// Check if id refers to a RO Store
@@ -2112,7 +2178,16 @@ func (s *store) SetNames(id string, names []string) error {
return err
}
if rcstore.Exists(id) {
- return rcstore.SetNames(id, deduped)
+ switch op {
+ case setNames:
+ return rcstore.SetNames(id, deduped)
+ case removeNames:
+ return rcstore.RemoveNames(id, deduped)
+ case addNames:
+ return rcstore.AddNames(id, deduped)
+ default:
+ return errInvalidUpdateNameOperation
+ }
}
return ErrLayerUnknown
}
@@ -2270,7 +2345,7 @@ func (s *store) DeleteLayer(id string) error {
}
for _, layer := range layers {
if layer.Parent == id {
- return errors.Wrapf(ErrLayerHasChildren, "used by layer %v", layer.ID)
+ return fmt.Errorf("used by layer %v: %w", layer.ID, ErrLayerHasChildren)
}
}
images, err := ristore.Images()
@@ -2280,12 +2355,12 @@ func (s *store) DeleteLayer(id string) error {
for _, image := range images {
if image.TopLayer == id {
- return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID)
+ return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
}
if stringutils.InSlice(image.MappedTopLayers, id) {
// No write access to the image store, fail before the layer is deleted
if _, ok := ristore.(*imageStore); !ok {
- return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID)
+ return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
}
}
}
@@ -2295,11 +2370,11 @@ func (s *store) DeleteLayer(id string) error {
}
for _, container := range containers {
if container.LayerID == id {
- return errors.Wrapf(ErrLayerUsedByContainer, "layer %v used by container %v", id, container.ID)
+ return fmt.Errorf("layer %v used by container %v: %w", id, container.ID, ErrLayerUsedByContainer)
}
}
if err := rlstore.Delete(id); err != nil {
- return errors.Wrapf(err, "delete layer %v", id)
+ return fmt.Errorf("delete layer %v: %w", id, err)
}
// The check here is used to avoid iterating the images if we don't need to.
@@ -2308,7 +2383,7 @@ func (s *store) DeleteLayer(id string) error {
for _, image := range images {
if stringutils.InSlice(image.MappedTopLayers, id) {
if err = istore.removeMappedTopLayer(image.ID, id); err != nil {
- return errors.Wrapf(err, "remove mapped top layer %v from image %v", id, image.ID)
+ return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err)
}
}
}
@@ -2363,7 +2438,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
aContainerByImage[container.ImageID] = container.ID
}
if container, ok := aContainerByImage[id]; ok {
- return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container)
+ return nil, fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer)
}
images, err := ristore.Images()
if err != nil {
@@ -2373,22 +2448,16 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
if err != nil {
return nil, err
}
- childrenByParent := make(map[string]*[]string)
+ childrenByParent := make(map[string][]string)
for _, layer := range layers {
- parent := layer.Parent
- if list, ok := childrenByParent[parent]; ok {
- newList := append(*list, layer.ID)
- childrenByParent[parent] = &newList
- } else {
- childrenByParent[parent] = &([]string{layer.ID})
- }
+ childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID)
}
- otherImagesByTopLayer := make(map[string]string)
+ otherImagesTopLayers := make(map[string]struct{})
for _, img := range images {
if img.ID != id {
- otherImagesByTopLayer[img.TopLayer] = img.ID
+ otherImagesTopLayers[img.TopLayer] = struct{}{}
for _, layerID := range img.MappedTopLayers {
- otherImagesByTopLayer[layerID] = img.ID
+ otherImagesTopLayers[layerID] = struct{}{}
}
}
}
@@ -2398,43 +2467,44 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
}
}
layer := image.TopLayer
- lastRemoved := ""
+ layersToRemoveMap := make(map[string]struct{})
+ layersToRemove = append(layersToRemove, image.MappedTopLayers...)
+ for _, mappedTopLayer := range image.MappedTopLayers {
+ layersToRemoveMap[mappedTopLayer] = struct{}{}
+ }
for layer != "" {
if rcstore.Exists(layer) {
break
}
- if _, ok := otherImagesByTopLayer[layer]; ok {
+ if _, used := otherImagesTopLayers[layer]; used {
break
}
parent := ""
if l, err := rlstore.Get(layer); err == nil {
parent = l.Parent
}
- hasOtherRefs := func() bool {
+ hasChildrenNotBeingRemoved := func() bool {
layersToCheck := []string{layer}
if layer == image.TopLayer {
layersToCheck = append(layersToCheck, image.MappedTopLayers...)
}
for _, layer := range layersToCheck {
- if childList, ok := childrenByParent[layer]; ok && childList != nil {
- children := *childList
- for _, child := range children {
- if child != lastRemoved {
- return true
+ if childList := childrenByParent[layer]; len(childList) > 0 {
+ for _, child := range childList {
+ if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval {
+ continue
}
+ return true
}
}
}
return false
}
- if hasOtherRefs() {
+ if hasChildrenNotBeingRemoved() {
break
}
- lastRemoved = layer
- if layer == image.TopLayer {
- layersToRemove = append(layersToRemove, image.MappedTopLayers...)
- }
- layersToRemove = append(layersToRemove, lastRemoved)
+ layersToRemove = append(layersToRemove, layer)
+ layersToRemoveMap[layer] = struct{}{}
layer = parent
}
} else {
@@ -2502,15 +2572,29 @@ func (s *store) DeleteContainer(id string) error {
gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
wg.Add(1)
go func() {
- errChan <- os.RemoveAll(gcpath)
- wg.Done()
+ defer wg.Done()
+ // attempt a simple rm -rf first
+ err := os.RemoveAll(gcpath)
+ if err == nil {
+ errChan <- nil
+ return
+ }
+ // and if it fails get to the more complicated cleanup
+ errChan <- system.EnsureRemoveAll(gcpath)
}()
rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
wg.Add(1)
go func() {
- errChan <- os.RemoveAll(rcpath)
- wg.Done()
+ defer wg.Done()
+ // attempt a simple rm -rf first
+ err := os.RemoveAll(rcpath)
+ if err == nil {
+ errChan <- nil
+ return
+ }
+ // and if it fails get to the more complicated cleanup
+ errChan <- system.EnsureRemoveAll(rcpath)
}()
go func() {
@@ -2519,17 +2603,12 @@ func (s *store) DeleteContainer(id string) error {
}()
var errors []error
- for {
- select {
- case err, ok := <-errChan:
- if !ok {
- return multierror.Append(nil, errors...).ErrorOrNil()
- }
- if err != nil {
- errors = append(errors, err)
- }
+ for err := range errChan {
+ if err != nil {
+ errors = append(errors, err)
}
}
+ return multierror.Append(nil, errors...).ErrorOrNil()
}
}
return ErrNotAContainer
@@ -2718,8 +2797,10 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
options.GidMaps = container.GIDMap
options.Options = container.MountOpts()
if !s.disableVolatile {
- if v, found := container.Flags["Volatile"]; found {
- options.Volatile = v.(bool)
+ if v, found := container.Flags[volatileFlag]; found {
+ if b, ok := v.(bool); ok {
+ options.Volatile = b
+ }
}
}
}
@@ -2825,10 +2906,33 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
if err != nil {
return nil, err
}
+
+ // NaiveDiff could cause mounts to happen without a lock, so be safe
+ // and treat the .Diff operation as a Mount.
+ s.graphLock.Lock()
+ defer s.graphLock.Unlock()
+
+ modified, err := s.graphLock.Modified()
+ if err != nil {
+ return nil, err
+ }
+
+ // We need to make sure the home mount is present when the Mount is done.
+ if modified {
+ s.graphDriver = nil
+ s.layerStore = nil
+ s.graphDriver, err = s.getGraphDriver()
+ if err != nil {
+ return nil, err
+ }
+ s.lastLoaded = time.Now()
+ }
+
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
if err := store.ReloadIfChanged(); err != nil {
+ store.Unlock()
return nil, err
}
if store.Exists(to) {
@@ -2954,7 +3058,7 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
}
storeLayers, err := m(store, d)
if err != nil {
- if errors.Cause(err) != ErrLayerUnknown {
+ if !errors.Is(err, ErrLayerUnknown) {
return nil, err
}
continue
@@ -2969,14 +3073,14 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
- return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d)
+ return nil, fmt.Errorf("looking for compressed layers matching digest %q: %w", d, err)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d)
}
func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
- return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d)
+ return nil, fmt.Errorf("looking for layers matching digest %q: %w", d, err)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
}
@@ -3256,7 +3360,7 @@ func (s *store) Image(id string) (*Image, error) {
return image, nil
}
}
- return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
@@ -3314,7 +3418,7 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
return nil, err
}
imageList, err := store.ByDigest(d)
- if err != nil && errors.Cause(err) != ErrImageUnknown {
+ if err != nil && !errors.Is(err, ErrImageUnknown) {
return nil, err
}
images = append(images, imageList...)
@@ -3446,7 +3550,7 @@ func (s *store) FromContainerDirectory(id, file string) ([]byte, error) {
if err != nil {
return nil, err
}
- return ioutil.ReadFile(filepath.Join(dir, file))
+ return os.ReadFile(filepath.Join(dir, file))
}
func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error {
@@ -3466,7 +3570,7 @@ func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) {
if err != nil {
return nil, err
}
- return ioutil.ReadFile(filepath.Join(dir, file))
+ return os.ReadFile(filepath.Join(dir, file))
}
func (s *store) Shutdown(force bool) ([]string, error) {
@@ -3510,7 +3614,7 @@ func (s *store) Shutdown(force bool) ([]string, error) {
}
}
if len(mounted) > 0 && err == nil {
- err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted")
+ err = fmt.Errorf("a layer is mounted: %w", ErrLayerUsedByContainer)
}
if err == nil {
err = s.graphDriver.Cleanup()
@@ -3624,7 +3728,10 @@ func ReloadConfigurationFile(configFile string, storeOptions *types.StoreOptions
// GetDefaultMountOptions returns the default mountoptions defined in container/storage
func GetDefaultMountOptions() ([]string, error) {
- defaultStoreOptions := types.Options()
+ defaultStoreOptions, err := types.Options()
+ if err != nil {
+ return nil, err
+ }
return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions)
}
diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go
index d920d12eb50..ad12ffdbf2d 100644
--- a/vendor/github.com/containers/storage/types/errors.go
+++ b/vendor/github.com/containers/storage/types/errors.go
@@ -55,4 +55,6 @@ var (
ErrStoreIsReadOnly = errors.New("called a write method on a read-only store")
// ErrNotSupported is returned when the requested functionality is not supported.
ErrNotSupported = errors.New("not supported")
+ // ErrInvalidMappings is returned when the specified mappings are invalid.
+ ErrInvalidMappings = errors.New("invalid mappings specified")
)
diff --git a/vendor/github.com/containers/storage/types/idmappings.go b/vendor/github.com/containers/storage/types/idmappings.go
index 82824ae2b22..aabdf7a81f3 100644
--- a/vendor/github.com/containers/storage/types/idmappings.go
+++ b/vendor/github.com/containers/storage/types/idmappings.go
@@ -5,7 +5,6 @@ import (
"os"
"github.com/containers/storage/pkg/idtools"
- "github.com/pkg/errors"
)
// AutoUserNsOptions defines how to automatically create a user namespace.
@@ -77,18 +76,18 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri
if subUIDMap != "" && subGIDMap != "" {
mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap)
+ return nil, fmt.Errorf("failed to create NewIDMappings for uidmap=%s gidmap=%s: %w", subUIDMap, subGIDMap, err)
}
options.UIDMap = mappings.UIDs()
options.GIDMap = mappings.GIDs()
}
parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID")
if err != nil {
- return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice)
+ return nil, fmt.Errorf("failed to create ParseUIDMap UID=%s: %w", UIDMapSlice, err)
}
parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID")
if err != nil {
- return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", UIDMapSlice)
+ return nil, fmt.Errorf("failed to create ParseGIDMap GID=%s: %w", UIDMapSlice, err)
}
options.UIDMap = append(options.UIDMap, parsedUIDMap...)
options.GIDMap = append(options.GIDMap, parsedGIDMap...)
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index f9bf7e6b658..4c873b45f04 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -1,46 +1,101 @@
package types
import (
+ "errors"
"fmt"
"os"
- "os/exec"
"path/filepath"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
- "github.com/containers/storage/drivers/overlay"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/idtools"
"github.com/sirupsen/logrus"
)
// TOML-friendly explicit tables used for conversions.
-type tomlConfig struct {
+type TomlConfig struct {
Storage struct {
- Driver string `toml:"driver"`
- RunRoot string `toml:"runroot"`
- GraphRoot string `toml:"graphroot"`
- RootlessStoragePath string `toml:"rootless_storage_path"`
- Options cfg.OptionsConfig `toml:"options"`
+ Driver string `toml:"driver,omitempty"`
+ RunRoot string `toml:"runroot,omitempty"`
+ GraphRoot string `toml:"graphroot,omitempty"`
+ RootlessStoragePath string `toml:"rootless_storage_path,omitempty"`
+ Options cfg.OptionsConfig `toml:"options,omitempty"`
} `toml:"storage"`
}
-// defaultConfigFile path to the system wide storage.conf file
+const (
+ overlayDriver = "overlay"
+ overlay2 = "overlay2"
+ storageConfEnv = "CONTAINERS_STORAGE_CONF"
+)
+
var (
- defaultConfigFile = "/etc/containers/storage.conf"
- defaultConfigFileSet = false
- // DefaultStoreOptions is a reasonable default set of options.
- defaultStoreOptions StoreOptions
+ defaultStoreOptionsOnce sync.Once
+ loadDefaultStoreOptionsErr error
)
-func init() {
- defaultStoreOptions.RunRoot = "/run/containers/storage"
- defaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
+func loadDefaultStoreOptions() {
defaultStoreOptions.GraphDriverName = ""
- ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
+ setDefaults := func() {
+ // reload could set values to empty for run and graph root if the config does not contain anything
+ if defaultStoreOptions.RunRoot == "" {
+ defaultStoreOptions.RunRoot = defaultRunRoot
+ }
+ if defaultStoreOptions.GraphRoot == "" {
+ defaultStoreOptions.GraphRoot = defaultGraphRoot
+ }
+ }
+ setDefaults()
+
+ if path, ok := os.LookupEnv(storageConfEnv); ok {
+ defaultOverrideConfigFile = path
+ if err := ReloadConfigurationFileIfNeeded(path, &defaultStoreOptions); err != nil {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ setDefaults()
+ return
+ }
+
+ if path, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok {
+ homeConfigFile := filepath.Join(path, "containers", "storage.conf")
+ if _, err := os.Stat(homeConfigFile); err == nil {
+ // user storage.conf in XDG_CONFIG_HOME if it exists
+ defaultOverrideConfigFile = homeConfigFile
+ } else {
+ if !os.IsNotExist(err) {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ }
+ }
+
+ _, err := os.Stat(defaultOverrideConfigFile)
+ if err == nil {
+ // The DefaultConfigFile(rootless) function returns the path
+ // of the storage.conf file in use by returning defaultConfigFile.
+ // If the override file exists, containers/storage uses it by default.
+ defaultConfigFile = defaultOverrideConfigFile
+ if err := ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions); err != nil {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ setDefaults()
+ return
+ }
+
+ if !os.IsNotExist(err) {
+ logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err)
+ }
+ if err := ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions); err != nil && !errors.Is(err, os.ErrNotExist) {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ setDefaults()
}
// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing.
@@ -51,6 +106,10 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
defaultRootlessGraphRoot string
err error
)
+ defaultStoreOptionsOnce.Do(loadDefaultStoreOptions)
+ if loadDefaultStoreOptionsErr != nil {
+ return StoreOptions{}, loadDefaultStoreOptionsErr
+ }
storageOpts := defaultStoreOptions
if rootless && rootlessUID != 0 {
storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)
@@ -168,13 +227,13 @@ func isRootlessDriver(driver string) bool {
// getRootlessStorageOpts returns the storage opts for containers running as non root
func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) {
var opts StoreOptions
- const overlayDriver = "overlay"
dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID)
if err != nil {
return opts, err
}
opts.RunRoot = rootlessRuntime
+ opts.PullOptions = systemOpts.PullOptions
if systemOpts.RootlessStoragePath != "" {
opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID)
if err != nil {
@@ -190,25 +249,20 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
if driver := os.Getenv("STORAGE_DRIVER"); driver != "" {
opts.GraphDriverName = driver
}
- if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver {
- supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime)
- if err != nil {
- return opts, err
- }
- if supported {
- opts.GraphDriverName = overlayDriver
- } else {
- if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
- opts.GraphDriverName = overlayDriver
- opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
- }
- }
- if opts.GraphDriverName == overlayDriver {
- for _, o := range systemOpts.GraphDriverOptions {
- if strings.Contains(o, "ignore_chown_errors") {
- opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
- break
- }
+ if opts.GraphDriverName == overlay2 {
+ logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver")
+ opts.GraphDriverName = overlayDriver
+ }
+
+ // If the configuration file was explicitly set, then copy all the options
+ // present.
+ if defaultConfigFileSet {
+ opts.GraphDriverOptions = systemOpts.GraphDriverOptions
+ } else if opts.GraphDriverName == overlayDriver {
+ for _, o := range systemOpts.GraphDriverOptions {
+ if strings.Contains(o, "ignore_chown_errors") {
+ opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
+ break
}
}
}
@@ -237,56 +291,57 @@ var prevReloadConfig = struct {
}{}
// SetDefaultConfigFilePath sets the default configuration to the specified path
-func SetDefaultConfigFilePath(path string) {
+func SetDefaultConfigFilePath(path string) error {
defaultConfigFile = path
defaultConfigFileSet = true
- ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
+ return ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
}
-func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) {
+func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) error {
prevReloadConfig.mutex.Lock()
defer prevReloadConfig.mutex.Unlock()
fi, err := os.Stat(configFile)
if err != nil {
- if !os.IsNotExist(err) {
- fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
- }
- return
+ return err
}
mtime := fi.ModTime()
if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
*storeOptions = *prevReloadConfig.storeOptions
- return
+ return nil
}
- ReloadConfigurationFile(configFile, storeOptions)
+ if err := ReloadConfigurationFile(configFile, storeOptions); err != nil {
+ return err
+ }
- prevReloadConfig.storeOptions = storeOptions
+ cOptions := *storeOptions
+ prevReloadConfig.storeOptions = &cOptions
prevReloadConfig.mod = mtime
prevReloadConfig.configFile = configFile
+ return nil
}
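The `cOptions := *storeOptions` copy is the subtle part of this hunk: the cache now stores its own copy, so a caller that mutates the returned options can no longer corrupt later cache hits. A self-contained sketch of the mtime-keyed cache under those assumptions:

```go
package main

import (
	"fmt"
	"os"
	"sync"
	"time"
)

// Options stands in for the real StoreOptions.
type Options struct{ Driver string }

var cache = struct {
	sync.Mutex
	file string
	mod  time.Time
	opts *Options
}{}

// load re-parses file only when its mtime changed, and caches a *copy*
// of the result so the caller's struct and the cache stay independent.
func load(file string, out *Options) error {
	cache.Lock()
	defer cache.Unlock()
	fi, err := os.Stat(file)
	if err != nil {
		return err
	}
	if cache.opts != nil && cache.file == file && cache.mod.Equal(fi.ModTime()) {
		*out = *cache.opts // cache hit
		return nil
	}
	out.Driver = "overlay" // stand-in for the real TOML parse
	c := *out              // copy before caching, as in the patch
	cache.opts, cache.mod, cache.file = &c, fi.ModTime(), file
	return nil
}

func main() {
	var o Options
	// Errors (e.g. a missing file) now propagate instead of being printed.
	fmt.Println(load("/etc/containers/storage.conf", &o), o)
}
```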
// ReloadConfigurationFile parses the specified configuration file and overrides
// the configuration in storeOptions.
-func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
- config := new(tomlConfig)
+func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) error {
+ config := new(TomlConfig)
meta, err := toml.DecodeFile(configFile, &config)
if err == nil {
keys := meta.Undecoded()
if len(keys) > 0 {
- logrus.Warningf("Failed to decode the keys %q from %q.", keys, configFile)
+ logrus.Warningf("Failed to decode the keys %q from %q", keys, configFile)
}
} else {
if !os.IsNotExist(err) {
fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
- return
+ return err
}
}
- // Clear storeOptions of previos settings
+ // Clear storeOptions of previous settings
*storeOptions = StoreOptions{}
if config.Storage.Driver != "" {
storeOptions.GraphDriverName = config.Storage.Driver
@@ -295,8 +350,12 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
config.Storage.Driver = os.Getenv("STORAGE_DRIVER")
storeOptions.GraphDriverName = config.Storage.Driver
}
+ if storeOptions.GraphDriverName == overlay2 {
+ logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver")
+ storeOptions.GraphDriverName = overlayDriver
+ }
if storeOptions.GraphDriverName == "" {
- logrus.Errorf("The storage 'driver' option must be set in %s, guarantee proper operation.", configFile)
+ logrus.Errorf("The storage 'driver' option must be set in %s to guarantee proper operation", configFile)
}
if config.Storage.RunRoot != "" {
storeOptions.RunRoot = config.Storage.RunRoot
@@ -341,7 +400,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup)
if err != nil {
fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
- return
+ return err
}
storeOptions.UIDMap = mappings.UIDs()
storeOptions.GIDMap = mappings.GIDs()
@@ -349,16 +408,15 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
if err != nil {
- fmt.Print(err)
- } else {
- storeOptions.UIDMap = uidmap
+ return err
}
gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
if err != nil {
- fmt.Print(err)
- } else {
- storeOptions.GIDMap = gidmap
+ return err
}
+
+ storeOptions.UIDMap = uidmap
+ storeOptions.GIDMap = gidmap
storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser
if config.Storage.Options.AutoUsernsMinSize > 0 {
storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize
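The remap parsing above now fails hard instead of logging and continuing, and both maps are assigned only after both parses succeed. For illustration, a hand-rolled parse of the `container:host:size` triple shape these options use (an assumed format for the sketch; the real work is done by `idtools.ParseIDMap`):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// IDMap mirrors the shape of an idtools.IDMap entry.
type IDMap struct{ ContainerID, HostID, Size int }

// parseTriple parses one "container:host:size" entry, e.g. "0:100000:65536".
func parseTriple(s string) (IDMap, error) {
	parts := strings.Split(s, ":")
	if len(parts) != 3 {
		return IDMap{}, fmt.Errorf("invalid id map %q", s)
	}
	var v [3]int
	for i, p := range parts {
		n, err := strconv.Atoi(p)
		if err != nil {
			return IDMap{}, fmt.Errorf("invalid id map %q: %w", s, err)
		}
		v[i] = n
	}
	return IDMap{v[0], v[1], v[2]}, nil
}

func main() {
	m, err := parseTriple("0:100000:65536")
	fmt.Println(m, err) // {0 100000 65536} <nil>
}
```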
@@ -380,8 +438,46 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" {
storeOptions.GraphDriverOptions = nil
}
+ return nil
+}
+
+func Options() (StoreOptions, error) {
+ defaultStoreOptionsOnce.Do(loadDefaultStoreOptions)
+ return defaultStoreOptions, loadDefaultStoreOptionsErr
}
-func Options() StoreOptions {
- return defaultStoreOptions
+// Save overwrites the TomlConfig in storage.conf with the given conf
+func Save(conf TomlConfig, rootless bool) error {
+ configFile, err := DefaultConfigFile(rootless)
+ if err != nil {
+ return err
+ }
+
+ if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil {
+ return err
+ }
+
+ f, err := os.Create(configFile)
+ if err != nil {
+ return err
+ }
+
+ return toml.NewEncoder(f).Encode(conf)
+}
+
+// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it
+func StorageConfig(rootless bool) (*TomlConfig, error) {
+ config := new(TomlConfig)
+
+ configFile, err := DefaultConfigFile(rootless)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = toml.DecodeFile(configFile, &config)
+ if err != nil {
+ return nil, err
+ }
+
+ return config, nil
}
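A hedged usage sketch of the two new helpers, assuming the vendored import path `github.com/containers/storage/types`:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/types"
)

func main() {
	// Load the current storage.conf for a rootless user.
	cfg, err := types.StorageConfig(true)
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	// Adjust one field and write the whole file back.
	cfg.Storage.Driver = "overlay"
	if err := types.Save(*cfg, true); err != nil {
		fmt.Println("save:", err)
	}
}
```

Note the round trip is lossy by design: `Save` re-encodes the decoded struct after removing the old file, so comments and any keys `TomlConfig` does not model are dropped.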
diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go
new file mode 100644
index 00000000000..d5ad50bc0bd
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options_darwin.go
@@ -0,0 +1,17 @@
+package types
+
+const (
+ // these are the default paths for the run root and graph root for rootful users;
+ // for rootless users the path is constructed via getRootlessStorageOpts
+ defaultRunRoot string = "/run/containers/storage"
+ defaultGraphRoot string = "/var/lib/containers/storage"
+)
+
+// defaultConfigFile is the path to the system-wide storage.conf file
+var (
+ defaultConfigFile = "/usr/share/containers/storage.conf"
+ defaultOverrideConfigFile = "/etc/containers/storage.conf"
+ defaultConfigFileSet = false
+ // DefaultStoreOptions is a reasonable default set of options.
+ defaultStoreOptions StoreOptions
+)
diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_freebsd.go
new file mode 100644
index 00000000000..d5976b6d581
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options_freebsd.go
@@ -0,0 +1,17 @@
+package types
+
+const (
+ // these are the default paths for the run root and graph root for rootful users;
+ // for rootless users the path is constructed via getRootlessStorageOpts
+ defaultRunRoot string = "/var/run/containers/storage"
+ defaultGraphRoot string = "/var/db/containers/storage"
+)
+
+// defaultConfigFile is the path to the system-wide storage.conf file
+var (
+ defaultConfigFile = "/usr/local/share/containers/storage.conf"
+ defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf"
+ defaultConfigFileSet = false
+ // DefaultStoreOptions is a reasonable default set of options.
+ defaultStoreOptions StoreOptions
+)
diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go
new file mode 100644
index 00000000000..d5ad50bc0bd
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options_linux.go
@@ -0,0 +1,17 @@
+package types
+
+const (
+ // these are the default paths for the run root and graph root for rootful users;
+ // for rootless users the path is constructed via getRootlessStorageOpts
+ defaultRunRoot string = "/run/containers/storage"
+ defaultGraphRoot string = "/var/lib/containers/storage"
+)
+
+// defaultConfigFile is the path to the system-wide storage.conf file
+var (
+ defaultConfigFile = "/usr/share/containers/storage.conf"
+ defaultOverrideConfigFile = "/etc/containers/storage.conf"
+ defaultConfigFileSet = false
+ // DefaultStoreOptions is a reasonable default set of options.
+ defaultStoreOptions StoreOptions
+)
diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go
new file mode 100644
index 00000000000..d5ad50bc0bd
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options_windows.go
@@ -0,0 +1,17 @@
+package types
+
+const (
+ // these are the default paths for the run root and graph root for rootful users;
+ // for rootless users the path is constructed via getRootlessStorageOpts
+ defaultRunRoot string = "/run/containers/storage"
+ defaultGraphRoot string = "/var/lib/containers/storage"
+)
+
+// defaultConfigFile is the path to the system-wide storage.conf file
+var (
+ defaultConfigFile = "/usr/share/containers/storage.conf"
+ defaultOverrideConfigFile = "/etc/containers/storage.conf"
+ defaultConfigFileSet = false
+ // DefaultStoreOptions is a reasonable default set of options.
+ defaultStoreOptions StoreOptions
+)
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index b7ab0734211..c54de763567 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -1,8 +1,8 @@
package types
import (
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -10,7 +10,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/system"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -22,7 +21,7 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
}
path = filepath.Join(path, "containers")
if err := os.MkdirAll(path, 0700); err != nil {
- return "", errors.Wrapf(err, "unable to make rootless runtime")
+ return "", fmt.Errorf("unable to make rootless runtime: %w", err)
}
return path, nil
}
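The `errors.Wrapf` to `fmt.Errorf(... %w ...)` migration throughout this file keeps wrapped errors matchable with the standard library's inspection helpers. A minimal demonstration:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	err := fmt.Errorf("unable to make rootless runtime: %w", os.ErrPermission)
	// %w keeps the chain intact for errors.Is / errors.As.
	fmt.Println(errors.Is(err, os.ErrPermission)) // true
}
```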
@@ -75,7 +74,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
return runtimeDir, nil
}
- initCommand, err := ioutil.ReadFile(env.getProcCommandFile())
+ initCommand, err := os.ReadFile(env.getProcCommandFile())
if err != nil || string(initCommand) == "systemd" {
runUserDir := env.getRunUserDir()
if isRootlessRuntimeDirOwner(runUserDir, env) {
@@ -87,7 +86,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
if tmpPerUserDir != "" {
if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {
if err := os.Mkdir(tmpPerUserDir, 0700); err != nil {
- logrus.Errorf("failed to create temp directory for user: %v", err)
+ logrus.Errorf("Failed to create temp directory for user: %v", err)
} else {
return tmpPerUserDir, nil
}
@@ -132,7 +131,7 @@ func getRootlessDirInfo(rootlessUID int) (string, string, error) {
home := homedir.Get()
if home == "" {
- return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
+ return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty: %w", err)
}
// runc doesn't like symlinks in the rootfs path, and at least
// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
@@ -170,7 +169,7 @@ func DefaultConfigFile(rootless bool) (string, error) {
return defaultConfigFile, nil
}
- if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
+ if path, ok := os.LookupEnv(storageConfEnv); ok {
return path, nil
}
if !rootless {
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 3ada41f734f..6e200ec1212 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -1,6 +1,7 @@
package storage
import (
+ "fmt"
"os"
"os/user"
"path/filepath"
@@ -11,7 +12,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/containers/storage/types"
libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -43,7 +43,7 @@ func getAdditionalSubIDs(username string) (*idSet, *idSet, error) {
}
mappings, err := idtools.NewIDMappings(username, username)
if err != nil {
- logrus.Errorf("cannot find mappings for user %q: %v", username, err)
+ logrus.Errorf("Cannot find mappings for user %q: %v", username, err)
} else {
uids = getHostIDs(mappings.UIDs())
gids = getHostIDs(mappings.GIDs())
@@ -124,7 +124,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
// getMaxSizeFromImage returns the maximum ID used by the specified image.
// The layer stores must be already locked.
-func (s *store) getMaxSizeFromImage(id string, image *Image, passwdFile, groupFile string) (uint32, error) {
+func (s *store) getMaxSizeFromImage(image *Image, passwdFile, groupFile string) (uint32, error) {
lstore, err := s.LayerStore()
if err != nil {
return 0, err
@@ -164,7 +164,7 @@ outer:
}
continue outer
}
- return 0, errors.Errorf("cannot find layer %q", layerName)
+ return 0, fmt.Errorf("cannot find layer %q", layerName)
}
rlstore, err := s.LayerStore()
@@ -183,7 +183,7 @@ outer:
// We need to create a temporary layer so we can mount it and lookup the
// maximum IDs used.
- clayer, err := rlstore.Create(id, topLayer, nil, "", nil, layerOptions, false)
+ clayer, err := rlstore.Create("", topLayer, nil, "", nil, layerOptions, false)
if err != nil {
return 0, err
}
@@ -211,7 +211,7 @@ outer:
}
// getAutoUserNS creates an automatic user namespace
-func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) {
+func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) {
requestedSize := uint32(0)
initialSize := uint32(1)
if options.Size > 0 {
@@ -223,10 +223,10 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image
availableUIDs, availableGIDs, err := s.getAvailableIDs()
if err != nil {
- return nil, nil, errors.Wrapf(err, "cannot read mappings")
+ return nil, nil, fmt.Errorf("cannot read mappings: %w", err)
}
- // Look every container that is using a user namespace and store
+ // Look at every container that is using a user namespace and store
// the intervals that are already used.
containers, err := s.Containers()
if err != nil {
@@ -250,7 +250,7 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image
size = s.autoNsMinSize
}
if image != nil {
- sizeFromImage, err := s.getMaxSizeFromImage(id, image, options.PasswdFile, options.GroupFile)
+ sizeFromImage, err := s.getMaxSizeFromImage(image, options.PasswdFile, options.GroupFile)
if err != nil {
return nil, nil, err
}
@@ -259,7 +259,7 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image
}
}
if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize {
- return nil, nil, errors.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize)
+ return nil, nil, fmt.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize)
}
}
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 80d56041b06..ae9600e6878 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -16,12 +16,12 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
return types.GetRootlessRuntimeDir(rootlessUID)
}
-// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers
+// DefaultStoreOptionsAutoDetectUID returns the default storage options for containers
func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) {
return types.DefaultStoreOptionsAutoDetectUID()
}
-// DefaultStoreOptions returns the default storage ops for containers
+// DefaultStoreOptions returns the default storage options for containers
func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) {
return types.DefaultStoreOptions(rootless, rootlessUID)
}
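For reference, a hedged usage sketch of the re-exported helper (the output naturally depends on the host's storage configuration):

```go
package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	// Rootless defaults for UID 1000.
	opts, err := storage.DefaultStoreOptions(true, 1000)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(opts.GraphDriverName, opts.GraphRoot, opts.RunRoot)
}
```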
@@ -40,3 +40,35 @@ func validateMountOptions(mountOptions []string) error {
}
return nil
}
+
+func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) {
+ var result []string
+ switch op {
+ case setNames:
+ // ignore all old names and just return new names
+ result = opParameters
+ case removeNames:
+ // remove given names from old names
+ result = make([]string, 0, len(oldNames))
+ for _, name := range oldNames {
+ // only keep names in final result which do not intersect with input names
+ // basically `result = oldNames - opParameters`
+ nameShouldBeRemoved := false
+ for _, opName := range opParameters {
+ if name == opName {
+ nameShouldBeRemoved = true
+ }
+ }
+ if !nameShouldBeRemoved {
+ result = append(result, name)
+ }
+ }
+ case addNames:
+ result = make([]string, 0, len(opParameters)+len(oldNames))
+ result = append(result, opParameters...)
+ result = append(result, oldNames...)
+ default:
+ return result, errInvalidUpdateNameOperation
+ }
+ return dedupeNames(result), nil
+}
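A standalone illustration of the three name operations (with assumed constants; the vendored version additionally runs the result through `dedupeNames`):

```go
package main

import "fmt"

type nameOp int

const (
	setNames nameOp = iota
	addNames
	removeNames
)

// apply reproduces the switch above in miniature.
func apply(old, params []string, op nameOp) []string {
	switch op {
	case setNames:
		return params
	case addNames:
		// new names first, then the old ones
		out := make([]string, 0, len(params)+len(old))
		out = append(out, params...)
		return append(out, old...)
	case removeNames:
		// out = old - params
		drop := make(map[string]bool, len(params))
		for _, p := range params {
			drop[p] = true
		}
		out := make([]string, 0, len(old))
		for _, n := range old {
			if !drop[n] {
				out = append(out, n)
			}
		}
		return out
	}
	return nil
}

func main() {
	fmt.Println(apply([]string{"a", "b", "c"}, []string{"b"}, removeNames)) // [a c]
}
```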
diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
index 3938f383494..b94ff8cf92a 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
+++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
@@ -4,10 +4,12 @@
language: go
go:
- - 1.7.x
- - 1.8.x
+ - 1.13.x
+ - 1.16.x
- tip
-
+arch:
+ - AMD64
+ - ppc64le
os:
- linux
- osx
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md
index 49b2baa9f35..3624617c89b 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/README.md
+++ b/vendor/github.com/cyphar/filepath-securejoin/README.md
@@ -7,6 +7,19 @@ standard library][go#20126]. The purpose of this function is to be a "secure"
alternative to `filepath.Join`, and in particular it provides certain
guarantees that are not provided by `filepath.Join`.
+> **NOTE**: This code is *only* safe if you are not at risk of other processes
+> modifying path components after you've used `SecureJoin`. If it is possible
+> for a malicious process to modify path components of the resolved path, then
+> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There
+> are some Linux kernel patches I'm working on which might allow for a better
+> solution.][lwn-obeneath]
+>
+> In addition, with a slightly modified API it might be possible to use
+> `O_PATH` and verify that the opened path is actually the resolved one -- but
+> I have not done that yet. I might add it in the future as a helper function
+> to help users verify the path (we can't just return `/proc/self/fd/`
+> because that doesn't always work transparently for all users).
+
This is the function prototype:
```go
@@ -16,8 +29,8 @@ func SecureJoin(root, unsafePath string) (string, error)
This library **guarantees** the following:
* If no error is set, the resulting string **must** be a child path of
- `SecureJoin` and will not contain any symlink path components (they will all
- be expanded).
+ `root` and will not contain any symlink path components (they will all be
+ expanded).
* When expanding symlinks, all symlink path components **must** be resolved
relative to the provided root. In particular, this can be considered a
@@ -25,7 +38,7 @@ This library **guarantees** the following:
these symlinks will **not** be expanded lexically (`filepath.Clean` is not
called on the input before processing).
-* Non-existant path components are unaffected by `SecureJoin` (similar to
+* Non-existent path components are unaffected by `SecureJoin` (similar to
`filepath.EvalSymlinks`'s semantics).
* The returned path will always be `filepath.Clean`ed and thus not contain any
@@ -57,6 +70,7 @@ func SecureJoin(root, unsafePath string) (string, error) {
}
```
+[lwn-obeneath]: https://lwn.net/Articles/767547/
[go#20126]: https://github.com/golang/go/issues/20126
### License ###
diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION
index ee1372d33a2..7179039691c 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/VERSION
+++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION
@@ -1 +1 @@
-0.2.2
+0.2.3
diff --git a/vendor/github.com/cyphar/filepath-securejoin/go.mod b/vendor/github.com/cyphar/filepath-securejoin/go.mod
new file mode 100644
index 00000000000..0607c1fa060
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/go.mod
@@ -0,0 +1,3 @@
+module github.com/cyphar/filepath-securejoin
+
+go 1.13
diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go
index c4ca3d71300..7dd08dbbdf7 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/join.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/join.go
@@ -12,39 +12,20 @@ package securejoin
import (
"bytes"
+ "errors"
"os"
"path/filepath"
"strings"
"syscall"
-
- "github.com/pkg/errors"
)
-// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been
-// evaluated in attempting to securely join the two given paths.
-var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join")
-
// IsNotExist tells you if err is an error that implies that either the path
// accessed does not exist (or path components don't exist). This is
// effectively a more broad version of os.IsNotExist.
func IsNotExist(err error) bool {
- // If it's a bone-fide ENOENT just bail.
- if os.IsNotExist(errors.Cause(err)) {
- return true
- }
-
// Check that it's not actually an ENOTDIR, which in some cases is a more
// convoluted case of ENOENT (usually involving weird paths).
- var errno error
- switch err := errors.Cause(err).(type) {
- case *os.PathError:
- errno = err.Err
- case *os.LinkError:
- errno = err.Err
- case *os.SyscallError:
- errno = err.Err
- }
- return errno == syscall.ENOTDIR || errno == syscall.ENOENT
+ return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
}
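The rewritten `IsNotExist` leans on `errors.Is` unwrapping through `*os.PathError` and friends. A quick check of why the explicit `ENOTDIR` case is still needed:

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	err := &os.PathError{Op: "open", Path: "/x/y", Err: syscall.ENOTDIR}
	fmt.Println(errors.Is(err, syscall.ENOTDIR)) // true: unwraps through PathError
	fmt.Println(errors.Is(err, os.ErrNotExist))  // false: ENOTDIR is not ENOENT,
	// which is exactly why IsNotExist checks it explicitly
}
```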
// SecureJoinVFS joins the two given path components (similar to Join) except
@@ -68,7 +49,7 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
n := 0
for unsafePath != "" {
if n > 255 {
- return "", ErrSymlinkLoop
+ return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP}
}
// Next path component, p.
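A hedged usage sketch of `SecureJoin` with hostile input:

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// "../../etc/passwd" cannot escape the root: the traversal is
	// resolved relative to /var/lib/containers.
	p, err := securejoin.SecureJoin("/var/lib/containers", "../../etc/passwd")
	fmt.Println(p, err) // /var/lib/containers/etc/passwd <nil>
}
```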
diff --git a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf
deleted file mode 100644
index 66bb574b955..00000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf
+++ /dev/null
@@ -1 +0,0 @@
-github.com/pkg/errors v0.8.0
diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore
deleted file mode 100644
index 4cf7888e927..00000000000
--- a/vendor/github.com/docker/distribution/.gitignore
+++ /dev/null
@@ -1,38 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-# never checkin from the bin file (for now)
-bin/*
-
-# Test key files
-*.pem
-
-# Cover profiles
-*.out
-
-# Editor/IDE specific files.
-*.sublime-project
-*.sublime-workspace
-.idea/*
diff --git a/vendor/github.com/docker/distribution/.gometalinter.json b/vendor/github.com/docker/distribution/.gometalinter.json
deleted file mode 100644
index 9df5b14bcb2..00000000000
--- a/vendor/github.com/docker/distribution/.gometalinter.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "Vendor": true,
- "Deadline": "2m",
- "Sort": ["linter", "severity", "path", "line"],
- "EnableGC": true,
- "Enable": [
- "structcheck",
- "staticcheck",
- "unconvert",
-
- "gofmt",
- "goimports",
- "golint",
- "vet"
- ]
-}
diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap
deleted file mode 100644
index 0f48321d497..00000000000
--- a/vendor/github.com/docker/distribution/.mailmap
+++ /dev/null
@@ -1,32 +0,0 @@
-Stephen J Day Stephen Day
-Stephen J Day Stephen Day
-Olivier Gambier Olivier Gambier
-Brian Bland Brian Bland
-Brian Bland Brian Bland
-Josh Hawn Josh Hawn
-Richard Scothern Richard
-Richard Scothern Richard Scothern
-Andrew Meredith Andrew Meredith
-harche harche
-Jessie Frazelle
-Sharif Nassar Sharif Nassar
-Sven Dowideit Sven Dowideit
-Vincent Giersch Vincent Giersch
-davidli davidli
-Omer Cohen Omer Cohen
-Eric Yang Eric Yang
-Nikita Tarasov Nikita
-Yu Wang yuwaMSFT2
-Yu Wang Yu Wang (UC)
-Olivier Gambier dmp
-Olivier Gambier Olivier
-Olivier Gambier Olivier
-Elsan Li 李楠 elsanli(李楠)
-Rui Cao ruicao
-Gwendolynne Barr gbarr01
-Haibing Zhou 周海兵 zhouhaibing089
-Feng Honglin tifayuki
-Helen Xie Helen-xie
-Mike Brown Mike Brown
-Manish Tomar Manish Tomar
-Sakeven Jiang sakeven
diff --git a/vendor/github.com/docker/distribution/.travis.yml b/vendor/github.com/docker/distribution/.travis.yml
deleted file mode 100644
index 44ced60451d..00000000000
--- a/vendor/github.com/docker/distribution/.travis.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-dist: trusty
-sudo: required
-# setup travis so that we can run containers for integration tests
-services:
- - docker
-
-language: go
-
-go:
- - "1.11.x"
-
-go_import_path: github.com/docker/distribution
-
-addons:
- apt:
- packages:
- - python-minimal
-
-
-env:
- - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1
-
-before_install:
- - uname -r
- - sudo apt-get -q update
-
-install:
- - go get -u github.com/vbatts/git-validation
- # TODO: Add enforcement of license
- # - go get -u github.com/kunalkushwaha/ltag
- - cd $TRAVIS_BUILD_DIR
-
-script:
- - export GOOS=$TRAVIS_GOOS
- - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
- - DCO_VERBOSITY=-q script/validate/dco
- - GOOS=linux script/setup/install-dev-tools
- - script/validate/vendor
- - go build -i .
- - make check
- - make build
- - make binaries
- # Currently takes too long
- #- if [ "$GOOS" = "linux" ]; then make test-race ; fi
- - if [ "$GOOS" = "linux" ]; then make coverage ; fi
-
-after_success:
- - bash <(curl -s https://codecov.io/bash) -F linux
-
-before_deploy:
- # Run tests with storage driver configurations
diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md
deleted file mode 100644
index 2981d016b0d..00000000000
--- a/vendor/github.com/docker/distribution/BUILDING.md
+++ /dev/null
@@ -1,117 +0,0 @@
-
-# Building the registry source
-
-## Use-case
-
-This is useful if you intend to actively work on the registry.
-
-### Alternatives
-
-Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
-
-People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
-
-OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).
-
-### Gotchas
-
-You are expected to know your way around with go & git.
-
-If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.
-
-## Build the development environment
-
-The first prerequisite of properly building distribution targets is to have a Go
-development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
-for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
-environment.
-
-If a Go development environment is setup, one can use `go get` to install the
-`registry` command from the current latest:
-
- go get github.com/docker/distribution/cmd/registry
-
-The above will install the source repository into the `GOPATH`.
-
-Now create the directory for the registry data (this might require you to set permissions properly)
-
- mkdir -p /var/lib/registry
-
-... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location.
-
-The `registry`
-binary can then be run with the following:
-
- $ $GOPATH/bin/registry --version
- $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
-
-> __NOTE:__ While you do not need to use `go get` to checkout the distribution
-> project, for these build instructions to work, the project must be checked
-> out in the correct location in the `GOPATH`. This should almost always be
-> `$GOPATH/src/github.com/docker/distribution`.
-
-The registry can be run with the default config using the following
-incantation:
-
- $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
- INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
- INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
- INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
- INFO[0000] debug server listening localhost:5001
-
-If it is working, one should see the above log messages.
-
-### Repeatable Builds
-
-For the full development experience, one should `cd` into
-`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
-commands, such as `go test`, should work per package (please see
-[Developing](#developing) if they don't work).
-
-A `Makefile` has been provided as a convenience to support repeatable builds.
-Please install the following into `GOPATH` for it to work:
-
- go get github.com/golang/lint/golint
-
-Once these commands are available in the `GOPATH`, run `make` to get a full
-build:
-
- $ make
- + clean
- + fmt
- + vet
- + lint
- + build
- github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
- github.com/sirupsen/logrus
- github.com/docker/libtrust
- ...
- github.com/yvasiyarov/gorelic
- github.com/docker/distribution/registry/handlers
- github.com/docker/distribution/cmd/registry
- + test
- ...
- ok github.com/docker/distribution/digest 7.875s
- ok github.com/docker/distribution/manifest 0.028s
- ok github.com/docker/distribution/notifications 17.322s
- ? github.com/docker/distribution/registry [no test files]
- ok github.com/docker/distribution/registry/api/v2 0.101s
- ? github.com/docker/distribution/registry/auth [no test files]
- ok github.com/docker/distribution/registry/auth/silly 0.011s
- ...
- + /Users/sday/go/src/github.com/docker/distribution/bin/registry
- + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
- + binaries
-
-The above provides a repeatable build using the contents of the vendor
-directory. This includes formatting, vetting, linting, building,
-testing and generating tagged binaries. We can verify this worked by running
-the registry binary generated in the "./bin" directory:
-
- $ ./bin/registry --version
- ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
-
-### Optional build tags
-
-Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
-the environment variable `DOCKER_BUILDTAGS`.
diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md
deleted file mode 100644
index 4c067d9e7ec..00000000000
--- a/vendor/github.com/docker/distribution/CONTRIBUTING.md
+++ /dev/null
@@ -1,148 +0,0 @@
-# Contributing to the registry
-
-## Before reporting an issue...
-
-### If your problem is with...
-
- - automated builds
- - your account on the [Docker Hub](https://hub.docker.com/)
- - any other [Docker Hub](https://hub.docker.com/) issue
-
-Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
-
-### If you...
-
- - need help setting up your registry
- - can't figure out something
- - are not sure what's going on or what your problem is
-
-Then please do not open an issue here yet - you should first try one of the following support forums:
-
- - irc: #docker-distribution on freenode
- - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
-
-### Reporting security issues
-
-The Docker maintainers take security seriously. If you discover a security
-issue, please bring it to their attention right away!
-
-Please **DO NOT** file a public issue, instead send your report privately to
-[security@docker.com](mailto:security@docker.com).
-
-## Reporting an issue properly
-
-By following these simple rules you will get better and faster feedback on your issue.
-
- - search the bugtracker for an already reported issue
-
-### If you found an issue that describes your problem:
-
- - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- - please refrain from adding "same thing here" or "+1" comments
- - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- - comment if you have some new, technical and relevant information to add to the case
- - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
-
-### If you have not found an existing issue that describes your problem:
-
- 1. create a new issue, with a succinct title that describes your issue:
- - bad title: "It doesn't work with my docker"
- - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
- 2. copy the output of:
- - `docker version`
- - `docker info`
- - `docker exec registry --version`
- 3. copy the command line you used to launch your Registry
- 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
- 5. reproduce your problem and get your docker daemon logs showing the error
- 6. if relevant, copy your registry logs that show the error
- 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
- 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
-
-## Contributing a patch for a known bug, or a small correction
-
-You should follow the basic GitHub workflow:
-
- 1. fork
- 2. commit a change
- 3. make sure the tests pass
- 4. PR
-
-Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
-
- - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- - sign your commits using `-s`: `git commit -s -m "My commit"`
-
-Some simple rules to ensure quick merge:
-
- - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- - if you need to amend your PR following comments, please squash instead of adding more commits
-
-## Contributing new features
-
-You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
-
-If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
-If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
-
-Then you should submit your implementation, clearly linking to the issue (and possible proposal).
-
-Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
-
-It's mandatory to:
-
- - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- - address maintainers' comments and modify your submission accordingly
- - write tests for any new code
-
-Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
-
-Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)
-
-## Coding Style
-
-Unless explicitly stated, we follow all coding guidelines from the Go
-community. While some of these standards may seem arbitrary, they somehow seem
-to result in a solid, consistent codebase.
-
-It is possible that the code base does not currently comply with these
-guidelines. We are not looking for a massive PR that fixes this, since that
-goes against the spirit of the guidelines. All new contributions should make a
-best effort to clean up and make the code base better than they left it.
-Obviously, apply your best judgement. Remember, the goal here is to make the
-code base easier for humans to navigate and understand. Always keep that in
-mind when nudging others to comply.
-
-The rules:
-
-1. All code should be formatted with `gofmt -s`.
-2. All code should pass the default levels of
- [`golint`](https://github.com/golang/lint).
-3. All code should follow the guidelines covered in [Effective
- Go](http://golang.org/doc/effective_go.html) and [Go Code Review
- Comments](https://github.com/golang/go/wiki/CodeReviewComments).
-4. Comment the code. Tell us the why, the history and the context.
-5. Document _all_ declarations and methods, even private ones. Declare
- expectations, caveats and anything else that may be important. If a type
- gets exported, having the comments already there will ensure it's ready.
-6. Variable name length should be proportional to its context and no longer.
- `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
- In practice, short methods will have short variable names and globals will
- have longer names.
-7. No underscores in package names. If you need a compound name, step back,
- and re-examine why you need a compound name. If you still think you need a
- compound name, lose the underscore.
-8. No utils or helpers packages. If a function is not general enough to
- warrant its own package, it has not been written generally enough to be a
- part of a util package. Just leave it unexported and well-documented.
-9. All tests should run with `go test` and outside tooling should not be
- required. No, we don't need another unit testing framework. Assertion
- packages are acceptable if they provide _real_ incremental value.
-10. Even though we call these "rules" above, they are actually just
- guidelines. Since you've read all the rules, you now know that.
-
-If you are having trouble getting into the mood of idiomatic Go, we recommend
-reading through [Effective Go](http://golang.org/doc/effective_go.html). The
-[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
-kool-aid is a lot easier than going thirsty.
diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS
deleted file mode 100644
index 3183620c57b..00000000000
--- a/vendor/github.com/docker/distribution/MAINTAINERS
+++ /dev/null
@@ -1,243 +0,0 @@
-# Distribution maintainers file
-#
-# This file describes who runs the docker/distribution project and how.
-# This is a living document - if you see something out of date or missing, speak up!
-#
-# It is structured to be consumable by both humans and programs.
-# To extract its contents programmatically, use any TOML-compliant parser.
-#
-
-[Rules]
-
- [Rules.maintainers]
-
- title = "What is a maintainer?"
-
- text = """
-There are different types of maintainers, with different responsibilities, but
-all maintainers have 3 things in common:
-
-1) They share responsibility in the project's success.
-2) They have made a long-term, recurring time investment to improve the project.
-3) They spend that time doing whatever needs to be done, not necessarily what
-is the most interesting or fun.
-
-Maintainers are often under-appreciated, because their work is harder to appreciate.
-It's easy to appreciate a really cool and technically advanced feature. It's harder
-to appreciate the absence of bugs, the slow but steady improvement in stability,
-or the reliability of a release process. But those things distinguish a good
-project from a great one.
-"""
-
- [Rules.reviewer]
-
- title = "What is a reviewer?"
-
- text = """
-A reviewer is a core role within the project.
-They share in reviewing issues and pull requests and their LGTM count towards the
-required LGTM count to merge a code change into the project.
-
-Reviewers are part of the organization but do not have write access.
-Becoming a reviewer is a core aspect in the journey to becoming a maintainer.
-"""
-
- [Rules.adding-maintainers]
-
- title = "How are maintainers added?"
-
- text = """
-Maintainers are first and foremost contributors that have shown they are
-committed to the long term success of a project. Contributors wanting to become
-maintainers are expected to be deeply involved in contributing code, pull
-request review, and triage of issues in the project for more than three months.
-
-Just contributing does not make you a maintainer, it is about building trust
-with the current maintainers of the project and being a person that they can
-depend on and trust to make decisions in the best interest of the project.
-
-Periodically, the existing maintainers curate a list of contributors that have
-shown regular activity on the project over the prior months. From this list,
-maintainer candidates are selected and proposed on the maintainers mailing list.
-
-After a candidate has been announced on the maintainers mailing list, the
-existing maintainers are given five business days to discuss the candidate,
-raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing
-list. Only maintainers of the repository that the candidate is proposed for are
-allowed to vote.
-
-If a candidate is approved, a maintainer will contact the candidate to invite
-the candidate to open a pull request that adds the contributor to the
-MAINTAINERS file. The candidate becomes a maintainer once the pull request is
-merged.
-"""
-
- [Rules.stepping-down-policy]
-
- title = "Stepping down policy"
-
- text = """
-Life priorities, interests, and passions can change. If you're a maintainer but
-feel you must remove yourself from the list, inform other maintainers that you
-intend to step down, and if possible, help find someone to pick up your work.
-At the very least, ensure your work can be continued where you left off.
-
-After you've informed other maintainers, create a pull request to remove
-yourself from the MAINTAINERS file.
-"""
-
- [Rules.inactive-maintainers]
-
- title = "Removal of inactive maintainers"
-
- text = """
-Similar to the procedure for adding new maintainers, existing maintainers can
-be removed from the list if they do not show significant activity on the
-project. Periodically, the maintainers review the list of maintainers and their
-activity over the last three months.
-
-If a maintainer has shown insufficient activity over this period, a neutral
-person will contact the maintainer to ask if they want to continue being
-a maintainer. If the maintainer decides to step down as a maintainer, they
-open a pull request to be removed from the MAINTAINERS file.
-
-If the maintainer wants to remain a maintainer, but is unable to perform the
-required duties they can be removed with a vote of at least 66% of
-the current maintainers. An e-mail is sent to the
-mailing list, inviting maintainers of the project to vote. The voting period is
-five business days. Issues related to a maintainer's performance should be
-discussed with them among the other maintainers so that they are not surprised
-by a pull request removing them.
-"""
-
- [Rules.decisions]
-
- title = "How are decisions made?"
-
- text = """
-Short answer: EVERYTHING IS A PULL REQUEST.
-
-distribution is an open-source project with an open design philosophy. This means
-that the repository is the source of truth for EVERY aspect of the project,
-including its philosophy, design, road map, and APIs. *If it's part of the
-project, it's in the repo. If it's in the repo, it's part of the project.*
-
-As a result, all decisions can be expressed as changes to the repository. An
-implementation change is a change to the source code. An API change is a change
-to the API specification. A philosophy change is a change to the philosophy
-manifesto, and so on.
-
-All decisions affecting distribution, big and small, follow the same 3 steps:
-
-* Step 1: Open a pull request. Anyone can do this.
-
-* Step 2: Discuss the pull request. Anyone can do this.
-
-* Step 3: Merge or refuse the pull request. Who does this depends on the nature
-of the pull request and which areas of the project it affects.
-"""
-
- [Rules.DCO]
-
- title = "Helping contributors with the DCO"
-
- text = """
-The [DCO or `Sign your work`](
-https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work)
-requirement is not intended as a roadblock or speed bump.
-
-Some distribution contributors are not as familiar with `git`, or have used a web
-based editor, and thus asking them to `git commit --amend -s` is not the best
-way forward.
-
-In this case, maintainers can update the commits based on clause (c) of the DCO.
-The most trivial way for a contributor to allow the maintainer to do this, is to
-add a DCO signature in a pull requests's comment, or a maintainer can simply
-note that the change is sufficiently trivial that it does not substantially
-change the existing contribution - i.e., a spelling change.
-
-When you add someone's DCO, please also add your own to keep a log.
-"""
-
- [Rules."no direct push"]
-
- title = "I'm a maintainer. Should I make pull requests too?"
-
- text = """
-Yes. Nobody should ever push to master directly. All changes should be
-made through a pull request.
-"""
-
- [Rules.tsc]
-
- title = "Conflict Resolution and technical disputes"
-
- text = """
-distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters."
- """
-
- [Rules.meta]
-
- title = "How is this process changed?"
-
- text = "Just like everything else: by making a pull request :)"
-
-# Current project organization
-[Org]
-
- [Org.Maintainers]
- people = [
- "dmcgowan",
- "dmp42",
- "stevvooe",
- ]
- [Org.Reviewers]
- people = [
- "manishtomar",
- "caervs",
- "davidswu",
- "RobbKistler"
- ]
-
-[people]
-
-# A reference list of all people associated with the project.
-# All other sections should refer to people by their canonical key
-# in the people section.
-
- # ADD YOURSELF HERE IN ALPHABETICAL ORDER
-
- [people.caervs]
- Name = "Ryan Abrams"
- Email = "rdabrams@gmail.com"
- GitHub = "caervs"
-
- [people.davidswu]
- Name = "David Wu"
- Email = "dwu7401@gmail.com"
- GitHub = "davidswu"
-
- [people.dmcgowan]
- Name = "Derek McGowan"
- Email = "derek@mcgstyle.net"
- GitHub = "dmcgowan"
-
- [people.dmp42]
- Name = "Olivier Gambier"
- Email = "olivier@docker.com"
- GitHub = "dmp42"
-
- [people.manishtomar]
- Name = "Manish Tomar"
- Email = "manish.tomar@docker.com"
- GitHub = "manishtomar"
-
- [people.RobbKistler]
- Name = "Robb Kistler"
- Email = "robb.kistler@docker.com"
- GitHub = "RobbKistler"
-
- [people.stevvooe]
- Name = "Stephen Day"
- Email = "stephen.day@docker.com"
- GitHub = "stevvooe"
diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile
deleted file mode 100644
index 4635c6eca87..00000000000
--- a/vendor/github.com/docker/distribution/Makefile
+++ /dev/null
@@ -1,102 +0,0 @@
-# Root directory of the project (absolute path).
-ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
-
-# Used to populate version variable in main package.
-VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
-REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
-
-
-PKG=github.com/docker/distribution
-
-# Project packages.
-PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/)
-INTEGRATION_PACKAGE=${PKG}
-COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES})
-
-
-# Project binaries.
-COMMANDS=registry digest registry-api-descriptor-template
-
-# Allow turning off function inlining and variable registerization
-ifeq (${DISABLE_OPTIMIZATION},true)
- GO_GCFLAGS=-gcflags "-N -l"
- VERSION:="$(VERSION)-noopt"
-endif
-
-WHALE = "+"
-
-# Go files
-#
-TESTFLAGS_RACE=
-GOFILES=$(shell find . -type f -name '*.go')
-GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
-GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)'
-
-BINARIES=$(addprefix bin/,$(COMMANDS))
-
-# Flags passed to `go test`
-TESTFLAGS ?= -v $(TESTFLAGS_RACE)
-TESTFLAGS_PARALLEL ?= 8
-
-.PHONY: all build binaries check clean test test-race test-full integration coverage
-.DEFAULT: all
-
-all: binaries
-
-# This only needs to be generated by hand when cutting full releases.
-version/version.go:
- @echo "$(WHALE) $@"
- ./version/version.sh > $@
-
-check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck")
- @echo "$(WHALE) $@"
- gometalinter --config .gometalinter.json ./...
-
-test: ## run tests, except integration test with test.short
- @echo "$(WHALE) $@"
- @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
-
-test-race: ## run tests, except integration test with test.short and race
- @echo "$(WHALE) $@"
- @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
-
-test-full: ## run tests, except integration tests
- @echo "$(WHALE) $@"
- @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
-
-integration: ## run integration tests
- @echo "$(WHALE) $@"
- @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE}
-
-coverage: ## generate coverprofiles from the unit tests
- @echo "$(WHALE) $@"
- @rm -f coverage.txt
- @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null
- @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \
- go test ${GO_TAGS} ${TESTFLAGS} \
- -cover \
- -coverprofile=profile.out \
- -covermode=atomic $$pkg || exit; \
- if [ -f profile.out ]; then \
- cat profile.out >> coverage.txt; \
- rm profile.out; \
- fi; \
- done )
-
-FORCE:
-
-# Build a binary from a cmd.
-bin/%: cmd/% FORCE
- @echo "$(WHALE) $@${BINARY_SUFFIX}"
- @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$<
-
-binaries: $(BINARIES) ## build binaries
- @echo "$(WHALE) $@"
-
-build:
- @echo "$(WHALE) $@"
- @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES)
-
-clean: ## clean up binaries
- @echo "$(WHALE) $@"
- @rm -f $(BINARIES)
diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md
deleted file mode 100644
index 998878850cd..00000000000
--- a/vendor/github.com/docker/distribution/README.md
+++ /dev/null
@@ -1,130 +0,0 @@
-# Distribution
-
-The Docker toolset to pack, ship, store, and deliver content.
-
-This repository's main product is the Docker Registry 2.0 implementation
-for storing and distributing Docker images. It supersedes the
-[docker/docker-registry](https://github.com/docker/docker-registry)
-project with a new API design, focused around security and performance.
-
-
-
-[Circle CI](https://circleci.com/gh/docker/distribution/tree/master)
-[GoDoc](https://godoc.org/github.com/docker/distribution)
-
-This repository contains the following components:
-
-|**Component** |Description |
-|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
-| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
-| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
-| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
-
-### How does this integrate with Docker engine?
-
-This project should provide an implementation to a V2 API for use in the [Docker
-core project](https://github.com/docker/docker). The API should be embeddable
-and simplify the process of securely pulling and pushing content from `docker`
-daemons.
-
-### What are the long term goals of the Distribution project?
-
-The _Distribution_ project has the further long term goal of providing a
-secure tool chain for distributing content. The specifications, APIs and tools
-should be as useful with Docker as they are without.
-
-Our goal is to design a professional grade and extensible content distribution
-system that allow users to:
-
-* Enjoy an efficient, secured and reliable way to store, manage, package and
- exchange content
-* Hack/roll their own on top of healthy open-source components
-* Implement their own home made solution through good specs, and solid
- extensions mechanism.
-
-## More about Registry 2.0
-
-The new registry implementation provides the following benefits:
-
-- faster push and pull
-- new, more efficient implementation
-- simplified deployment
-- pluggable storage backend
-- webhook notifications
-
-For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
-
-### Who needs to deploy a registry?
-
-By default, Docker users pull images from Docker's public registry instance.
-[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
-ability. Users can also push images to a repository on Docker's public registry,
-if they have a [Docker Hub](https://hub.docker.com/) account.
-
-For some users and even companies, this default behavior is sufficient. For
-others, it is not.
-
-For example, users with their own software products may want to maintain a
-registry for private, company images. Also, you may wish to deploy your own
-image repository for images used to test or in continuous integration. For these
-use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
-may be the better choice.
-
-### Migration to Registry 2.0
-
-For those who have previously deployed their own registry based on the Registry
-1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
-data migration is required. A tool to assist with migration efforts has been
-created. For more information see [docker/migrator](https://github.com/docker/migrator).
-
-## Contribute
-
-Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
-issues, fixes, and patches to this project. If you are contributing code, see
-the instructions for [building a development environment](BUILDING.md).
-
-## Support
-
-If any issues are encountered while using the _Distribution_ project, several
-avenues are available for support:
-
-
-| Channel       | Where                                                                     |
-|---------------|---------------------------------------------------------------------------|
-| IRC           | #docker-distribution on FreeNode                                          |
-| Issue Tracker | github.com/docker/distribution/issues                                     |
-| Google Groups | https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution  |
-| Mailing List  | docker@dockerproject.org                                                  |
-
-## License
-
-This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md
deleted file mode 100644
index 701127afec6..00000000000
--- a/vendor/github.com/docker/distribution/ROADMAP.md
+++ /dev/null
@@ -1,267 +0,0 @@
-# Roadmap
-
-The Distribution Project consists of several components, some of which are
-still being defined. This document defines the high-level goals of the
-project, identifies the current components, and defines the release-
-relationship to the Docker Platform.
-
-* [Distribution Goals](#distribution-goals)
-* [Distribution Components](#distribution-components)
-* [Project Planning](#project-planning): release-relationship to the Docker Platform.
-
-This road map is a living document, providing an overview of the goals and
-considerations made in respect of the future of the project.
-
-## Distribution Goals
-
-- Replace the existing [docker registry](github.com/docker/docker-registry)
- implementation as the primary implementation.
-- Replace the existing push and pull code in the docker engine with the
- distribution package.
-- Define a strong data model for distributing docker images
-- Provide a flexible distribution tool kit for use in the docker platform
-- Unlock new distribution models
-
-## Distribution Components
-
-Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
-features and bugfixes for a component will be added to the relevant milestone. If a feature or
-bugfix is not part of a milestone, it is currently unscheduled for
-implementation.
-
-* [Registry](#registry)
-* [Distribution Package](#distribution-package)
-
-***
-
-### Registry
-
-The new Docker registry is the main portion of the distribution repository.
-Registry 2.0 is the first release of the next-generation registry. This was
-primarily focused on implementing the [new registry
-API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
-with a focus on security and performance.
-
-Following from the Distribution project goals above, we have a set of goals
-for registry v2 that we would like to follow in the design. New features
-should be compared against these goals.
-
-#### Data Storage and Distribution First
-
-The registry's first goal is to provide a reliable, consistent storage
-location for Docker images. The registry should only provide the minimal
-amount of indexing required to fetch image data and no more.
-
-This means we should be selective in new features and API additions, including
-those that may require expensive, ever growing indexes. Requests should be
-servable in "constant time".
-
-#### Content Addressability
-
-All data objects used in the registry API should be content addressable.
-Content identifiers should be secure and verifiable. This provides a secure,
-reliable base from which to build more advanced content distribution systems.
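-
-As a concrete illustration (a sketch using the opencontainers/go-digest
-library, which this project already depends on), a content address is simply a
-verifiable hash of the bytes themselves:
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/opencontainers/go-digest"
-)
-
-func main() {
-	blob := []byte("layer contents")
-	// The address of the content is derived from the content itself.
-	dgst := digest.FromBytes(blob)
-	// Anyone holding the bytes can independently re-verify the address.
-	fmt.Println(dgst, dgst.Algorithm().FromBytes(blob) == dgst)
-}
-```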
-
-#### Content Agnostic
-
-In the past, changes to the image format would require large changes in Docker
-and the Registry. By decoupling the distribution and image format, we can
-allow the formats to progress without having to coordinate between the two.
-This means that we should be focused on decoupling Docker from the registry
-just as much as decoupling the registry from Docker. Such an approach will
-allow us to unlock new distribution models that haven't been possible before.
-
-We can take this further by saying that the new registry should be content
-agnostic. The registry provides a model of names, tags, manifests and content
-addresses and that model can be used to work with content.
-
-#### Simplicity
-
-The new registry should be closer to a microservice component than its
-predecessor. This means it should have a narrower API and a low number of
-service dependencies. It should be easy to deploy.
-
-This means that other solutions should be explored before changing the API or
-adding extra dependencies. If functionality is required, ask whether it can be
-added as an extension or companion service.
-
-#### Extensibility
-
-The registry should provide extension points to add functionality, keeping the
-core scope narrow while still making it possible to layer on new capabilities.
-
-Features like search, indexing, synchronization and registry explorers fall
-into this category. No such feature should be added unless we've found it
-impossible to do through an extension.
-
-#### Active Feature Discussions
-
-The following are feature discussions that are currently active.
-
-If you don't see your favorite, unimplemented feature, feel free to contact us
-via IRC or the mailing list and we can talk about adding it. The goal here is
-to make sure that new features go through a rigorous design process before
-landing in the registry.
-
-##### Proxying to other Registries
-
-A _pull-through caching_ mode exists for the registry, but is restricted from
-within the docker client to only mirror the official Docker Hub. This functionality
-can be expanded when image provenance has been specified and implemented in the
-distribution project.
-
-##### Metadata storage
-
-Metadata for the registry is currently stored with the manifest and layer data on
-the storage backend. While this is a big win for simplicity and reliably maintaining
-state, it comes at the cost of consistency and high latency. The mutable registry
-metadata operations should be abstracted behind an API which will allow ACID compliant
-storage systems to handle metadata.
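-
-One hypothetical shape for that abstraction (a sketch only; the interface name
-and methods here are assumptions, not a committed design) is a narrow API that
-an ACID-compliant backend could implement:
-
-```go
-package metadata // hypothetical package, for illustration only
-
-import (
-	"context"
-
-	"github.com/opencontainers/go-digest"
-)
-
-// MetadataStore illustrates mutable registry metadata (tag -> digest
-// mappings) hidden behind an API, as discussed above.
-type MetadataStore interface {
-	// PutTag atomically points repo:tag at the given manifest digest.
-	PutTag(ctx context.Context, repo, tag string, dgst digest.Digest) error
-	// ResolveTag returns the digest that repo:tag currently points at.
-	ResolveTag(ctx context.Context, repo, tag string) (digest.Digest, error)
-	// DeleteTag removes the mapping for repo:tag.
-	DeleteTag(ctx context.Context, repo, tag string) error
-}
-```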
-
-##### Peer to Peer transfer
-
-Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
-
-##### Indexing, Search and Discovery
-
-The original registry provided some implementation of search for use with
-private registries. Support has been elided from V2 since we'd like to both
-decouple search functionality from the registry. The makes the registry
-simpler to deploy, especially in use cases where search is not needed, and
-let's us decouple the image format from the registry.
-
-There are explorations into using the catalog API and notification system to
-build external indexes. The current line of thought is that we will define a
-common search API to index and query docker images. Such a system could be run
-as a companion to a registry or set of registries to power discovery.
-
-The main issue with search and discovery is that there are so many ways to
-accomplish it. There are two aspects to this project. The first is deciding on
-how it will be done, including an API definition that can work with changing
-data formats. The second is the process of integrating with `docker search`.
-We expect that someone will attempt to address the problem with the existing
-tools and propose it as a standard search API, or use it to inform a
-standardization process. Once this has been explored, we will integrate with
-the docker client.
-
-Please see the following for more detail:
-
-- https://github.com/docker/distribution/issues/206
-
-##### Deletes
-
-> __NOTE:__ Deletes are a much asked for feature. Before requesting this
-feature or participating in discussion, we ask that you read this section in
-full and understand the problems behind deletes.
-
-While, at first glance, implementing deletes seems simple, there are a number
-of mitigating factors that make many solutions not ideal or even pathological
-in the context of a registry. The following paragraphs discuss the background
-and approaches that could be applied to arrive at a solution.
-
-The goal of deletes in any system is to remove unused or unneeded data. Only
-data requested for deletion should be removed and no other data. Removing
-unintended data is worse than _not_ removing data that was requested for
-removal, but ideally both are supported. Generally, according to this rule, we
-err on holding data longer than needed, ensuring that it is only removed when
-we can be certain that it can be removed. With the current behavior, we opt to
-hold onto the data forever, ensuring that data cannot be incorrectly removed.
-
-To understand the problems with implementing deletes, one must understand the
-data model. All registry data is stored in a filesystem layout, implemented on
-a "storage driver", effectively a _virtual file system_ (VFS). The storage
-system must assume that this VFS layer will be eventually consistent and has
-poor read-after-write consistency, since this is the lowest common denominator
-among the storage drivers. This is mitigated by writing values in
-reverse-dependent order, but makes wider transactional operations unsafe.
-
-Layered on the VFS model is a content-addressable _directed, acyclic graph_
-(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
-Since the same data can be referenced by multiple manifests, we only store
-data once, even if it is in different repositories. Thus, we have a set of
-blobs, referenced by tags and manifests. If we want to delete a blob we need
-to be certain that it is no longer referenced by another manifest or tag. When
-we delete a manifest, we also can try to delete the referenced blobs. Deciding
-whether or not a blob has an active reference is the crux of the problem.
-
-Conceptually, deleting a manifest and its resources is quite simple. Just find
-all the manifests, enumerate the referenced blobs and delete the blobs not in
-that set. An astute observer will recognize this as a garbage collection
-problem. As with garbage collection in programming languages, this is very
-simple when one always has a consistent view. When one adds parallelism and an
-inconsistent view of data, it becomes very challenging.
-
-A simple example can demonstrate this. Let's say we are deleting a manifest
-_A_ in one process. We scan the manifest and decide that all the blobs are
-ready for deletion. Concurrently, we have another process accepting a new
-manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
-is accepted and all the blobs are considered present, so the operation
-proceeds. The original process then deletes the referenced blobs, assuming
-they were unreferenced. The manifest _B_, which we thought had all of its data
-present, can no longer be served by the registry, since the dependent data has
-been deleted.
-
-Deleting data from the registry safely requires some way to coordinate this
-operation. The following approaches are being considered:
-
-- _Reference Counting_ - Maintain a count of references to each blob. This is
-  challenging for a number of reasons: 1. maintaining a consistent consensus
-  of reference counts across a set of Registries and 2. building the initial
-  list of reference counts for an existing registry. These challenges can be
-  met with a consensus protocol like Paxos or Raft in the first case and a
-  necessary but simple scan in the second.
-- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
-  and find all blob references. Delete all unreferenced blobs. This approach
-  is very simple but requires disabling writes for a period of time while the
-  service reads all data. This is slow and expensive but very accurate and
-  effective (a minimal sketch of this approach follows this list).
-- _Generational GC_ - Do something similar to above but instead of blocking
- writes, writes are sent to another storage backend while reads are broadcast
- to the new and old backends. GC is then performed on the read-only portion.
- Because writes land in the new backend, the data in the read-only section
- can be safely deleted. The main drawbacks of this approach are complexity
- and coordination.
-- _Centralized Oracle_ - Using a centralized, transactional database, we can
-  know exactly which data is referenced at any given time. This avoids the
-  coordination problem by managing this data in a single location. We trade
-  off metadata scalability for simplicity and performance. This is a very good
-  option for most registry deployments. This would create a bottleneck for
-  registry metadata. However, metadata is generally not the main bottleneck
-  when serving images.
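-
-To make the trade-offs concrete, here is a minimal sketch of the _Lock the
-World GC_ approach, written against the interfaces defined in this repository
-(ManifestService, ManifestEnumerator, BlobEnumerator, BlobDeleter). It assumes
-all writes are already halted and elides retry and logging concerns:
-
-```go
-package gc // illustrative sketch only
-
-import (
-	"context"
-
-	"github.com/docker/distribution"
-	"github.com/opencontainers/go-digest"
-)
-
-// markAndSweep deletes every blob not reachable from some manifest. It is
-// only safe to run while the registry accepts no writes.
-func markAndSweep(ctx context.Context, ms distribution.ManifestService,
-	me distribution.ManifestEnumerator, be distribution.BlobEnumerator,
-	bd distribution.BlobDeleter) error {
-
-	marked := map[digest.Digest]struct{}{}
-
-	// Mark: walk every manifest, recording it and each blob it references.
-	if err := me.Enumerate(ctx, func(dgst digest.Digest) error {
-		m, err := ms.Get(ctx, dgst)
-		if err != nil {
-			return err
-		}
-		marked[dgst] = struct{}{} // manifests are stored as blobs too
-		for _, ref := range m.References() {
-			marked[ref.Digest] = struct{}{}
-		}
-		return nil
-	}); err != nil {
-		return err
-	}
-
-	// Sweep: delete every blob that was never marked.
-	return be.Enumerate(ctx, func(dgst digest.Digest) error {
-		if _, ok := marked[dgst]; !ok {
-			return bd.Delete(ctx, dgst)
-		}
-		return nil
-	})
-}
-```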
-
-Please let us know if other solutions exist that we have yet to enumerate.
-Note that for any approach, implementation is a massive consideration. For
-example, a mark-sweep based solution may seem simple, but the coordination
-work it requires may offset the extra effort of building a _Centralized
-Oracle_. We'll accept proposals for any solution but please coordinate with us
-before dropping code.
-
-At this time, we have traded off simplicity and ease of deployment for disk
-space. Simplicity and ease of deployment tend to reduce developer involvement,
-which is currently the most expensive resource in software engineering. Taking
-on any solution for deletes will greatly affect these factors, trading off
-very cheap disk space for a complex deployment and operational story.
-
-Please see the following issues for more detail:
-
-- https://github.com/docker/distribution/issues/422
-- https://github.com/docker/distribution/issues/461
-- https://github.com/docker/distribution/issues/462
-
-### Distribution Package
-
-At its core, the Distribution Project is a set of Go packages that make up
-Distribution Components. At this time, most of these packages make up the
-Registry implementation.
-
-The package itself is considered unstable. If you're using it, please take care to vendor the version you depend on.
-
-For feature additions, please see the Registry section. In the future, we may break out a
-separate Roadmap for distribution-specific features that apply to more than
-just the registry.
-
-***
-
-### Project Planning
-
-An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
-
diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go
deleted file mode 100644
index c0e9261be93..00000000000
--- a/vendor/github.com/docker/distribution/blobs.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package distribution
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "net/http"
- "time"
-
- "github.com/docker/distribution/reference"
- "github.com/opencontainers/go-digest"
- "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-var (
-	// ErrBlobExists is returned when the blob already exists.
-	ErrBlobExists = errors.New("blob exists")
-
-	// ErrBlobDigestUnsupported is returned when the blob digest is an unsupported version.
-	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
-
-	// ErrBlobUnknown is returned when the blob is not found.
-	ErrBlobUnknown = errors.New("unknown blob")
-
-	// ErrBlobUploadUnknown is returned when the upload is not found.
-	ErrBlobUploadUnknown = errors.New("blob upload unknown")
-
-	// ErrBlobInvalidLength is returned when the blob has an unexpected length on
-	// commit, meaning it mismatched the descriptor or is an invalid value.
-	ErrBlobInvalidLength = errors.New("blob invalid length")
-)
-
-// ErrBlobInvalidDigest is returned when the digest check fails.
-type ErrBlobInvalidDigest struct {
- Digest digest.Digest
- Reason error
-}
-
-func (err ErrBlobInvalidDigest) Error() string {
- return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
- err.Digest, err.Reason)
-}
-
-// ErrBlobMounted is returned when a blob is mounted from another repository
-// instead of initiating an upload session.
-type ErrBlobMounted struct {
- From reference.Canonical
- Descriptor Descriptor
-}
-
-func (err ErrBlobMounted) Error() string {
- return fmt.Sprintf("blob mounted from: %v to: %v",
- err.From, err.Descriptor)
-}
-
-// Descriptor describes targeted content. Used in conjunction with a blob
-// store, a descriptor can be used to fetch, store and target any kind of
-// blob. The struct also describes the wire protocol format. Fields should
-// only be added but never changed.
-type Descriptor struct {
-	// MediaType describes the type of the content. All text-based formats are
-	// encoded as utf-8.
- MediaType string `json:"mediaType,omitempty"`
-
- // Size in bytes of content.
- Size int64 `json:"size,omitempty"`
-
- // Digest uniquely identifies the content. A byte stream can be verified
- // against this digest.
- Digest digest.Digest `json:"digest,omitempty"`
-
- // URLs contains the source URLs of this content.
- URLs []string `json:"urls,omitempty"`
-
- // Annotations contains arbitrary metadata relating to the targeted content.
- Annotations map[string]string `json:"annotations,omitempty"`
-
- // Platform describes the platform which the image in the manifest runs on.
- // This should only be used when referring to a manifest.
- Platform *v1.Platform `json:"platform,omitempty"`
-
- // NOTE: Before adding a field here, please ensure that all
- // other options have been exhausted. Much of the type relationships
- // depend on the simplicity of this type.
-}
-
-// Descriptor returns the descriptor, to make it satisfy the Describable
-// interface. Note that implementations of Describable are generally objects
-// which can be described, not simply descriptors; this exception is in place
-// to make it more convenient to pass actual descriptors to functions that
-// expect Describable objects.
-func (d Descriptor) Descriptor() Descriptor {
- return d
-}
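-
-// Constructing a descriptor for raw content looks like the following
-// (an illustrative sketch, not part of the upstream file):
-//
-//	p := []byte("hello")
-//	desc := Descriptor{
-//		MediaType: "application/octet-stream",
-//		Size:      int64(len(p)),
-//		Digest:    digest.FromBytes(p),
-//	}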
-
-// BlobStatter makes blob descriptors available by digest. The service may
-// provide a descriptor of a different digest if the provided digest is not
-// canonical.
-type BlobStatter interface {
- // Stat provides metadata about a blob identified by the digest. If the
- // blob is unknown to the describer, ErrBlobUnknown will be returned.
- Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
-}
-
-// BlobDeleter enables deleting blobs from storage.
-type BlobDeleter interface {
- Delete(ctx context.Context, dgst digest.Digest) error
-}
-
-// BlobEnumerator enables iterating over blobs from storage
-type BlobEnumerator interface {
- Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
-}
-
-// BlobDescriptorService manages metadata about a blob by digest. Most
-// implementations will not expose such an interface explicitly. Such mappings
-// should be maintained by interacting with the BlobIngester. Hence, this is
-// left off of BlobService and BlobStore.
-type BlobDescriptorService interface {
- BlobStatter
-
- // SetDescriptor assigns the descriptor to the digest. The provided digest and
- // the digest in the descriptor must map to identical content but they may
- // differ on their algorithm. The descriptor must have the canonical
-	// digest of the content and the digest algorithm must match the
-	// annotator's canonical algorithm.
- //
- // Such a facility can be used to map blobs between digest domains, with
-	// the restriction that the algorithm of the descriptor must match the
-	// canonical algorithm (i.e. sha256) of the annotator.
- SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
-
- // Clear enables descriptors to be unlinked
- Clear(ctx context.Context, dgst digest.Digest) error
-}
-
-// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
-type BlobDescriptorServiceFactory interface {
- BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
-}
-
-// ReadSeekCloser is the primary reader type for blob data, combining
-// io.ReadSeeker with io.Closer.
-type ReadSeekCloser interface {
- io.ReadSeeker
- io.Closer
-}
-
-// BlobProvider describes operations for getting blob data.
-type BlobProvider interface {
- // Get returns the entire blob identified by digest along with the descriptor.
- Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
-
- // Open provides a ReadSeekCloser to the blob identified by the provided
- // descriptor. If the blob is not known to the service, an error will be
- // returned.
- Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
-}
-
-// BlobServer can serve blobs via http.
-type BlobServer interface {
- // ServeBlob attempts to serve the blob, identified by dgst, via http. The
- // service may decide to redirect the client elsewhere or serve the data
- // directly.
- //
- // This handler only issues successful responses, such as 2xx or 3xx,
- // meaning it serves data or issues a redirect. If the blob is not
- // available, an error will be returned and the caller may still issue a
- // response.
- //
- // The implementation may serve the same blob from a different digest
- // domain. The appropriate headers will be set for the blob, unless they
- // have already been set by the caller.
- ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
-}
-
-// BlobIngester ingests blob data.
-type BlobIngester interface {
- // Put inserts the content p into the blob service, returning a descriptor
- // or an error.
- Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
-
- // Create allocates a new blob writer to add a blob to this service. The
- // returned handle can be written to and later resumed using an opaque
- // identifier. With this approach, one can Close and Resume a BlobWriter
- // multiple times until the BlobWriter is committed or cancelled.
- Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
-
- // Resume attempts to resume a write to a blob, identified by an id.
- Resume(ctx context.Context, id string) (BlobWriter, error)
-}
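-
-// A typical upload, in terms of the methods above (illustrative sketch only;
-// error handling elided):
-//
-//	bw, err := ingester.Create(ctx)          // open an upload session
-//	_, err = bw.ReadFrom(content)            // stream the blob data
-//	desc, err := bw.Commit(ctx, provisional) // verify and link the blob
-//	// or bw.Cancel(ctx) to abandon the session and free its resources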
-
-// BlobCreateOption is a general extensible function argument for blob creation
-// methods. A BlobIngester may choose to honor any or none of the given
-// BlobCreateOptions, which can be specific to the implementation of the
-// BlobIngester receiving them.
-// TODO (brianbland): unify this with ManifestServiceOption in the future
-type BlobCreateOption interface {
- Apply(interface{}) error
-}
-
-// CreateOptions is a collection of blob creation modifiers relevant to general
-// blob storage intended to be configured by the BlobCreateOption.Apply method.
-type CreateOptions struct {
- Mount struct {
- ShouldMount bool
- From reference.Canonical
-		// Stat allows passing a precalculated descriptor to link and return.
- // Blob access check will be skipped if set.
- Stat *Descriptor
- }
-}
-
-// BlobWriter provides a handle for inserting data into a blob store.
-// Instances should be obtained from BlobWriteService.Writer and
-// BlobWriteService.Resume. If supported by the store, a writer can be
-// recovered with the id.
-type BlobWriter interface {
- io.WriteCloser
- io.ReaderFrom
-
- // Size returns the number of bytes written to this blob.
- Size() int64
-
- // ID returns the identifier for this writer. The ID can be used with the
- // Blob service to later resume the write.
- ID() string
-
- // StartedAt returns the time this blob write was started.
- StartedAt() time.Time
-
- // Commit completes the blob writer process. The content is verified
- // against the provided provisional descriptor, which may result in an
- // error. Depending on the implementation, written data may be validated
- // against the provisional descriptor fields. If MediaType is not present,
-	// the implementation may reject the commit or assign "application/octet-stream"
-	// to the blob. The returned descriptor may have a different
- // digest depending on the blob store, referred to as the canonical
- // descriptor.
- Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
-
- // Cancel ends the blob write without storing any data and frees any
- // associated resources. Any data written thus far will be lost. Cancel
- // implementations should allow multiple calls even after a commit that
- // result in a no-op. This allows use of Cancel in a defer statement,
- // increasing the assurance that it is correctly called.
- Cancel(ctx context.Context) error
-}
-
-// BlobService combines the operations to access, read and write blobs. This
-// can be used to describe remote blob services.
-type BlobService interface {
- BlobStatter
- BlobProvider
- BlobIngester
-}
-
-// BlobStore represent the entire suite of blob related operations. Such an
-// implementation can access, read, write, delete and serve blobs.
-type BlobStore interface {
- BlobService
- BlobServer
- BlobDeleter
-}
diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go
deleted file mode 100644
index bdd8cb708e5..00000000000
--- a/vendor/github.com/docker/distribution/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Package distribution will define the interfaces for the components of
-// docker distribution. The goal is to allow users to reliably package, ship
-// and store content related to docker images.
-//
-// This is currently a work in progress. More details are available in the
-// README.md.
-package distribution
diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go
deleted file mode 100644
index 8e0b788d6c5..00000000000
--- a/vendor/github.com/docker/distribution/errors.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package distribution
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/opencontainers/go-digest"
-)
-
-// ErrAccessDenied is returned when an access to a requested resource is
-// denied.
-var ErrAccessDenied = errors.New("access denied")
-
-// ErrManifestNotModified is returned when a conditional manifest GetByTag
-// returns nil due to the client indicating it has the latest version
-var ErrManifestNotModified = errors.New("manifest not modified")
-
-// ErrUnsupported is returned when an unimplemented or unsupported action is
-// performed
-var ErrUnsupported = errors.New("operation unsupported")
-
-// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
-// manifest but the registry is configured to reject it
-var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")
-
-// ErrTagUnknown is returned if the given tag is not known by the tag service
-type ErrTagUnknown struct {
- Tag string
-}
-
-func (err ErrTagUnknown) Error() string {
- return fmt.Sprintf("unknown tag=%s", err.Tag)
-}
-
-// ErrRepositoryUnknown is returned if the named repository is not known by
-// the registry.
-type ErrRepositoryUnknown struct {
- Name string
-}
-
-func (err ErrRepositoryUnknown) Error() string {
- return fmt.Sprintf("unknown repository name=%s", err.Name)
-}
-
-// ErrRepositoryNameInvalid should be used to denote an invalid repository
-// name. Reason may be set, indicating the cause of invalidity.
-type ErrRepositoryNameInvalid struct {
- Name string
- Reason error
-}
-
-func (err ErrRepositoryNameInvalid) Error() string {
- return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
-}
-
-// ErrManifestUnknown is returned if the manifest is not known by the
-// registry.
-type ErrManifestUnknown struct {
- Name string
- Tag string
-}
-
-func (err ErrManifestUnknown) Error() string {
- return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
-}
-
-// ErrManifestUnknownRevision is returned when a manifest cannot be found by
-// revision within a repository.
-type ErrManifestUnknownRevision struct {
- Name string
- Revision digest.Digest
-}
-
-func (err ErrManifestUnknownRevision) Error() string {
- return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
-}
-
-// ErrManifestUnverified is returned when the registry is unable to verify
-// the manifest.
-type ErrManifestUnverified struct{}
-
-func (ErrManifestUnverified) Error() string {
- return "unverified manifest"
-}
-
-// ErrManifestVerification provides a type to collect errors encountered
-// during manifest verification. Currently, it accepts errors of all types,
-// but it may be narrowed to those involving manifest verification.
-type ErrManifestVerification []error
-
-func (errs ErrManifestVerification) Error() string {
- var parts []string
- for _, err := range errs {
- parts = append(parts, err.Error())
- }
-
- return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
-}
-
-// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
-type ErrManifestBlobUnknown struct {
- Digest digest.Digest
-}
-
-func (err ErrManifestBlobUnknown) Error() string {
- return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
-}
-
-// ErrManifestNameInvalid should be used to denote an invalid manifest
-// name. Reason may be set, indicating the cause of invalidity.
-type ErrManifestNameInvalid struct {
- Name string
- Reason error
-}
-
-func (err ErrManifestNameInvalid) Error() string {
- return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
-}
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
deleted file mode 100644
index 1816baea1d6..00000000000
--- a/vendor/github.com/docker/distribution/manifests.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package distribution
-
-import (
- "context"
- "fmt"
- "mime"
-
- "github.com/opencontainers/go-digest"
-)
-
-// Manifest represents a registry object specifying a set of
-// references and an optional target
-type Manifest interface {
- // References returns a list of objects which make up this manifest.
- // A reference is anything which can be represented by a
- // distribution.Descriptor. These can consist of layers, resources or other
- // manifests.
- //
- // While no particular order is required, implementations should return
- // them from highest to lowest priority. For example, one might want to
- // return the base layer before the top layer.
- References() []Descriptor
-
- // Payload provides the serialized format of the manifest, in addition to
- // the media type.
- Payload() (mediaType string, payload []byte, err error)
-}
-
-// ManifestBuilder creates a manifest allowing one to include dependencies.
-// Instances can be obtained from a version-specific manifest package. Manifest
-// specific data is passed into the function which creates the builder.
-type ManifestBuilder interface {
-	// Build creates the manifest from this builder.
- Build(ctx context.Context) (Manifest, error)
-
- // References returns a list of objects which have been added to this
- // builder. The dependencies are returned in the order they were added,
- // which should be from base to head.
- References() []Descriptor
-
- // AppendReference includes the given object in the manifest after any
- // existing dependencies. If the add fails, such as when adding an
- // unsupported dependency, an error may be returned.
- //
- // The destination of the reference is dependent on the manifest type and
- // the dependency type.
- AppendReference(dependency Describable) error
-}
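-
-// Typical use of a builder (illustrative sketch; `layers` is a hypothetical
-// []Describable ordered from base to head):
-//
-//	for _, layer := range layers {
-//		if err := builder.AppendReference(layer); err != nil {
-//			return err
-//		}
-//	}
-//	manifest, err := builder.Build(ctx)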
-
-// ManifestService describes operations on image manifests.
-type ManifestService interface {
- // Exists returns true if the manifest exists.
- Exists(ctx context.Context, dgst digest.Digest) (bool, error)
-
- // Get retrieves the manifest specified by the given digest
- Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
-
- // Put creates or updates the given manifest returning the manifest digest
- Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
-
- // Delete removes the manifest specified by the given digest. Deleting
- // a manifest that doesn't exist will return ErrManifestNotFound
- Delete(ctx context.Context, dgst digest.Digest) error
-}
-
-// ManifestEnumerator enables iterating over manifests
-type ManifestEnumerator interface {
- // Enumerate calls ingester for each manifest.
- Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
-}
-
-// Describable is an interface for descriptors
-type Describable interface {
- Descriptor() Descriptor
-}
-
-// ManifestMediaTypes returns the supported media types for manifests.
-func ManifestMediaTypes() (mediaTypes []string) {
- for t := range mappings {
- if t != "" {
- mediaTypes = append(mediaTypes, t)
- }
- }
- return
-}
-
-// UnmarshalFunc implements manifest unmarshalling for a given MediaType
-type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
-
-var mappings = make(map[string]UnmarshalFunc)
-
-// UnmarshalManifest looks up manifest unmarshal functions based on
-// MediaType
-func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
- // Need to look up by the actual media type, not the raw contents of
- // the header. Strip semicolons and anything following them.
- var mediaType string
- if ctHeader != "" {
- var err error
- mediaType, _, err = mime.ParseMediaType(ctHeader)
- if err != nil {
- return nil, Descriptor{}, err
- }
- }
-
- unmarshalFunc, ok := mappings[mediaType]
- if !ok {
- unmarshalFunc, ok = mappings[""]
- if !ok {
- return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
- }
- }
-
- return unmarshalFunc(p)
-}
-
-// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
-// should be called from specific manifest packages at init time.
-func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
- if _, ok := mappings[mediaType]; ok {
- return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
- }
- mappings[mediaType] = u
- return nil
-}
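-
-// A version-specific manifest package would wire itself in at init time
-// (illustrative sketch; the media type and manifest type are hypothetical):
-//
-//	func init() {
-//		const mt = "application/vnd.example.manifest.v1+json"
-//		if err := RegisterManifestSchema(mt, func(p []byte) (Manifest, Descriptor, error) {
-//			var m exampleManifest // some Manifest implementation
-//			if err := json.Unmarshal(p, &m); err != nil {
-//				return nil, Descriptor{}, err
-//			}
-//			return &m, Descriptor{MediaType: mt, Size: int64(len(p)), Digest: digest.FromBytes(p)}, nil
-//		}); err != nil {
-//			panic(err)
-//		}
-//	}
-//
-// UnmarshalManifest can then dispatch on a Content-Type header value such as
-// "application/vnd.example.manifest.v1+json; charset=utf-8".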
diff --git a/vendor/github.com/docker/distribution/metrics/prometheus.go b/vendor/github.com/docker/distribution/metrics/prometheus.go
deleted file mode 100644
index b5a5321448a..00000000000
--- a/vendor/github.com/docker/distribution/metrics/prometheus.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package metrics
-
-import "github.com/docker/go-metrics"
-
-const (
- // NamespacePrefix is the namespace of prometheus metrics
- NamespacePrefix = "registry"
-)
-
-var (
- // StorageNamespace is the prometheus namespace of blob/cache related operations
- StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
-)
diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go
index 2d71fc5e9ff..b3dfb7a6d7e 100644
--- a/vendor/github.com/docker/distribution/reference/normalize.go
+++ b/vendor/github.com/docker/distribution/reference/normalize.go
@@ -56,6 +56,35 @@ func ParseNormalizedNamed(s string) (Named, error) {
return named, nil
}
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. For a reference that contains both
+// a tag and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+ named, err := ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := named.(NamedTagged); ok {
+ if canonical, ok := named.(Canonical); ok {
+ // The reference is both tagged and digested, only
+ // return digested.
+ newNamed, err := WithName(canonical.Name())
+ if err != nil {
+ return nil, err
+ }
+ newCanonical, err := WithDigest(newNamed, canonical.Digest())
+ if err != nil {
+ return nil, err
+ }
+ return newCanonical, nil
+ }
+ }
+ return TagNameOnly(named), nil
+}
+
// splitDockerDomain splits a repository name to domain and remotename string.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go
index 2f66cca87a3..8c0c23b2fe1 100644
--- a/vendor/github.com/docker/distribution/reference/reference.go
+++ b/vendor/github.com/docker/distribution/reference/reference.go
@@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) {
var repo repository
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
- if nameMatch != nil && len(nameMatch) == 3 {
+ if len(nameMatch) == 3 {
repo.domain = nameMatch[1]
repo.path = nameMatch[2]
} else {
diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go
deleted file mode 100644
index 6c32109894d..00000000000
--- a/vendor/github.com/docker/distribution/registry.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package distribution
-
-import (
- "context"
-
- "github.com/docker/distribution/reference"
-)
-
-// Scope defines the set of items that match a namespace.
-type Scope interface {
- // Contains returns true if the name belongs to the namespace.
- Contains(name string) bool
-}
-
-type fullScope struct{}
-
-func (f fullScope) Contains(string) bool {
- return true
-}
-
-// GlobalScope represents the full namespace scope which contains
-// all other scopes.
-var GlobalScope = Scope(fullScope{})
-
-// Namespace represents a collection of repositories, addressable by name.
-// Generally, a namespace is backed by a set of one or more services,
-// providing facilities such as registry access, trust, and indexing.
-type Namespace interface {
- // Scope describes the names that can be used with this Namespace. The
- // global namespace will have a scope that matches all names. The scope
- // effectively provides an identity for the namespace.
- Scope() Scope
-
- // Repository should return a reference to the named repository. The
- // registry may or may not have the repository but should always return a
- // reference.
- Repository(ctx context.Context, name reference.Named) (Repository, error)
-
- // Repositories fills 'repos' with a lexicographically sorted catalog of repositories
- // up to the size of 'repos' and returns the value 'n' for the number of entries
- // which were filled. 'last' contains an offset in the catalog, and 'err' will be
- // set to io.EOF if there are no more entries to obtain.
- Repositories(ctx context.Context, repos []string, last string) (n int, err error)
-
- // Blobs returns a blob enumerator to access all blobs
- Blobs() BlobEnumerator
-
-	// BlobStatter returns a BlobStatter used to look up blob descriptors by digest.
- BlobStatter() BlobStatter
-}
-
-// RepositoryEnumerator describes an operation to enumerate repositories
-type RepositoryEnumerator interface {
- Enumerate(ctx context.Context, ingester func(string) error) error
-}
-
-// RepositoryRemover removes given repository
-type RepositoryRemover interface {
- Remove(ctx context.Context, name reference.Named) error
-}
-
-// ManifestServiceOption is a function argument for Manifest Service methods
-type ManifestServiceOption interface {
- Apply(ManifestService) error
-}
-
-// WithTag allows a tag to be passed into Put
-func WithTag(tag string) ManifestServiceOption {
- return WithTagOption{tag}
-}
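-
-// For example (illustrative sketch, with a ManifestService `ms` and manifest
-// `m` obtained elsewhere): put a manifest under a tag in one call:
-//
-//	dgst, err := ms.Put(ctx, m, WithTag("latest"))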
-
-// WithTagOption holds a tag
-type WithTagOption struct{ Tag string }
-
-// Apply conforms to the ManifestServiceOption interface
-func (o WithTagOption) Apply(m ManifestService) error {
- // no implementation
- return nil
-}
-
-// WithManifestMediaTypes lists the media types the client wishes
-// the server to provide.
-func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
- return WithManifestMediaTypesOption{mediaTypes}
-}
-
-// WithManifestMediaTypesOption holds a list of accepted media types
-type WithManifestMediaTypesOption struct{ MediaTypes []string }
-
-// Apply conforms to the ManifestServiceOption interface
-func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
- // no implementation
- return nil
-}
-
-// Repository is a named collection of manifests and layers.
-type Repository interface {
- // Named returns the name of the repository.
- Named() reference.Named
-
-	// Manifests returns a reference to this repository's manifest service
-	// with the supplied options applied.
- Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
-
- // Blobs returns a reference to this repository's blob service.
- Blobs(ctx context.Context) BlobStore
-
- // TODO(stevvooe): The above BlobStore return can probably be relaxed to
- // be a BlobService for use with clients. This will allow such
- // implementations to avoid implementing ServeBlob.
-
-	// Tags returns a reference to this repository's tag service.
- Tags(ctx context.Context) TagService
-}
-
-// TODO(stevvooe): Must add close methods to all these. May want to change the
-// way instances are created to better reflect internal dependency
-// relationships.
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
index 6d9bb4b62af..4c35b879afd 100644
--- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -207,11 +207,11 @@ func (errs Errors) MarshalJSON() ([]byte, error) {
for _, daErr := range errs {
var err Error
- switch daErr.(type) {
+ switch daErr := daErr.(type) {
case ErrorCode:
- err = daErr.(ErrorCode).WithDetail(nil)
+ err = daErr.WithDetail(nil)
case Error:
- err = daErr.(Error)
+ err = daErr
default:
err = ErrorCodeUnknown.WithDetail(daErr)
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
index 1337bdb1276..3c3ec989330 100644
--- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go
+++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
@@ -252,15 +252,3 @@ func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
u.RawQuery = merged.Encode()
return u
}
-
-// appendValues appends the parameters to the url. Panics if the string is not
-// a url.
-func appendValues(u string, values ...url.Values) string {
- up, err := url.Parse(u)
-
- if err != nil {
- panic(err) // should never happen
- }
-
- return appendValuesURL(up, values...).String()
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
index 6e3f1ccc410..fe238210cd7 100644
--- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -117,8 +117,8 @@ func init() {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
- isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
- if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
deleted file mode 100644
index 695bf852f16..00000000000
--- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package client
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "time"
-
- "github.com/docker/distribution"
-)
-
-type httpBlobUpload struct {
- statter distribution.BlobStatter
- client *http.Client
-
- uuid string
- startedAt time.Time
-
- location string // always the last value of the location header.
- offset int64
- closed bool
-}
-
-func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
- panic("Not implemented")
-}
-
-func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
- if resp.StatusCode == http.StatusNotFound {
- return distribution.ErrBlobUploadUnknown
- }
- return HandleErrorResponse(resp)
-}
-
-func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
- req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
- if err != nil {
- return 0, err
- }
- defer req.Body.Close()
-
- resp, err := hbu.client.Do(req)
- if err != nil {
- return 0, err
- }
-
- if !SuccessStatus(resp.StatusCode) {
- return 0, hbu.handleErrorResponse(resp)
- }
-
- hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
- hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
- if err != nil {
- return 0, err
- }
- rng := resp.Header.Get("Range")
- var start, end int64
- if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
- return 0, err
- } else if n != 2 || end < start {
- return 0, fmt.Errorf("bad range format: %s", rng)
- }
-
- return (end - start + 1), nil
-
-}
-
-func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
- req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
- if err != nil {
- return 0, err
- }
- req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
- req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
- req.Header.Set("Content-Type", "application/octet-stream")
-
- resp, err := hbu.client.Do(req)
- if err != nil {
- return 0, err
- }
-
- if !SuccessStatus(resp.StatusCode) {
- return 0, hbu.handleErrorResponse(resp)
- }
-
- hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
- hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
- if err != nil {
- return 0, err
- }
- rng := resp.Header.Get("Range")
- var start, end int
- if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
- return 0, err
- } else if n != 2 || end < start {
- return 0, fmt.Errorf("bad range format: %s", rng)
- }
-
- return (end - start + 1), nil
-
-}
-
-func (hbu *httpBlobUpload) Size() int64 {
- return hbu.offset
-}
-
-func (hbu *httpBlobUpload) ID() string {
- return hbu.uuid
-}
-
-func (hbu *httpBlobUpload) StartedAt() time.Time {
- return hbu.startedAt
-}
-
-func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
- // TODO(dmcgowan): Check if already finished, if so just fetch
- req, err := http.NewRequest("PUT", hbu.location, nil)
- if err != nil {
- return distribution.Descriptor{}, err
- }
-
- values := req.URL.Query()
- values.Set("digest", desc.Digest.String())
- req.URL.RawQuery = values.Encode()
-
- resp, err := hbu.client.Do(req)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- defer resp.Body.Close()
-
- if !SuccessStatus(resp.StatusCode) {
- return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
- }
-
- return hbu.statter.Stat(ctx, desc.Digest)
-}
-
-func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
- req, err := http.NewRequest("DELETE", hbu.location, nil)
- if err != nil {
- return err
- }
- resp, err := hbu.client.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
- return nil
- }
- return hbu.handleErrorResponse(resp)
-}
-
-func (hbu *httpBlobUpload) Close() error {
- hbu.closed = true
- return nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
deleted file mode 100644
index aa442e65406..00000000000
--- a/vendor/github.com/docker/distribution/registry/client/repository.go
+++ /dev/null
@@ -1,867 +0,0 @@
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/reference"
- "github.com/docker/distribution/registry/api/v2"
- "github.com/docker/distribution/registry/client/transport"
- "github.com/docker/distribution/registry/storage/cache"
- "github.com/docker/distribution/registry/storage/cache/memory"
- "github.com/opencontainers/go-digest"
-)
-
-// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
-type Registry interface {
- Repositories(ctx context.Context, repos []string, last string) (n int, err error)
-}
-
-// checkHTTPRedirect is a callback that can manipulate redirected HTTP
-// requests. It is used to preserve Accept and Range headers.
-func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
- if len(via) >= 10 {
- return errors.New("stopped after 10 redirects")
- }
-
- if len(via) > 0 {
- for headerName, headerVals := range via[0].Header {
- if headerName != "Accept" && headerName != "Range" {
- continue
- }
- for _, val := range headerVals {
- // Don't add to redirected request if redirected
- // request already has a header with the same
- // name and value.
- hasValue := false
- for _, existingVal := range req.Header[headerName] {
- if existingVal == val {
- hasValue = true
- break
- }
- }
- if !hasValue {
- req.Header.Add(headerName, val)
- }
- }
- }
- }
-
- return nil
-}
-
-// NewRegistry creates a registry namespace which can be used to get a listing of repositories
-func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
- ub, err := v2.NewURLBuilderFromString(baseURL, false)
- if err != nil {
- return nil, err
- }
-
- client := &http.Client{
- Transport: transport,
- Timeout: 1 * time.Minute,
- CheckRedirect: checkHTTPRedirect,
- }
-
- return ®istry{
- client: client,
- ub: ub,
- }, nil
-}
-
-type registry struct {
- client *http.Client
- ub *v2.URLBuilder
-}
-
-// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
-// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
-// are no more entries
-func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
- var numFilled int
- var returnErr error
-
- values := buildCatalogValues(len(entries), last)
- u, err := r.ub.BuildCatalogURL(values)
- if err != nil {
- return 0, err
- }
-
- resp, err := r.client.Get(u)
- if err != nil {
- return 0, err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- var ctlg struct {
- Repositories []string `json:"repositories"`
- }
- decoder := json.NewDecoder(resp.Body)
-
- if err := decoder.Decode(&ctlg); err != nil {
- return 0, err
- }
-
- for cnt := range ctlg.Repositories {
- entries[cnt] = ctlg.Repositories[cnt]
- }
- numFilled = len(ctlg.Repositories)
-
- link := resp.Header.Get("Link")
- if link == "" {
- returnErr = io.EOF
- }
- } else {
- return 0, HandleErrorResponse(resp)
- }
-
- return numFilled, returnErr
-}
-
-// NewRepository creates a new Repository for the given repository name and base URL.
-func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
- ub, err := v2.NewURLBuilderFromString(baseURL, false)
- if err != nil {
- return nil, err
- }
-
- client := &http.Client{
- Transport: transport,
- CheckRedirect: checkHTTPRedirect,
- // TODO(dmcgowan): create cookie jar
- }
-
- return &repository{
- client: client,
- ub: ub,
- name: name,
- }, nil
-}
-
-type repository struct {
- client *http.Client
- ub *v2.URLBuilder
- name reference.Named
-}
-
-func (r *repository) Named() reference.Named {
- return r.name
-}
-
-func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
- statter := &blobStatter{
- name: r.name,
- ub: r.ub,
- client: r.client,
- }
- return &blobs{
- name: r.name,
- ub: r.ub,
- client: r.client,
- statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
- }
-}
-
-func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
- // todo(richardscothern): options should be sent over the wire
- return &manifests{
- name: r.name,
- ub: r.ub,
- client: r.client,
- etags: make(map[string]string),
- }, nil
-}
-
-func (r *repository) Tags(ctx context.Context) distribution.TagService {
- return &tags{
- client: r.client,
- ub: r.ub,
- name: r.Named(),
- }
-}
-
-// tags implements remote tagging operations.
-type tags struct {
- client *http.Client
- ub *v2.URLBuilder
- name reference.Named
-}
-
-// All returns all tags
-func (t *tags) All(ctx context.Context) ([]string, error) {
- var tags []string
-
- listURLStr, err := t.ub.BuildTagsURL(t.name)
- if err != nil {
- return tags, err
- }
-
- listURL, err := url.Parse(listURLStr)
- if err != nil {
- return tags, err
- }
-
- for {
- resp, err := t.client.Get(listURL.String())
- if err != nil {
- return tags, err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- b, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return tags, err
- }
-
- tagsResponse := struct {
- Tags []string `json:"tags"`
- }{}
- if err := json.Unmarshal(b, &tagsResponse); err != nil {
- return tags, err
- }
- tags = append(tags, tagsResponse.Tags...)
- if link := resp.Header.Get("Link"); link != "" {
- linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
- linkURL, err := url.Parse(linkURLStr)
- if err != nil {
- return tags, err
- }
-
- listURL = listURL.ResolveReference(linkURL)
- } else {
- return tags, nil
- }
- } else {
- return tags, HandleErrorResponse(resp)
- }
- }
-}
-
-func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
- desc := distribution.Descriptor{}
- headers := response.Header
-
- ctHeader := headers.Get("Content-Type")
- if ctHeader == "" {
- return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
- }
- desc.MediaType = ctHeader
-
- digestHeader := headers.Get("Docker-Content-Digest")
- if digestHeader == "" {
- bytes, err := ioutil.ReadAll(response.Body)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- return desc, nil
- }
-
- dgst, err := digest.Parse(digestHeader)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- desc.Digest = dgst
-
- lengthHeader := headers.Get("Content-Length")
- if lengthHeader == "" {
- return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
- }
- length, err := strconv.ParseInt(lengthHeader, 10, 64)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- desc.Size = length
-
- return desc, nil
-
-}
-
-// Get issues a HEAD request for a Manifest against its named endpoint in order
-// to construct a descriptor for the tag. If the registry doesn't support HEADing
-// a manifest, fall back to GET.
-func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
- ref, err := reference.WithTag(t.name, tag)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- u, err := t.ub.BuildManifestURL(ref)
- if err != nil {
- return distribution.Descriptor{}, err
- }
-
- newRequest := func(method string) (*http.Response, error) {
- req, err := http.NewRequest(method, u, nil)
- if err != nil {
- return nil, err
- }
-
- for _, t := range distribution.ManifestMediaTypes() {
- req.Header.Add("Accept", t)
- }
- resp, err := t.client.Do(req)
- return resp, err
- }
-
- resp, err := newRequest("HEAD")
- if err != nil {
- return distribution.Descriptor{}, err
- }
- defer resp.Body.Close()
-
- switch {
- case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0:
- // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers
- return descriptorFromResponse(resp)
- default:
- // if the response is an error - there will be no body to decode.
- // Issue a GET request:
- // - for data from a server that does not handle HEAD
- // - to get error details in case of a failure
- resp, err = newRequest("GET")
- if err != nil {
- return distribution.Descriptor{}, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode >= 200 && resp.StatusCode < 400 {
- return descriptorFromResponse(resp)
- }
- return distribution.Descriptor{}, HandleErrorResponse(resp)
- }
-}
-
-func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
- panic("not implemented")
-}
-
-func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
- panic("not implemented")
-}
-
-func (t *tags) Untag(ctx context.Context, tag string) error {
- panic("not implemented")
-}
-
-type manifests struct {
- name reference.Named
- ub *v2.URLBuilder
- client *http.Client
- etags map[string]string
-}
-
-func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
- ref, err := reference.WithDigest(ms.name, dgst)
- if err != nil {
- return false, err
- }
- u, err := ms.ub.BuildManifestURL(ref)
- if err != nil {
- return false, err
- }
-
- resp, err := ms.client.Head(u)
- if err != nil {
- return false, err
- }
-
- if SuccessStatus(resp.StatusCode) {
- return true, nil
- } else if resp.StatusCode == http.StatusNotFound {
- return false, nil
- }
- return false, HandleErrorResponse(resp)
-}
-
-// AddEtagToTag allows a client to supply an eTag to Get which will be
-// used for a conditional HTTP request. If the eTag matches, a nil manifest
-// and ErrManifestNotModified error will be returned. etag is automatically
-// quoted when added to this map.
-func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
- return etagOption{tag, etag}
-}
-
-type etagOption struct{ tag, etag string }
-
-func (o etagOption) Apply(ms distribution.ManifestService) error {
- if ms, ok := ms.(*manifests); ok {
- ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
- return nil
- }
-	return fmt.Errorf("etag option is a client-only option")
-}
-
-// ReturnContentDigest allows a client to set the content digest on
-// a successful request from the 'Docker-Content-Digest' header. This
-// returned digest represents the digest which the registry uses
-// to refer to the content and can be used to delete the content.
-func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
- return contentDigestOption{dgst}
-}
-
-type contentDigestOption struct{ digest *digest.Digest }
-
-func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
- return nil
-}
-
-func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
- var (
- digestOrTag string
- ref reference.Named
- err error
- contentDgst *digest.Digest
- mediaTypes []string
- )
-
- for _, option := range options {
- switch opt := option.(type) {
- case distribution.WithTagOption:
- digestOrTag = opt.Tag
- ref, err = reference.WithTag(ms.name, opt.Tag)
- if err != nil {
- return nil, err
- }
- case contentDigestOption:
- contentDgst = opt.digest
- case distribution.WithManifestMediaTypesOption:
- mediaTypes = opt.MediaTypes
- default:
- err := option.Apply(ms)
- if err != nil {
- return nil, err
- }
- }
- }
-
- if digestOrTag == "" {
- digestOrTag = dgst.String()
- ref, err = reference.WithDigest(ms.name, dgst)
- if err != nil {
- return nil, err
- }
- }
-
- if len(mediaTypes) == 0 {
- mediaTypes = distribution.ManifestMediaTypes()
- }
-
- u, err := ms.ub.BuildManifestURL(ref)
- if err != nil {
- return nil, err
- }
-
- req, err := http.NewRequest("GET", u, nil)
- if err != nil {
- return nil, err
- }
-
- for _, t := range mediaTypes {
- req.Header.Add("Accept", t)
- }
-
- if _, ok := ms.etags[digestOrTag]; ok {
- req.Header.Set("If-None-Match", ms.etags[digestOrTag])
- }
-
- resp, err := ms.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- if resp.StatusCode == http.StatusNotModified {
- return nil, distribution.ErrManifestNotModified
- } else if SuccessStatus(resp.StatusCode) {
- if contentDgst != nil {
- dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
- if err == nil {
- *contentDgst = dgst
- }
- }
- mt := resp.Header.Get("Content-Type")
- body, err := ioutil.ReadAll(resp.Body)
-
- if err != nil {
- return nil, err
- }
- m, _, err := distribution.UnmarshalManifest(mt, body)
- if err != nil {
- return nil, err
- }
- return m, nil
- }
- return nil, HandleErrorResponse(resp)
-}
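
For orientation, a hedged sketch of how the option plumbing above combines from a caller's side: a tag lookup made conditional with AddEtagToTag and the server-side digest captured through ReturnContentDigest. Written as if it lived alongside the removed code in the client package; repo and knownEtag are assumed inputs:

```go
// fetchManifest is a hypothetical caller of the removed manifests.Get.
func fetchManifest(ctx context.Context, repo distribution.Repository, knownEtag string) (distribution.Manifest, digest.Digest, error) {
	ms, err := repo.Manifests(ctx)
	if err != nil {
		return nil, "", err
	}
	var contentDgst digest.Digest
	m, err := ms.Get(ctx, "", // digest is ignored when WithTag is supplied
		distribution.WithTag("latest"),
		AddEtagToTag("latest", knownEtag), // sends If-None-Match
		ReturnContentDigest(&contentDgst), // filled from Docker-Content-Digest
	)
	if err == distribution.ErrManifestNotModified {
		return nil, "", err // the caller's cached manifest is still current
	}
	return m, contentDgst, err
}
```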
-
-// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the
-// tag name in order to build the correct upload URL.
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
- ref := ms.name
- var tagged bool
-
- for _, option := range options {
- if opt, ok := option.(distribution.WithTagOption); ok {
- var err error
- ref, err = reference.WithTag(ref, opt.Tag)
- if err != nil {
- return "", err
- }
- tagged = true
- } else {
- err := option.Apply(ms)
- if err != nil {
- return "", err
- }
- }
- }
- mediaType, p, err := m.Payload()
- if err != nil {
- return "", err
- }
-
- if !tagged {
- // generate a canonical digest and Put by digest
- _, d, err := distribution.UnmarshalManifest(mediaType, p)
- if err != nil {
- return "", err
- }
- ref, err = reference.WithDigest(ref, d.Digest)
- if err != nil {
- return "", err
- }
- }
-
- manifestURL, err := ms.ub.BuildManifestURL(ref)
- if err != nil {
- return "", err
- }
-
- putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
- if err != nil {
- return "", err
- }
-
- putRequest.Header.Set("Content-Type", mediaType)
-
- resp, err := ms.client.Do(putRequest)
- if err != nil {
- return "", err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- dgstHeader := resp.Header.Get("Docker-Content-Digest")
- dgst, err := digest.Parse(dgstHeader)
- if err != nil {
- return "", err
- }
-
- return dgst, nil
- }
-
- return "", HandleErrorResponse(resp)
-}
-
-func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
- ref, err := reference.WithDigest(ms.name, dgst)
- if err != nil {
- return err
- }
- u, err := ms.ub.BuildManifestURL(ref)
- if err != nil {
- return err
- }
- req, err := http.NewRequest("DELETE", u, nil)
- if err != nil {
- return err
- }
-
- resp, err := ms.client.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- return nil
- }
- return HandleErrorResponse(resp)
-}
-
-// todo(richardscothern): Restore interface and implementation with merge of #1050
-/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
- panic("not supported")
-}*/
-
-type blobs struct {
- name reference.Named
- ub *v2.URLBuilder
- client *http.Client
-
- statter distribution.BlobDescriptorService
- distribution.BlobDeleter
-}
-
-func sanitizeLocation(location, base string) (string, error) {
- baseURL, err := url.Parse(base)
- if err != nil {
- return "", err
- }
-
- locationURL, err := url.Parse(location)
- if err != nil {
- return "", err
- }
-
- return baseURL.ResolveReference(locationURL).String(), nil
-}
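
sanitizeLocation exists because registries may return the upload Location header as relative, rooted, or absolute, and url.ResolveReference handles all three uniformly. A tiny demonstration with hypothetical URLs:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	base, _ := url.Parse("https://registry.example/v2/library/busybox/blobs/uploads/")

	// Relative Locations resolve against the request URL, rooted ones
	// replace the path, absolute ones replace the URL entirely -- exactly
	// what baseURL.ResolveReference(locationURL) gives us.
	for _, loc := range []string{
		"abc123?_state=xyz",                        // relative
		"/v2/library/busybox/blobs/uploads/abc123", // rooted
		"https://mirror.example/uploads/abc123",    // absolute
	} {
		u, _ := url.Parse(loc)
		fmt.Println(base.ResolveReference(u))
	}
}
```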
-
-func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-	return bs.statter.Stat(ctx, dgst)
-}
-
-func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
- reader, err := bs.Open(ctx, dgst)
- if err != nil {
- return nil, err
- }
- defer reader.Close()
-
- return ioutil.ReadAll(reader)
-}
-
-func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
- ref, err := reference.WithDigest(bs.name, dgst)
- if err != nil {
- return nil, err
- }
- blobURL, err := bs.ub.BuildBlobURL(ref)
- if err != nil {
- return nil, err
- }
-
- return transport.NewHTTPReadSeeker(bs.client, blobURL,
- func(resp *http.Response) error {
- if resp.StatusCode == http.StatusNotFound {
- return distribution.ErrBlobUnknown
- }
- return HandleErrorResponse(resp)
- }), nil
-}
-
-func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
- panic("not implemented")
-}
-
-func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
- writer, err := bs.Create(ctx)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- dgstr := digest.Canonical.Digester()
- n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
- if err != nil {
- return distribution.Descriptor{}, err
- }
- if n < int64(len(p)) {
- return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
- }
-
- desc := distribution.Descriptor{
- MediaType: mediaType,
- Size: int64(len(p)),
- Digest: dgstr.Digest(),
- }
-
- return writer.Commit(ctx, desc)
-}
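
blobs.Put fingerprints the payload while streaming it, by teeing the reader into a digester so a single pass both writes and hashes. The same pattern in isolation, using go-digest's canonical algorithm with stdout standing in for the upload writer:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/opencontainers/go-digest"
)

func main() {
	p := []byte("example blob payload")

	// Every byte read from the TeeReader is also fed to the digester's
	// hash, so the copy and the fingerprint happen in one pass.
	dgstr := digest.Canonical.Digester()
	n, err := io.Copy(os.Stdout, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
	if err != nil || n < int64(len(p)) {
		fmt.Fprintf(os.Stderr, "short copy: wrote %d of %d (%v)\n", n, len(p), err)
		return
	}
	fmt.Printf("\nsize=%d digest=%s\n", n, dgstr.Digest())
}
```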
-
-type optionFunc func(interface{}) error
-
-func (f optionFunc) Apply(v interface{}) error {
- return f(v)
-}
-
-// WithMountFrom returns a BlobCreateOption which designates that the blob should be
-// mounted from the given canonical reference.
-func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
- return optionFunc(func(v interface{}) error {
- opts, ok := v.(*distribution.CreateOptions)
- if !ok {
- return fmt.Errorf("unexpected options type: %T", v)
- }
-
- opts.Mount.ShouldMount = true
- opts.Mount.From = ref
-
- return nil
- })
-}
-
-func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
- var opts distribution.CreateOptions
-
- for _, option := range options {
- err := option.Apply(&opts)
- if err != nil {
- return nil, err
- }
- }
-
- var values []url.Values
-
- if opts.Mount.ShouldMount {
- values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
- }
-
- u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
- if err != nil {
- return nil, err
- }
-
- resp, err := bs.client.Post(u, "", nil)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- switch resp.StatusCode {
- case http.StatusCreated:
- desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
- if err != nil {
- return nil, err
- }
- return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
- case http.StatusAccepted:
- // TODO(dmcgowan): Check for invalid UUID
- uuid := resp.Header.Get("Docker-Upload-UUID")
- location, err := sanitizeLocation(resp.Header.Get("Location"), u)
- if err != nil {
- return nil, err
- }
-
- return &httpBlobUpload{
- statter: bs.statter,
- client: bs.client,
- uuid: uuid,
- startedAt: time.Now(),
- location: location,
- }, nil
- default:
- return nil, HandleErrorResponse(resp)
- }
-}
-
-func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
- panic("not implemented")
-}
-
-func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
- return bs.statter.Clear(ctx, dgst)
-}
-
-type blobStatter struct {
- name reference.Named
- ub *v2.URLBuilder
- client *http.Client
-}
-
-func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- ref, err := reference.WithDigest(bs.name, dgst)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- u, err := bs.ub.BuildBlobURL(ref)
- if err != nil {
- return distribution.Descriptor{}, err
- }
-
- resp, err := bs.client.Head(u)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- lengthHeader := resp.Header.Get("Content-Length")
- if lengthHeader == "" {
- return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
- }
-
- length, err := strconv.ParseInt(lengthHeader, 10, 64)
- if err != nil {
- return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
- }
-
- return distribution.Descriptor{
- MediaType: resp.Header.Get("Content-Type"),
- Size: length,
- Digest: dgst,
- }, nil
- } else if resp.StatusCode == http.StatusNotFound {
- return distribution.Descriptor{}, distribution.ErrBlobUnknown
- }
- return distribution.Descriptor{}, HandleErrorResponse(resp)
-}
-
-func buildCatalogValues(maxEntries int, last string) url.Values {
- values := url.Values{}
-
- if maxEntries > 0 {
- values.Add("n", strconv.Itoa(maxEntries))
- }
-
- if last != "" {
- values.Add("last", last)
- }
-
- return values
-}
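
buildCatalogValues only emits the `n` and `last` parameters when they are set, which is what lets callers walk /v2/_catalog page by page. A self-contained copy showing the encoded queries (the repository name is made up):

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// buildCatalogValues mirrors the removed helper: n caps the page size and
// last is the exclusive starting point of the next page.
func buildCatalogValues(maxEntries int, last string) url.Values {
	values := url.Values{}
	if maxEntries > 0 {
		values.Add("n", strconv.Itoa(maxEntries))
	}
	if last != "" {
		values.Add("last", last)
	}
	return values
}

func main() {
	fmt.Println(buildCatalogValues(100, "").Encode())            // n=100
	fmt.Println(buildCatalogValues(100, "library/zsh").Encode()) // last=library%2Fzsh&n=100
}
```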
-
-func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
- ref, err := reference.WithDigest(bs.name, dgst)
- if err != nil {
- return err
- }
- blobURL, err := bs.ub.BuildBlobURL(ref)
- if err != nil {
- return err
- }
-
- req, err := http.NewRequest("DELETE", blobURL, nil)
- if err != nil {
- return err
- }
-
- resp, err := bs.client.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- return nil
- }
- return HandleErrorResponse(resp)
-}
-
-func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- return nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
deleted file mode 100644
index 1d0b382fb51..00000000000
--- a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package transport
-
-import (
- "errors"
- "fmt"
- "io"
- "net/http"
- "regexp"
- "strconv"
-)
-
-var (
-	contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
-
- // ErrWrongCodeForByteRange is returned if the client sends a request
- // with a Range header but the server returns a 2xx or 3xx code other
- // than 206 Partial Content.
- ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
-)
-
-// ReadSeekCloser combines io.ReadSeeker with io.Closer.
-type ReadSeekCloser interface {
- io.ReadSeeker
- io.Closer
-}
-
-// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
-// request. When seeking and starting a read from a non-zero offset,
-// a "Range" header will be added which sets the offset.
-// TODO(dmcgowan): Move this into a separate utility package
-func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
- return &httpReadSeeker{
- client: client,
- url: url,
- errorHandler: errorHandler,
- }
-}
-
-type httpReadSeeker struct {
- client *http.Client
- url string
-
- // errorHandler creates an error from an unsuccessful HTTP response.
- // This allows the error to be created with the HTTP response body
- // without leaking the body through a returned error.
- errorHandler func(*http.Response) error
-
- size int64
-
- // rc is the remote read closer.
- rc io.ReadCloser
- // readerOffset tracks the offset as of the last read.
- readerOffset int64
-	// seekOffset allows Seek to override the offset. Seek changes
-	// seekOffset instead of changing readerOffset directly so that
- // connection resets can be delayed and possibly avoided if the
- // seek is undone (i.e. seeking to the end and then back to the
- // beginning).
- seekOffset int64
- err error
-}
-
-func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
- if hrs.err != nil {
- return 0, hrs.err
- }
-
- // If we sought to a different position, we need to reset the
- // connection. This logic is here instead of Seek so that if
- // a seek is undone before the next read, the connection doesn't
- // need to be closed and reopened. A common example of this is
- // seeking to the end to determine the length, and then seeking
- // back to the original position.
- if hrs.readerOffset != hrs.seekOffset {
- hrs.reset()
- }
-
- hrs.readerOffset = hrs.seekOffset
-
- rd, err := hrs.reader()
- if err != nil {
- return 0, err
- }
-
- n, err = rd.Read(p)
- hrs.seekOffset += int64(n)
- hrs.readerOffset += int64(n)
-
- return n, err
-}
-
-func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
- if hrs.err != nil {
- return 0, hrs.err
- }
-
- lastReaderOffset := hrs.readerOffset
-
- if whence == io.SeekStart && hrs.rc == nil {
- // If no request has been made yet, and we are seeking to an
- // absolute position, set the read offset as well to avoid an
- // unnecessary request.
- hrs.readerOffset = offset
- }
-
- _, err := hrs.reader()
- if err != nil {
- hrs.readerOffset = lastReaderOffset
- return 0, err
- }
-
- newOffset := hrs.seekOffset
-
- switch whence {
- case io.SeekCurrent:
- newOffset += offset
- case io.SeekEnd:
- if hrs.size < 0 {
- return 0, errors.New("content length not known")
- }
- newOffset = hrs.size + offset
- case io.SeekStart:
- newOffset = offset
- }
-
- if newOffset < 0 {
- err = errors.New("cannot seek to negative position")
- } else {
- hrs.seekOffset = newOffset
- }
-
- return hrs.seekOffset, err
-}
-
-func (hrs *httpReadSeeker) Close() error {
- if hrs.err != nil {
- return hrs.err
- }
-
- // close and release reader chain
- if hrs.rc != nil {
- hrs.rc.Close()
- }
-
- hrs.rc = nil
-
- hrs.err = errors.New("httpLayer: closed")
-
- return nil
-}
-
-func (hrs *httpReadSeeker) reset() {
- if hrs.err != nil {
- return
- }
- if hrs.rc != nil {
- hrs.rc.Close()
- hrs.rc = nil
- }
-}
-
-func (hrs *httpReadSeeker) reader() (io.Reader, error) {
- if hrs.err != nil {
- return nil, hrs.err
- }
-
- if hrs.rc != nil {
- return hrs.rc, nil
- }
-
- req, err := http.NewRequest("GET", hrs.url, nil)
- if err != nil {
- return nil, err
- }
-
- if hrs.readerOffset > 0 {
- // If we are at different offset, issue a range request from there.
- req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
- // TODO: get context in here
- // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
- }
-
- req.Header.Add("Accept-Encoding", "identity")
- resp, err := hrs.client.Do(req)
- if err != nil {
- return nil, err
- }
-
- // Normally would use client.SuccessStatus, but that would be a cyclic
- // import
- if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
- if hrs.readerOffset > 0 {
- if resp.StatusCode != http.StatusPartialContent {
- return nil, ErrWrongCodeForByteRange
- }
-
- contentRange := resp.Header.Get("Content-Range")
- if contentRange == "" {
- return nil, errors.New("no Content-Range header found in HTTP 206 response")
- }
-
- submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
- if len(submatches) < 4 {
- return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
- }
-
- startByte, err := strconv.ParseUint(submatches[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
- }
-
- if startByte != uint64(hrs.readerOffset) {
- return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
- }
-
- endByte, err := strconv.ParseUint(submatches[2], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
- }
-
- if submatches[3] == "*" {
- hrs.size = -1
- } else {
- size, err := strconv.ParseUint(submatches[3], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
- }
-
- if endByte+1 != size {
- return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
- }
-
- hrs.size = int64(size)
- }
- } else if resp.StatusCode == http.StatusOK {
- hrs.size = resp.ContentLength
- } else {
- hrs.size = -1
- }
- hrs.rc = resp.Body
- } else {
- defer resp.Body.Close()
- if hrs.errorHandler != nil {
- return nil, hrs.errorHandler(resp)
- }
- return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
- }
-
- return hrs.rc, nil
-}
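
Most of the removed reader() is Content-Range validation for resumed reads: the range must start at the requested offset and the total may be "*" when unknown. A standalone sketch of just that parse, reusing the same regexp; parseContentRange is a hypothetical helper name:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)

// parseContentRange mirrors the checks in the removed reader(): the start
// byte must match the offset we asked for, and a total of "*" means the
// size is unknown (reported here as -1).
func parseContentRange(header string, wantStart uint64) (int64, error) {
	m := contentRangeRegexp.FindStringSubmatch(header)
	if len(m) < 4 {
		return 0, fmt.Errorf("could not parse Content-Range header: %s", header)
	}
	start, err := strconv.ParseUint(m[1], 10, 64)
	if err != nil || start != wantStart {
		return 0, fmt.Errorf("unexpected range start in %q", header)
	}
	if m[3] == "*" {
		return -1, nil
	}
	total, err := strconv.ParseUint(m[3], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("could not parse total size in %q", header)
	}
	return int64(total), nil
}

func main() {
	size, err := parseContentRange("bytes 1024-2047/4096", 1024)
	fmt.Println(size, err) // 4096 <nil>
}
```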
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
deleted file mode 100644
index 30e45fab0f7..00000000000
--- a/vendor/github.com/docker/distribution/registry/client/transport/transport.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package transport
-
-import (
- "io"
- "net/http"
- "sync"
-)
-
-// RequestModifier represents an object which will do an in-place
-// modification of an HTTP request.
-type RequestModifier interface {
- ModifyRequest(*http.Request) error
-}
-
-type headerModifier http.Header
-
-// NewHeaderRequestModifier returns a new RequestModifier which will
-// add the given headers to a request.
-func NewHeaderRequestModifier(header http.Header) RequestModifier {
- return headerModifier(header)
-}
-
-func (h headerModifier) ModifyRequest(req *http.Request) error {
- for k, s := range http.Header(h) {
- req.Header[k] = append(req.Header[k], s...)
- }
-
- return nil
-}
-
-// NewTransport creates a new transport which will apply modifiers to
-// the request on a RoundTrip call.
-func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
- return &transport{
- Modifiers: modifiers,
- Base: base,
- }
-}
-
-// transport is an http.RoundTripper that makes HTTP requests after
-// copying and modifying the request
-type transport struct {
- Modifiers []RequestModifier
- Base http.RoundTripper
-
- mu sync.Mutex // guards modReq
- modReq map[*http.Request]*http.Request // original -> modified
-}
-
-// RoundTrip authorizes and authenticates the request with an
-// access token. If no token exists or token is expired,
-// tries to refresh/fetch a new token.
-func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
- req2 := cloneRequest(req)
- for _, modifier := range t.Modifiers {
- if err := modifier.ModifyRequest(req2); err != nil {
- return nil, err
- }
- }
-
- t.setModReq(req, req2)
- res, err := t.base().RoundTrip(req2)
- if err != nil {
- t.setModReq(req, nil)
- return nil, err
- }
- res.Body = &onEOFReader{
- rc: res.Body,
- fn: func() { t.setModReq(req, nil) },
- }
- return res, nil
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *transport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base().(canceler); ok {
- t.mu.Lock()
- modReq := t.modReq[req]
- delete(t.modReq, req)
- t.mu.Unlock()
- cr.CancelRequest(modReq)
- }
-}
-
-func (t *transport) base() http.RoundTripper {
- if t.Base != nil {
- return t.Base
- }
- return http.DefaultTransport
-}
-
-func (t *transport) setModReq(orig, mod *http.Request) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.modReq == nil {
- t.modReq = make(map[*http.Request]*http.Request)
- }
- if mod == nil {
- delete(t.modReq, orig)
- } else {
- t.modReq[orig] = mod
- }
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header, len(r.Header))
- for k, s := range r.Header {
- r2.Header[k] = append([]string(nil), s...)
- }
-
- return r2
-}
-
-type onEOFReader struct {
- rc io.ReadCloser
- fn func()
-}
-
-func (r *onEOFReader) Read(p []byte) (n int, err error) {
- n, err = r.rc.Read(p)
- if err == io.EOF {
- r.runFunc()
- }
- return
-}
-
-func (r *onEOFReader) Close() error {
- err := r.rc.Close()
- r.runFunc()
- return err
-}
-
-func (r *onEOFReader) runFunc() {
- if fn := r.fn; fn != nil {
- fn()
- r.fn = nil
- }
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
deleted file mode 100644
index 10a3909197c..00000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Package cache provides facilities to speed up access to the storage
-// backend.
-package cache
-
-import (
- "fmt"
-
- "github.com/docker/distribution"
-)
-
-// BlobDescriptorCacheProvider provides repository scoped
-// BlobDescriptorService cache instances and a global descriptor cache.
-type BlobDescriptorCacheProvider interface {
- distribution.BlobDescriptorService
-
- RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
-}
-
-// ValidateDescriptor provides a helper function to ensure that caches have
-// common criteria for admitting descriptors.
-func ValidateDescriptor(desc distribution.Descriptor) error {
- if err := desc.Digest.Validate(); err != nil {
- return err
- }
-
- if desc.Size < 0 {
- return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
- }
-
- if desc.MediaType == "" {
- return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
- }
-
- return nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
deleted file mode 100644
index ac4c452117d..00000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package cache
-
-import (
- "context"
-
- "github.com/docker/distribution"
- prometheus "github.com/docker/distribution/metrics"
- "github.com/opencontainers/go-digest"
-)
-
-// Metrics is used to hold metric counters
-// related to the number of times a cache was
-// hit or missed.
-type Metrics struct {
- Requests uint64
- Hits uint64
- Misses uint64
-}
-
-// Logger can be provided on the MetricsTracker to log errors.
-//
-// Usually, this is just a proxy to dcontext.GetLogger.
-type Logger interface {
- Errorf(format string, args ...interface{})
-}
-
-// MetricsTracker represents a metric tracker
-// which simply counts the number of hits and misses.
-type MetricsTracker interface {
- Hit()
- Miss()
- Metrics() Metrics
- Logger(context.Context) Logger
-}
-
-type cachedBlobStatter struct {
- cache distribution.BlobDescriptorService
- backend distribution.BlobDescriptorService
- tracker MetricsTracker
-}
-
-var (
- // cacheCount is the number of total cache request received/hits/misses
- cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
-)
-
-// NewCachedBlobStatter creates a new statter which prefers a cache and
-// falls back to a backend.
-func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
- return &cachedBlobStatter{
- cache: cache,
- backend: backend,
- }
-}
-
-// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
-// falls back to a backend. Hits and misses will send to the tracker.
-func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
- return &cachedBlobStatter{
- cache: cache,
- backend: backend,
- tracker: tracker,
- }
-}
-
-func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- cacheCount.WithValues("Request").Inc(1)
- desc, err := cbds.cache.Stat(ctx, dgst)
- if err != nil {
- if err != distribution.ErrBlobUnknown {
- logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err)
- }
-
- goto fallback
- }
- cacheCount.WithValues("Hit").Inc(1)
- if cbds.tracker != nil {
- cbds.tracker.Hit()
- }
- return desc, nil
-fallback:
- cacheCount.WithValues("Miss").Inc(1)
- if cbds.tracker != nil {
- cbds.tracker.Miss()
- }
- desc, err = cbds.backend.Stat(ctx, dgst)
- if err != nil {
- return desc, err
- }
-
- if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
- logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
- }
-
-	return desc, err
-}
-
-func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
- err := cbds.cache.Clear(ctx, dgst)
- if err != nil {
- return err
- }
-
- err = cbds.backend.Clear(ctx, dgst)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
- logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
- }
- return nil
-}
-
-func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) {
- if tracker == nil {
- return
- }
-
- logger := tracker.Logger(ctx)
- if logger == nil {
- return
- }
- logger.Errorf(format, args...)
-}
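
MetricsTracker is the only extension point in the file above. A hypothetical implementation backed by atomic counters and the standard library logger, just enough to satisfy the interface (the Metrics and Logger types are re-stated so the sketch compiles on its own):

```go
package main

import (
	"context"
	"log"
	"sync/atomic"
)

type Metrics struct{ Requests, Hits, Misses uint64 }

type Logger interface {
	Errorf(format string, args ...interface{})
}

// counterTracker is a hypothetical MetricsTracker built from two atomic
// counters; Requests is derived as hits plus misses.
type counterTracker struct{ hits, misses uint64 }

func (t *counterTracker) Hit()  { atomic.AddUint64(&t.hits, 1) }
func (t *counterTracker) Miss() { atomic.AddUint64(&t.misses, 1) }

func (t *counterTracker) Metrics() Metrics {
	h, m := atomic.LoadUint64(&t.hits), atomic.LoadUint64(&t.misses)
	return Metrics{Requests: h + m, Hits: h, Misses: m}
}

func (t *counterTracker) Logger(ctx context.Context) Logger { return stdLogger{} }

type stdLogger struct{}

func (stdLogger) Errorf(format string, args ...interface{}) { log.Printf(format, args...) }

func main() {
	t := &counterTracker{}
	t.Hit()
	t.Miss()
	log.Printf("%+v", t.Metrics())
}
```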
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
deleted file mode 100644
index 42d94d9bde6..00000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package memory
-
-import (
- "context"
- "sync"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/reference"
- "github.com/docker/distribution/registry/storage/cache"
- "github.com/opencontainers/go-digest"
-)
-
-type inMemoryBlobDescriptorCacheProvider struct {
- global *mapBlobDescriptorCache
- repositories map[string]*mapBlobDescriptorCache
- mu sync.RWMutex
-}
-
-// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for
-// storing blob descriptor data.
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
- return &inMemoryBlobDescriptorCacheProvider{
- global: newMapBlobDescriptorCache(),
- repositories: make(map[string]*mapBlobDescriptorCache),
- }
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
- if _, err := reference.ParseNormalizedNamed(repo); err != nil {
- return nil, err
- }
-
- imbdcp.mu.RLock()
- defer imbdcp.mu.RUnlock()
-
- return &repositoryScopedInMemoryBlobDescriptorCache{
- repo: repo,
- parent: imbdcp,
- repository: imbdcp.repositories[repo],
- }, nil
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- return imbdcp.global.Stat(ctx, dgst)
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
- return imbdcp.global.Clear(ctx, dgst)
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- _, err := imbdcp.Stat(ctx, dgst)
- if err == distribution.ErrBlobUnknown {
-
- if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
- // if the digests differ, set the other canonical mapping
- if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
- return err
- }
- }
-
- // unknown, just set it
- return imbdcp.global.SetDescriptor(ctx, dgst, desc)
- }
-
- // we already know it, do nothing
- return err
-}
-
-// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
-// repository cache. Instances are not thread-safe but the delegated
-// operations are.
-type repositoryScopedInMemoryBlobDescriptorCache struct {
- repo string
- parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map
- repository *mapBlobDescriptorCache
-}
-
-func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- rsimbdcp.parent.mu.Lock()
- repo := rsimbdcp.repository
- rsimbdcp.parent.mu.Unlock()
-
- if repo == nil {
- return distribution.Descriptor{}, distribution.ErrBlobUnknown
- }
-
- return repo.Stat(ctx, dgst)
-}
-
-func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
- rsimbdcp.parent.mu.Lock()
- repo := rsimbdcp.repository
- rsimbdcp.parent.mu.Unlock()
-
- if repo == nil {
- return distribution.ErrBlobUnknown
- }
-
- return repo.Clear(ctx, dgst)
-}
-
-func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- rsimbdcp.parent.mu.Lock()
- repo := rsimbdcp.repository
- if repo == nil {
- // allocate map since we are setting it now.
- var ok bool
- // have to read back value since we may have allocated elsewhere.
- repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
- if !ok {
- repo = newMapBlobDescriptorCache()
- rsimbdcp.parent.repositories[rsimbdcp.repo] = repo
- }
- rsimbdcp.repository = repo
- }
- rsimbdcp.parent.mu.Unlock()
-
- if err := repo.SetDescriptor(ctx, dgst, desc); err != nil {
- return err
- }
-
- return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc)
-}
-
-// mapBlobDescriptorCache provides a simple map-based implementation of the
-// descriptor cache.
-type mapBlobDescriptorCache struct {
- descriptors map[digest.Digest]distribution.Descriptor
- mu sync.RWMutex
-}
-
-var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{}
-
-func newMapBlobDescriptorCache() *mapBlobDescriptorCache {
- return &mapBlobDescriptorCache{
- descriptors: make(map[digest.Digest]distribution.Descriptor),
- }
-}
-
-func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- if err := dgst.Validate(); err != nil {
- return distribution.Descriptor{}, err
- }
-
- mbdc.mu.RLock()
- defer mbdc.mu.RUnlock()
-
- desc, ok := mbdc.descriptors[dgst]
- if !ok {
- return distribution.Descriptor{}, distribution.ErrBlobUnknown
- }
-
- return desc, nil
-}
-
-func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
- mbdc.mu.Lock()
- defer mbdc.mu.Unlock()
-
- delete(mbdc.descriptors, dgst)
- return nil
-}
-
-func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- if err := dgst.Validate(); err != nil {
- return err
- }
-
- if err := cache.ValidateDescriptor(desc); err != nil {
- return err
- }
-
- mbdc.mu.Lock()
- defer mbdc.mu.Unlock()
-
- mbdc.descriptors[dgst] = desc
- return nil
-}
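
How the two removed cache packages were meant to compose: a repository-scoped write also lands in the global cache, so a later global Stat hits. A hedged end-to-end sketch, assuming both packages are still vendored (repository name and descriptor are made up):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/docker/distribution/registry/storage/cache/memory"
	"github.com/opencontainers/go-digest"
)

func main() {
	provider := memory.NewInMemoryBlobDescriptorCacheProvider()

	repoCache, err := provider.RepositoryScoped("library/busybox")
	if err != nil {
		panic(err)
	}

	desc := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      1234,
		Digest:    digest.FromString("example"),
	}
	// ValidateDescriptor enforces the admission criteria shared by all caches.
	if err := cache.ValidateDescriptor(desc); err != nil {
		panic(err)
	}
	if err := repoCache.SetDescriptor(context.Background(), desc.Digest, desc); err != nil {
		panic(err)
	}

	// The repository-scoped write propagated to the global cache.
	got, err := provider.Stat(context.Background(), desc.Digest)
	fmt.Println(got.Size, err)
}
```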
diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go
deleted file mode 100644
index f22df2b850e..00000000000
--- a/vendor/github.com/docker/distribution/tags.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package distribution
-
-import (
- "context"
-)
-
-// TagService provides access to information about tagged objects.
-type TagService interface {
- // Get retrieves the descriptor identified by the tag. Some
- // implementations may differentiate between "trusted" tags and
- // "untrusted" tags. If a tag is "untrusted", the mapping will be returned
- // as an ErrTagUntrusted error, with the target descriptor.
- Get(ctx context.Context, tag string) (Descriptor, error)
-
- // Tag associates the tag with the provided descriptor, updating the
- // current association, if needed.
- Tag(ctx context.Context, tag string, desc Descriptor) error
-
- // Untag removes the given tag association
- Untag(ctx context.Context, tag string) error
-
- // All returns the set of tags managed by this tag service
- All(ctx context.Context) ([]string, error)
-
- // Lookup returns the set of tags referencing the given digest.
- Lookup(ctx context.Context, digest Descriptor) ([]string, error)
-}
diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf
deleted file mode 100644
index a249caf269d..00000000000
--- a/vendor/github.com/docker/distribution/vendor.conf
+++ /dev/null
@@ -1,51 +0,0 @@
-github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b
-github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052
-github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
-github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490
-github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
-github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
-github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
-github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
-github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
-github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
-github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
-github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
-github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
-github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
-github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c
-github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
-github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
-github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
-github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
-github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f
-github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3
-github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
-github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
-github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
-github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6
-github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564
-github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
-github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
-github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
-github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd
-github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064
-github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842
-github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985
-github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e
-github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128
-github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6
-golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b
-golang.org/x/net 4876518f9e71663000c348837735820161a42df7
-golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf
-golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
-google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54
-google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19
-google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2
-google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994
-gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673
-gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
-gopkg.in/yaml.v2 v2.2.1
-rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
-github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
-github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
index da8b594e7f8..91d9d4bbae9 100644
--- a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
@@ -169,8 +169,8 @@ func Erase(helper Helper, reader io.Reader) error {
return helper.Delete(serverURL)
}
-//List returns all the serverURLs of keys in
-//the OS store as a list of strings
+// List returns all the serverURLs of keys in
+// the OS store as a list of strings
func List(helper Helper, writer io.Writer) error {
accts, err := helper.List()
if err != nil {
@@ -179,8 +179,8 @@ func List(helper Helper, writer io.Writer) error {
return json.NewEncoder(writer).Encode(accts)
}
-//PrintVersion outputs the current version.
+// PrintVersion outputs the current version.
func PrintVersion(writer io.Writer) error {
- fmt.Fprintln(writer, Version)
+ fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version)
return nil
}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
index 185e367961a..84377c26309 100644
--- a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
@@ -1,4 +1,16 @@
package credentials
-// Version holds a string describing the current version
-const Version = "0.6.4"
+var (
+ // Name is filled at linking time
+ Name = ""
+
+ // Package is filled at linking time
+ Package = "github.com/docker/docker-credential-helpers"
+
+ // Version holds the complete version number. Filled in at linking time.
+ Version = "v0.0.0+unknown"
+
+ // Revision is filled with the VCS (e.g. git) revision being used to build
+ // the program at linking time.
+ Revision = ""
+)
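
These variables are placeholders meant to be stamped by the linker: a release build overrides them with -X flags, while an unstamped build prints the "v0.0.0+unknown" default. A small consumer, with an illustrative build command (version values are examples) in the comment:

```go
package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/credentials"
)

// Built without flags this prints the placeholders; a release build stamps
// the variables instead, e.g.:
//
//	go build -ldflags "\
//	  -X github.com/docker/docker-credential-helpers/credentials.Name=docker-credential-example \
//	  -X github.com/docker/docker-credential-helpers/credentials.Version=v0.7.0 \
//	  -X github.com/docker/docker-credential-helpers/credentials.Revision=deadbeef"
func main() {
	fmt.Println(credentials.Name, credentials.Package, credentials.Version, credentials.Revision)
}
```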
diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go
index 504b0c90d7b..19fc63d6589 100644
--- a/vendor/github.com/docker/docker/api/common_unix.go
+++ b/vendor/github.com/docker/docker/api/common_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package api // import "github.com/docker/docker/api"
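
The `//go:build` lines added throughout this diff pair the Go 1.17 constraint syntax with the legacy `// +build` form so that old and new toolchains evaluate the same constraint; from Go 1.17 on, gofmt keeps the two in sync automatically. The shape every touched file now follows (a blank line must separate the constraints from the package clause):

```go
//go:build !windows
// +build !windows

// Both lines express the same constraint; older toolchains only read the
// // +build form, newer ones prefer //go:build.
package api
```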
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index bada4a8e3c8..0756ff1bb5e 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -1891,23 +1891,52 @@ definitions:
BuildCache:
type: "object"
+ description: |
+ BuildCache contains information about a build cache record.
properties:
ID:
type: "string"
+ description: |
+ Unique ID of the build cache record.
+ example: "ndlpt0hhvkqcdfkputsk4cq9c"
Parent:
+ description: |
+ ID of the parent build cache record.
type: "string"
+ example: "hw53o5aio51xtltp5xjp8v7fx"
Type:
type: "string"
+ description: |
+ Cache record type.
+ example: "regular"
+ # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84
+ enum:
+ - "internal"
+ - "frontend"
+ - "source.local"
+ - "source.git.checkout"
+ - "exec.cachemount"
+ - "regular"
Description:
type: "string"
+ description: |
+ Description of the build-step that produced the build cache.
+ example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
InUse:
type: "boolean"
+ description: |
+ Indicates if the build cache is in use.
+ example: false
Shared:
type: "boolean"
+ description: |
+ Indicates if the build cache is shared.
+ example: true
Size:
description: |
Amount of disk space used by the build cache (in bytes).
type: "integer"
+ example: 51
CreatedAt:
description: |
Date and time at which the build cache was created in
@@ -1925,6 +1954,7 @@ definitions:
example: "2017-08-09T07:09:37.632105588Z"
UsageCount:
type: "integer"
+ example: 26
ImageID:
type: "object"
@@ -3347,7 +3377,7 @@ definitions:
Limits:
description: "Define resources limits."
$ref: "#/definitions/Limit"
- Reservation:
+ Reservations:
description: "Define resources reservation."
$ref: "#/definitions/ResourceObject"
RestartPolicy:
@@ -5415,6 +5445,28 @@ paths:
`/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`.
type: "string"
pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
+ - name: "platform"
+ in: "query"
+ description: |
+ Platform in the format `os[/arch[/variant]]` used for image lookup.
+
+ When specified, the daemon checks if the requested image is present
+ in the local image cache with the given OS and Architecture, and
+ otherwise returns a `404` status.
+
+ If the option is not set, the host's native OS and Architecture are
+ used to look up the image in the image cache. However, if no platform
+ is passed and the given image does exist in the local image cache,
+ but its OS or architecture does not match, the container is created
+ with the available image, and a warning is added to the `Warnings`
+ field in the response, for example:
+
+ WARNING: The requested image's platform (linux/arm64/v8) does not
+ match the detected host platform (linux/amd64) and no
+ specific platform was requested
+
+ type: "string"
+ default: ""
- name: "body"
in: "body"
description: "Container to create"
@@ -5755,7 +5807,6 @@ paths:
property1: "string"
property2: "string"
IpcMode: ""
- LxcConf: []
Memory: 0
MemorySwap: 0
MemoryReservation: 0
@@ -8607,12 +8658,20 @@ paths:
if `tty` was specified as part of creating and starting the exec instance.
operationId: "ExecResize"
responses:
- 201:
+ 200:
description: "No error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
404:
description: "No such exec instance"
schema:
$ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
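
On the client side, the new `platform` query parameter documented above is driven by the platform argument to ContainerCreate. A hedged sketch against a local daemon (image and container name are examples; the platform parameter needs API 1.41+, hence the version negotiation option):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// With a platform set, the daemon returns 404 unless the image is
	// cached for that OS/arch; with no platform, a mismatched cached
	// variant is used and the mismatch surfaces in resp.Warnings.
	resp, err := cli.ContainerCreate(context.Background(),
		&container.Config{Image: "busybox"},
		nil, nil,
		&specs.Platform{OS: "linux", Architecture: "arm64", Variant: "v8"},
		"example")
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.ID, resp.Warnings)
}
```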
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
index cf6fdf44026..24c4fa8d900 100644
--- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package container // import "github.com/docker/docker/api/types/container"
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index 21edf1fa1fc..9b2b2eaeb8a 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -135,9 +135,6 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) {
}
}
- if _, ok := c.client.Transport.(http.RoundTripper); !ok {
- return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport)
- }
if c.scheme == "" {
c.scheme = "http"
diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go
index 9d0f0dcbf0b..5846f888fea 100644
--- a/vendor/github.com/docker/docker/client/client_unix.go
+++ b/vendor/github.com/docker/docker/client/client_unix.go
@@ -1,3 +1,4 @@
+//go:build linux || freebsd || openbsd || netbsd || darwin || solaris || illumos || dragonfly
// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly
package client // import "github.com/docker/docker/client"
diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go
index b1d5fea5bd5..c5079ee539e 100644
--- a/vendor/github.com/docker/docker/client/container_create.go
+++ b/vendor/github.com/docker/docker/client/container_create.go
@@ -4,8 +4,8 @@ import (
"context"
"encoding/json"
"net/url"
+ "path"
- "github.com/containerd/containerd/platforms"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/versions"
@@ -16,7 +16,6 @@ type configWrapper struct {
*container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
- Platform *specs.Platform
}
// ContainerCreate creates a new container based in the given configuration.
@@ -38,8 +37,8 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
}
query := url.Values{}
- if platform != nil {
- query.Set("platform", platforms.Format(*platform))
+ if p := formatPlatform(platform); p != "" {
+ query.Set("platform", p)
}
if containerName != "" {
@@ -61,3 +60,15 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
err = json.NewDecoder(serverResp.body).Decode(&response)
return response, err
}
+
+// formatPlatform returns a formatted string representing platform (e.g. linux/arm/v7).
+//
+// Similar to containerd's platforms.Format(), but does allow components to be
+// omitted (e.g. pass "architecture" only, without "os"):
+// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263
+func formatPlatform(platform *specs.Platform) string {
+ if platform == nil {
+ return ""
+ }
+ return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
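
The reason path.Join works for the replacement helper is that it silently drops empty components, so partial platforms still format cleanly where containerd's platforms.Format would reject them. A quick demonstration:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// Empty components simply vanish from the joined result.
	fmt.Println(path.Join("linux", "arm", "v7")) // linux/arm/v7
	fmt.Println(path.Join("linux", "arm64", "")) // linux/arm64
	fmt.Println(path.Join("", "arm64", ""))      // arm64
}
```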
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
index 813eac2c9e0..7f54b1dd806 100644
--- a/vendor/github.com/docker/docker/client/request.go
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -150,10 +150,8 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp
if err.Timeout() {
return serverResp, ErrorConnectionFailed(cli.host)
}
- if !err.Temporary() {
- if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
- return serverResp, ErrorConnectionFailed(cli.host)
- }
+ if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
+ return serverResp, ErrorConnectionFailed(cli.host)
}
}
@@ -242,10 +240,8 @@ func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request
req.Header.Set(k, v)
}
- if headers != nil {
- for k, v := range headers {
- req.Header[k] = v
- }
+ for k, v := range headers {
+ req.Header[k] = v
}
return req
}
diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go
index 07552f1cc1d..5afe486779d 100644
--- a/vendor/github.com/docker/docker/errdefs/http_helpers.go
+++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go
@@ -1,78 +1,11 @@
package errdefs // import "github.com/docker/docker/errdefs"
import (
- "fmt"
"net/http"
- containerderrors "github.com/containerd/containerd/errdefs"
- "github.com/docker/distribution/registry/api/errcode"
"github.com/sirupsen/logrus"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
)
-// GetHTTPErrorStatusCode retrieves status code from error message.
-func GetHTTPErrorStatusCode(err error) int {
- if err == nil {
- logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
- return http.StatusInternalServerError
- }
-
- var statusCode int
-
- // Stop right there
- // Are you sure you should be adding a new error class here? Do one of the existing ones work?
-
- // Note that the below functions are already checking the error causal chain for matches.
- switch {
- case IsNotFound(err):
- statusCode = http.StatusNotFound
- case IsInvalidParameter(err):
- statusCode = http.StatusBadRequest
- case IsConflict(err):
- statusCode = http.StatusConflict
- case IsUnauthorized(err):
- statusCode = http.StatusUnauthorized
- case IsUnavailable(err):
- statusCode = http.StatusServiceUnavailable
- case IsForbidden(err):
- statusCode = http.StatusForbidden
- case IsNotModified(err):
- statusCode = http.StatusNotModified
- case IsNotImplemented(err):
- statusCode = http.StatusNotImplemented
- case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err):
- statusCode = http.StatusInternalServerError
- default:
- statusCode = statusCodeFromGRPCError(err)
- if statusCode != http.StatusInternalServerError {
- return statusCode
- }
- statusCode = statusCodeFromContainerdError(err)
- if statusCode != http.StatusInternalServerError {
- return statusCode
- }
- statusCode = statusCodeFromDistributionError(err)
- if statusCode != http.StatusInternalServerError {
- return statusCode
- }
- if e, ok := err.(causer); ok {
- return GetHTTPErrorStatusCode(e.Cause())
- }
-
- logrus.WithFields(logrus.Fields{
- "module": "api",
- "error_type": fmt.Sprintf("%T", err),
- }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err)
- }
-
- if statusCode == 0 {
- statusCode = http.StatusInternalServerError
- }
-
- return statusCode
-}
-
// FromStatusCode creates an errdef error, based on the provided HTTP status-code
func FromStatusCode(err error, statusCode int) error {
if err == nil {
@@ -100,10 +33,10 @@ func FromStatusCode(err error, statusCode int) error {
err = System(err)
}
default:
- logrus.WithFields(logrus.Fields{
+ logrus.WithError(err).WithFields(logrus.Fields{
"module": "api",
- "status_code": fmt.Sprintf("%d", statusCode),
- }).Debugf("FIXME: Got an status-code for which error does not match any expected type!!!: %d", statusCode)
+ "status_code": statusCode,
+ }).Debug("FIXME: Got a status-code for which error does not match any expected type!!!")
switch {
case statusCode >= 200 && statusCode < 400:
@@ -118,74 +51,3 @@ func FromStatusCode(err error, statusCode int) error {
}
return err
}
-
-// statusCodeFromGRPCError returns status code according to gRPC error
-func statusCodeFromGRPCError(err error) int {
- switch status.Code(err) {
- case codes.InvalidArgument: // code 3
- return http.StatusBadRequest
- case codes.NotFound: // code 5
- return http.StatusNotFound
- case codes.AlreadyExists: // code 6
- return http.StatusConflict
- case codes.PermissionDenied: // code 7
- return http.StatusForbidden
- case codes.FailedPrecondition: // code 9
- return http.StatusBadRequest
- case codes.Unauthenticated: // code 16
- return http.StatusUnauthorized
- case codes.OutOfRange: // code 11
- return http.StatusBadRequest
- case codes.Unimplemented: // code 12
- return http.StatusNotImplemented
- case codes.Unavailable: // code 14
- return http.StatusServiceUnavailable
- default:
- // codes.Canceled(1)
- // codes.Unknown(2)
- // codes.DeadlineExceeded(4)
- // codes.ResourceExhausted(8)
- // codes.Aborted(10)
- // codes.Internal(13)
- // codes.DataLoss(15)
- return http.StatusInternalServerError
- }
-}
-
-// statusCodeFromDistributionError returns status code according to registry errcode
-// code is loosely based on errcode.ServeJSON() in docker/distribution
-func statusCodeFromDistributionError(err error) int {
- switch errs := err.(type) {
- case errcode.Errors:
- if len(errs) < 1 {
- return http.StatusInternalServerError
- }
- if _, ok := errs[0].(errcode.ErrorCoder); ok {
- return statusCodeFromDistributionError(errs[0])
- }
- case errcode.ErrorCoder:
- return errs.ErrorCode().Descriptor().HTTPStatusCode
- }
- return http.StatusInternalServerError
-}
-
-// statusCodeFromContainerdError returns status code for containerd errors when
-// consumed directly (not through gRPC)
-func statusCodeFromContainerdError(err error) int {
- switch {
- case containerderrors.IsInvalidArgument(err):
- return http.StatusBadRequest
- case containerderrors.IsNotFound(err):
- return http.StatusNotFound
- case containerderrors.IsAlreadyExists(err):
- return http.StatusConflict
- case containerderrors.IsFailedPrecondition(err):
- return http.StatusPreconditionFailed
- case containerderrors.IsUnavailable(err):
- return http.StatusServiceUnavailable
- case containerderrors.IsNotImplemented(err):
- return http.StatusNotImplemented
- default:
- return http.StatusInternalServerError
- }
-}
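
With the gRPC, containerd, and distribution mappings deleted, the package's remaining public job is the reverse direction: classifying an HTTP status into an errdefs error class. A minimal usage sketch (the error text is made up):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/docker/docker/errdefs"
)

func main() {
	// FromStatusCode wraps a raw error in the errdefs class matching the
	// status the daemon returned, so callers can type-check it.
	err := errdefs.FromStatusCode(errors.New("no such image: busybox"), http.StatusNotFound)
	fmt.Println(errdefs.IsNotFound(err)) // true
}
```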
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
index 2a3dc95398e..28ae2769c5a 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
index 0b92bb0f4a3..fea53d3ae21 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
@@ -51,8 +52,8 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
// Currently go does not fill in the major/minors
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
- hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
- hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
+ hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
+ hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
}
}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
index ba744741cd0..0e4399a43b2 100644
--- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
index 06217b7161e..54aace970ed 100644
--- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
index 57fddac078d..4b9f504d7de 100644
--- a/vendor/github.com/docker/docker/pkg/archive/copy.go
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -354,6 +354,16 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read
return
}
+ // Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433
+ // and https://cure53.de/pentest-report_opa.pdf, which recommends
+ // replacing io.Copy with io.CopyN. The latter allows specifying the
+ // maximum number of bytes that should be read. By properly defining
+ // the limit, it can be assured that a GZip compression bomb cannot
+ // easily cause a Denial-of-Service.
+ // After reviewing with @tonistiigi and @cpuguy83, this should not
+ // affect us, because here we do not read into memory, hence we should
+ // not be vulnerable to this code consuming memory.
+ //nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec)
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
w.CloseWithError(err)
return
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
index 3958364f5ba..2ac7729f4cf 100644
--- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
index f58bf227fd3..d0877968617 100644
--- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
+++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
index 565396f1c7f..af0c26b614d 100644
--- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
@@ -1,3 +1,4 @@
+//go:build linux || freebsd
// +build linux freebsd
package fileutils // import "github.com/docker/docker/pkg/fileutils"
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
index 67ab9e9b31e..fc48e674c11 100644
--- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package homedir // import "github.com/docker/docker/pkg/homedir"
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
index 441bd727b60..d1732dee52f 100644
--- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package homedir // import "github.com/docker/docker/pkg/homedir"
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
index e7d25ee4713..ceec0339b56 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package idtools // import "github.com/docker/docker/pkg/idtools"
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
index e7c4d63118c..5e24577e2c2 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package idtools // import "github.com/docker/docker/pkg/idtools"
diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
index 1e2d4a7a75a..540672af5af 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package idtools // import "github.com/docker/docker/pkg/idtools"
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
index dc894f91314..4e67ec2f53f 100644
--- a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package ioutils // import "github.com/docker/docker/pkg/ioutils"
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go
index d5fab96f9d1..84ae1570513 100644
--- a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go
index dcee3e9f984..186d9d9a11d 100644
--- a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go
index 0f00028fbdd..4599a3f23c3 100644
--- a/vendor/github.com/docker/docker/pkg/system/lcow.go
+++ b/vendor/github.com/docker/docker/pkg/system/lcow.go
@@ -1,3 +1,4 @@
+//go:build windows && !no_lcow
// +build windows,!no_lcow
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go b/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go
index 3d3cf775a70..daadef31d54 100644
--- a/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go
+++ b/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !windows || (windows && no_lcow)
// +build !windows windows,no_lcow
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
index de5a1c0fb26..654b9f2c9e6 100644
--- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
index 56f44942680..207ee58ee6c 100644
--- a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux && !windows
// +build !linux,!windows
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go
index b132482e038..d27152c0f5b 100644
--- a/vendor/github.com/docker/docker/pkg/system/mknod.go
+++ b/vendor/github.com/docker/docker/pkg/system/mknod.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"
@@ -6,12 +7,6 @@ import (
"golang.org/x/sys/unix"
)
-// Mknod creates a filesystem node (file, device special file or named pipe) named path
-// with attributes specified by mode and dev.
-func Mknod(path string, mode uint32, dev int) error {
- return unix.Mknod(path, mode, dev)
-}
-
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
// and minor number of the newly created device special file.
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go
new file mode 100644
index 00000000000..c890be116f7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go
@@ -0,0 +1,14 @@
+//go:build freebsd
+// +build freebsd
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, uint64(dev))
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go
new file mode 100644
index 00000000000..4586aad19e6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go
@@ -0,0 +1,14 @@
+//go:build !freebsd && !windows
+// +build !freebsd,!windows
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, dev)
+}
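
The split into `mknod_freebsd.go` and `mknod_unix.go` exists because `golang.org/x/sys/unix` declares `Mknod` with a `uint64` dev argument on FreeBSD but an `int` elsewhere, so one shared implementation no longer compiles on both. A rough usage sketch on Linux (the path and device numbers are illustrative; creating device nodes requires CAP_MKNOD):

```go
//go:build linux
// +build linux

package main

import (
	"log"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

func main() {
	// Build the device number for a character device with major 1,
	// minor 3 (the numbers of /dev/null on Linux).
	dev := int(unix.Mkdev(1, 3))

	// Create the node; typically needs to run as root.
	if err := system.Mknod("/tmp/mynull", unix.S_IFCHR|0o666, dev); err != nil {
		log.Fatal(err)
	}
}
```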
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
index b0b93196a15..2c85371b5e8 100644
--- a/vendor/github.com/docker/docker/pkg/system/path_unix.go
+++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go
index 79aebb5272a..145689b88af 100644
--- a/vendor/github.com/docker/docker/pkg/system/process_unix.go
+++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go
@@ -1,3 +1,4 @@
+//go:build linux || freebsd || darwin
// +build linux freebsd darwin
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go
index c5d80ebda16..f2d81597c9d 100644
--- a/vendor/github.com/docker/docker/pkg/system/rm.go
+++ b/vendor/github.com/docker/docker/pkg/system/rm.go
@@ -1,3 +1,4 @@
+//go:build !darwin && !windows
// +build !darwin,!windows
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go
index ea55c3dbb5e..8e61d820f02 100644
--- a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go
@@ -1,3 +1,4 @@
+//go:build freebsd || netbsd
// +build freebsd netbsd
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
index 17d5d131a3c..3ac02393f0a 100644
--- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -9,7 +9,7 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
uid: s.Uid,
gid: s.Gid,
// the type is 32bit on mips
- rdev: uint64(s.Rdev), // nolint: unconvert
+ rdev: uint64(s.Rdev), //nolint: unconvert
mtim: s.Mtim}, nil
}
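
The conversion kept here looks redundant on 64-bit platforms, which is exactly what `unconvert` reports; on linux/mips, however, `Stat_t.Rdev` is only 32 bits wide, so the widening is required. An illustrative stand-alone version of the same pattern:

```go
//go:build linux
// +build linux

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	var st syscall.Stat_t
	if err := syscall.Stat("/dev/null", &st); err != nil {
		panic(err)
	}
	// On linux/amd64 st.Rdev is already uint64, so this conversion is
	// a no-op that unconvert flags; on linux/mips Rdev is uint32 and
	// the widening is required, hence the //nolint in the vendored code.
	rdev := uint64(st.Rdev) //nolint: unconvert
	fmt.Printf("major=%d minor=%d\n", unix.Major(rdev), unix.Minor(rdev))
}
```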
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go
index 86bb6dd55e6..a45ffddf750 100644
--- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
index 905d10f1532..7c90bffaa59 100644
--- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
@@ -1,3 +1,4 @@
+//go:build linux || freebsd
// +build linux freebsd
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go
index 9912a2babb3..d4a15cbedc3 100644
--- a/vendor/github.com/docker/docker/pkg/system/umask.go
+++ b/vendor/github.com/docker/docker/pkg/system/umask.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go
index 61ba8c474c3..2768750a00b 100644
--- a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go
@@ -1,3 +1,4 @@
+//go:build linux || freebsd
// +build linux freebsd
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
index 095e072e1df..bfed4af0325 100644
--- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux && !freebsd
// +build !linux,!freebsd
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
index d780a90cd38..b165a5dbfe9 100644
--- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package system // import "github.com/docker/docker/pkg/system"
diff --git a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/vendor/github.com/docker/go-metrics/CONTRIBUTING.md
deleted file mode 100644
index b8a512c3665..00000000000
--- a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# Contributing
-
-## Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch. Your
-signature certifies that you wrote the patch or otherwise have the right to pass
-it on as an open-source patch. The rules are pretty simple: if you can certify
-the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-```
-
-Then you just add a line to every git commit message:
-
- Signed-off-by: Joe Smith
-
-Use your real name (sorry, no pseudonyms or anonymous contributions.)
-
-If you set your `user.name` and `user.email` git configs, you can sign your
-commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-metrics/LICENSE b/vendor/github.com/docker/go-metrics/LICENSE
deleted file mode 100644
index 8f3fee627a4..00000000000
--- a/vendor/github.com/docker/go-metrics/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2013-2016 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/docker/go-metrics/LICENSE.docs
deleted file mode 100644
index e26cd4fc8ed..00000000000
--- a/vendor/github.com/docker/go-metrics/LICENSE.docs
+++ /dev/null
@@ -1,425 +0,0 @@
-Attribution-ShareAlike 4.0 International
-
-=======================================================================
-
-Creative Commons Corporation ("Creative Commons") is not a law firm and
-does not provide legal services or legal advice. Distribution of
-Creative Commons public licenses does not create a lawyer-client or
-other relationship. Creative Commons makes its licenses and related
-information available on an "as-is" basis. Creative Commons gives no
-warranties regarding its licenses, any material licensed under their
-terms and conditions, or any related information. Creative Commons
-disclaims all liability for damages resulting from their use to the
-fullest extent possible.
-
-Using Creative Commons Public Licenses
-
-Creative Commons public licenses provide a standard set of terms and
-conditions that creators and other rights holders may use to share
-original works of authorship and other material subject to copyright
-and certain other rights specified in the public license below. The
-following considerations are for informational purposes only, are not
-exhaustive, and do not form part of our licenses.
-
- Considerations for licensors: Our public licenses are
- intended for use by those authorized to give the public
- permission to use material in ways otherwise restricted by
- copyright and certain other rights. Our licenses are
- irrevocable. Licensors should read and understand the terms
- and conditions of the license they choose before applying it.
- Licensors should also secure all rights necessary before
- applying our licenses so that the public can reuse the
- material as expected. Licensors should clearly mark any
- material not subject to the license. This includes other CC-
- licensed material, or material used under an exception or
- limitation to copyright. More considerations for licensors:
- wiki.creativecommons.org/Considerations_for_licensors
-
- Considerations for the public: By using one of our public
- licenses, a licensor grants the public permission to use the
- licensed material under specified terms and conditions. If
- the licensor's permission is not necessary for any reason--for
- example, because of any applicable exception or limitation to
- copyright--then that use is not regulated by the license. Our
- licenses grant only permissions under copyright and certain
- other rights that a licensor has authority to grant. Use of
- the licensed material may still be restricted for other
- reasons, including because others have copyright or other
- rights in the material. A licensor may make special requests,
- such as asking that all changes be marked or described.
- Although not required by our licenses, you are encouraged to
- respect those requests where reasonable. More_considerations
- for the public:
- wiki.creativecommons.org/Considerations_for_licensees
-
-=======================================================================
-
-Creative Commons Attribution-ShareAlike 4.0 International Public
-License
-
-By exercising the Licensed Rights (defined below), You accept and agree
-to be bound by the terms and conditions of this Creative Commons
-Attribution-ShareAlike 4.0 International Public License ("Public
-License"). To the extent this Public License may be interpreted as a
-contract, You are granted the Licensed Rights in consideration of Your
-acceptance of these terms and conditions, and the Licensor grants You
-such rights in consideration of benefits the Licensor receives from
-making the Licensed Material available under these terms and
-conditions.
-
-
-Section 1 -- Definitions.
-
- a. Adapted Material means material subject to Copyright and Similar
- Rights that is derived from or based upon the Licensed Material
- and in which the Licensed Material is translated, altered,
- arranged, transformed, or otherwise modified in a manner requiring
- permission under the Copyright and Similar Rights held by the
- Licensor. For purposes of this Public License, where the Licensed
- Material is a musical work, performance, or sound recording,
- Adapted Material is always produced where the Licensed Material is
- synched in timed relation with a moving image.
-
- b. Adapter's License means the license You apply to Your Copyright
- and Similar Rights in Your contributions to Adapted Material in
- accordance with the terms and conditions of this Public License.
-
- c. BY-SA Compatible License means a license listed at
- creativecommons.org/compatiblelicenses, approved by Creative
- Commons as essentially the equivalent of this Public License.
-
- d. Copyright and Similar Rights means copyright and/or similar rights
- closely related to copyright including, without limitation,
- performance, broadcast, sound recording, and Sui Generis Database
- Rights, without regard to how the rights are labeled or
- categorized. For purposes of this Public License, the rights
- specified in Section 2(b)(1)-(2) are not Copyright and Similar
- Rights.
-
- e. Effective Technological Measures means those measures that, in the
- absence of proper authority, may not be circumvented under laws
- fulfilling obligations under Article 11 of the WIPO Copyright
- Treaty adopted on December 20, 1996, and/or similar international
- agreements.
-
- f. Exceptions and Limitations means fair use, fair dealing, and/or
- any other exception or limitation to Copyright and Similar Rights
- that applies to Your use of the Licensed Material.
-
- g. License Elements means the license attributes listed in the name
- of a Creative Commons Public License. The License Elements of this
- Public License are Attribution and ShareAlike.
-
- h. Licensed Material means the artistic or literary work, database,
- or other material to which the Licensor applied this Public
- License.
-
- i. Licensed Rights means the rights granted to You subject to the
- terms and conditions of this Public License, which are limited to
- all Copyright and Similar Rights that apply to Your use of the
- Licensed Material and that the Licensor has authority to license.
-
- j. Licensor means the individual(s) or entity(ies) granting rights
- under this Public License.
-
- k. Share means to provide material to the public by any means or
- process that requires permission under the Licensed Rights, such
- as reproduction, public display, public performance, distribution,
- dissemination, communication, or importation, and to make material
- available to the public including in ways that members of the
- public may access the material from a place and at a time
- individually chosen by them.
-
- l. Sui Generis Database Rights means rights other than copyright
- resulting from Directive 96/9/EC of the European Parliament and of
- the Council of 11 March 1996 on the legal protection of databases,
- as amended and/or succeeded, as well as other essentially
- equivalent rights anywhere in the world.
-
- m. You means the individual or entity exercising the Licensed Rights
- under this Public License. Your has a corresponding meaning.
-
-
-Section 2 -- Scope.
-
- a. License grant.
-
- 1. Subject to the terms and conditions of this Public License,
- the Licensor hereby grants You a worldwide, royalty-free,
- non-sublicensable, non-exclusive, irrevocable license to
- exercise the Licensed Rights in the Licensed Material to:
-
- a. reproduce and Share the Licensed Material, in whole or
- in part; and
-
- b. produce, reproduce, and Share Adapted Material.
-
- 2. Exceptions and Limitations. For the avoidance of doubt, where
- Exceptions and Limitations apply to Your use, this Public
- License does not apply, and You do not need to comply with
- its terms and conditions.
-
- 3. Term. The term of this Public License is specified in Section
- 6(a).
-
- 4. Media and formats; technical modifications allowed. The
- Licensor authorizes You to exercise the Licensed Rights in
- all media and formats whether now known or hereafter created,
- and to make technical modifications necessary to do so. The
- Licensor waives and/or agrees not to assert any right or
- authority to forbid You from making technical modifications
- necessary to exercise the Licensed Rights, including
- technical modifications necessary to circumvent Effective
- Technological Measures. For purposes of this Public License,
- simply making modifications authorized by this Section 2(a)
- (4) never produces Adapted Material.
-
- 5. Downstream recipients.
-
- a. Offer from the Licensor -- Licensed Material. Every
- recipient of the Licensed Material automatically
- receives an offer from the Licensor to exercise the
- Licensed Rights under the terms and conditions of this
- Public License.
-
- b. Additional offer from the Licensor -- Adapted Material.
- Every recipient of Adapted Material from You
- automatically receives an offer from the Licensor to
- exercise the Licensed Rights in the Adapted Material
- under the conditions of the Adapter's License You apply.
-
- c. No downstream restrictions. You may not offer or impose
- any additional or different terms or conditions on, or
- apply any Effective Technological Measures to, the
- Licensed Material if doing so restricts exercise of the
- Licensed Rights by any recipient of the Licensed
- Material.
-
- 6. No endorsement. Nothing in this Public License constitutes or
- may be construed as permission to assert or imply that You
- are, or that Your use of the Licensed Material is, connected
- with, or sponsored, endorsed, or granted official status by,
- the Licensor or others designated to receive attribution as
- provided in Section 3(a)(1)(A)(i).
-
- b. Other rights.
-
- 1. Moral rights, such as the right of integrity, are not
- licensed under this Public License, nor are publicity,
- privacy, and/or other similar personality rights; however, to
- the extent possible, the Licensor waives and/or agrees not to
- assert any such rights held by the Licensor to the limited
- extent necessary to allow You to exercise the Licensed
- Rights, but not otherwise.
-
- 2. Patent and trademark rights are not licensed under this
- Public License.
-
- 3. To the extent possible, the Licensor waives any right to
- collect royalties from You for the exercise of the Licensed
- Rights, whether directly or through a collecting society
- under any voluntary or waivable statutory or compulsory
- licensing scheme. In all other cases the Licensor expressly
- reserves any right to collect such royalties.
-
-
-Section 3 -- License Conditions.
-
-Your exercise of the Licensed Rights is expressly made subject to the
-following conditions.
-
- a. Attribution.
-
- 1. If You Share the Licensed Material (including in modified
- form), You must:
-
- a. retain the following if it is supplied by the Licensor
- with the Licensed Material:
-
- i. identification of the creator(s) of the Licensed
- Material and any others designated to receive
- attribution, in any reasonable manner requested by
- the Licensor (including by pseudonym if
- designated);
-
- ii. a copyright notice;
-
- iii. a notice that refers to this Public License;
-
- iv. a notice that refers to the disclaimer of
- warranties;
-
- v. a URI or hyperlink to the Licensed Material to the
- extent reasonably practicable;
-
- b. indicate if You modified the Licensed Material and
- retain an indication of any previous modifications; and
-
- c. indicate the Licensed Material is licensed under this
- Public License, and include the text of, or the URI or
- hyperlink to, this Public License.
-
- 2. You may satisfy the conditions in Section 3(a)(1) in any
- reasonable manner based on the medium, means, and context in
- which You Share the Licensed Material. For example, it may be
- reasonable to satisfy the conditions by providing a URI or
- hyperlink to a resource that includes the required
- information.
-
- 3. If requested by the Licensor, You must remove any of the
- information required by Section 3(a)(1)(A) to the extent
- reasonably practicable.
-
- b. ShareAlike.
-
- In addition to the conditions in Section 3(a), if You Share
- Adapted Material You produce, the following conditions also apply.
-
- 1. The Adapter's License You apply must be a Creative Commons
- license with the same License Elements, this version or
- later, or a BY-SA Compatible License.
-
- 2. You must include the text of, or the URI or hyperlink to, the
- Adapter's License You apply. You may satisfy this condition
- in any reasonable manner based on the medium, means, and
- context in which You Share Adapted Material.
-
- 3. You may not offer or impose any additional or different terms
- or conditions on, or apply any Effective Technological
- Measures to, Adapted Material that restrict exercise of the
- rights granted under the Adapter's License You apply.
-
-
-Section 4 -- Sui Generis Database Rights.
-
-Where the Licensed Rights include Sui Generis Database Rights that
-apply to Your use of the Licensed Material:
-
- a. for the avoidance of doubt, Section 2(a)(1) grants You the right
- to extract, reuse, reproduce, and Share all or a substantial
- portion of the contents of the database;
-
- b. if You include all or a substantial portion of the database
- contents in a database in which You have Sui Generis Database
- Rights, then the database in which You have Sui Generis Database
- Rights (but not its individual contents) is Adapted Material,
- including for purposes of Section 3(b); and
-
- c. You must comply with the conditions in Section 3(a) if You Share
- all or a substantial portion of the contents of the database.
-
-For the avoidance of doubt, this Section 4 supplements and does not
-replace Your obligations under this Public License where the Licensed
-Rights include other Copyright and Similar Rights.
-
-
-Section 5 -- Disclaimer of Warranties and Limitation of Liability.
-
- a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
- EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
- AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
- ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
- IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
- WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
- PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
- ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
- KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
- ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
-
- b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
- TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
- NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
- INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
- COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
- USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
- ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
- DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
- IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
-
- c. The disclaimer of warranties and limitation of liability provided
- above shall be interpreted in a manner that, to the extent
- possible, most closely approximates an absolute disclaimer and
- waiver of all liability.
-
-
-Section 6 -- Term and Termination.
-
- a. This Public License applies for the term of the Copyright and
- Similar Rights licensed here. However, if You fail to comply with
- this Public License, then Your rights under this Public License
- terminate automatically.
-
- b. Where Your right to use the Licensed Material has terminated under
- Section 6(a), it reinstates:
-
- 1. automatically as of the date the violation is cured, provided
- it is cured within 30 days of Your discovery of the
- violation; or
-
- 2. upon express reinstatement by the Licensor.
-
- For the avoidance of doubt, this Section 6(b) does not affect any
- right the Licensor may have to seek remedies for Your violations
- of this Public License.
-
- c. For the avoidance of doubt, the Licensor may also offer the
- Licensed Material under separate terms or conditions or stop
- distributing the Licensed Material at any time; however, doing so
- will not terminate this Public License.
-
- d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
- License.
-
-
-Section 7 -- Other Terms and Conditions.
-
- a. The Licensor shall not be bound by any additional or different
- terms or conditions communicated by You unless expressly agreed.
-
- b. Any arrangements, understandings, or agreements regarding the
- Licensed Material not stated herein are separate from and
- independent of the terms and conditions of this Public License.
-
-
-Section 8 -- Interpretation.
-
- a. For the avoidance of doubt, this Public License does not, and
- shall not be interpreted to, reduce, limit, restrict, or impose
- conditions on any use of the Licensed Material that could lawfully
- be made without permission under this Public License.
-
- b. To the extent possible, if any provision of this Public License is
- deemed unenforceable, it shall be automatically reformed to the
- minimum extent necessary to make it enforceable. If the provision
- cannot be reformed, it shall be severed from this Public License
- without affecting the enforceability of the remaining terms and
- conditions.
-
- c. No term or condition of this Public License will be waived and no
- failure to comply consented to unless expressly agreed to by the
- Licensor.
-
- d. Nothing in this Public License constitutes or may be interpreted
- as a limitation upon, or waiver of, any privileges and immunities
- that apply to the Licensor or You, including from the legal
- processes of any jurisdiction or authority.
-
-
-=======================================================================
-
-Creative Commons is not a party to its public licenses.
-Notwithstanding, Creative Commons may elect to apply one of its public
-licenses to material it publishes and in those instances will be
-considered the "Licensor." Except for the limited purpose of indicating
-that material is shared under a Creative Commons public license or as
-otherwise permitted by the Creative Commons policies published at
-creativecommons.org/policies, Creative Commons does not authorize the
-use of the trademark "Creative Commons" or any other trademark or logo
-of Creative Commons without its prior written consent including,
-without limitation, in connection with any unauthorized modifications
-to any of its public licenses or any other arrangements,
-understandings, or agreements concerning use of licensed material. For
-the avoidance of doubt, this paragraph does not form part of the public
-licenses.
-
-Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/docker/go-metrics/NOTICE
deleted file mode 100644
index 8915f02773f..00000000000
--- a/vendor/github.com/docker/go-metrics/NOTICE
+++ /dev/null
@@ -1,16 +0,0 @@
-Docker
-Copyright 2012-2015 Docker, Inc.
-
-This product includes software developed at Docker, Inc. (https://www.docker.com).
-
-The following is courtesy of our legal counsel:
-
-
-Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see https://www.bis.doc.gov
-
-See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md
deleted file mode 100644
index a9e947cb566..00000000000
--- a/vendor/github.com/docker/go-metrics/README.md
+++ /dev/null
@@ -1,91 +0,0 @@
-# go-metrics [GoDoc](https://godoc.org/github.com/docker/go-metrics)
-
-This package is small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
-
-## Best Practices
-
-This packages is meant to be used for collecting metrics in Docker projects.
-It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected.
-If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/).
-
-The following are a few Docker specific rules that will help you name and work with metrics in your project.
-
-1. Namespace and Subsystem
-
-This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.
-
-```go
-ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
- "version": dockerversion.Version,
- "commit": dockerversion.GitCommit,
-})
-```
-
-In the example above we are creating metrics for the Docker engine's daemon package.
-`engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics.
-
-A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.
-
-2. Declaring your Metrics
-
-Try to keep all your metric declarations in one file.
-This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.
-
-3. Use labels instead of multiple metrics
-
-Labels allow you to define one metric such as the time it takes to perform a certain action on an object.
-If we wanted to collect timings on various container actions such as create, start, and delete then we can define one metric called `container_actions` and use labels to specify the type of action.
-
-
-```go
-containerActions = ns.NewLabeledTimer("container_actions", "The number of milliseconds it takes to process each container action", "action")
-```
-
-The last parameter is the label name or key.
-When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.
-
-```go
-containerActions.WithValues("create").UpdateSince(start)
-```
-
-4. Always use a unit
-
-The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with.
-For a timer, the standard unit is seconds and a counter's standard unit is a total.
-For gauges you must provide the unit.
-This package provides a standard set of units for use within the Docker projects.
-
-```go
-Nanoseconds Unit = "nanoseconds"
-Seconds Unit = "seconds"
-Bytes Unit = "bytes"
-Total Unit = "total"
-```
-
-If you need to use a unit but it is not defined in the package please open a PR to add it but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds.
-
-## Docs
-
-Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
-
-## HTTP Metrics
-
-To instrument a http handler, you can wrap the code like this:
-
-```go
-namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"})
-httpMetrics := namespace.NewDefaultHttpMetrics()
-metrics.Register(namespace)
-instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
-```
-Note: The `handler` label must be provided when a new namespace is created.
-
-## Additional Metrics
-
-Additional metrics are also defined here that are not available in the prometheus client.
-If you need a custom metrics and it is generic enough to be used by multiple projects, define it here.
-
-
-## Copyright and license
-
-Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/docker/go-metrics/counter.go
deleted file mode 100644
index fe36316a45c..00000000000
--- a/vendor/github.com/docker/go-metrics/counter.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-// Counter is a metrics that can only increment its current count
-type Counter interface {
- // Inc adds Sum(vs) to the counter. Sum(vs) must be positive.
- //
- // If len(vs) == 0, increments the counter by 1.
- Inc(vs ...float64)
-}
-
-// LabeledCounter is counter that must have labels populated before use.
-type LabeledCounter interface {
- WithValues(vs ...string) Counter
-}
-
-type labeledCounter struct {
- pc *prometheus.CounterVec
-}
-
-func (lc *labeledCounter) WithValues(vs ...string) Counter {
- return &counter{pc: lc.pc.WithLabelValues(vs...)}
-}
-
-func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) {
- lc.pc.Describe(ch)
-}
-
-func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) {
- lc.pc.Collect(ch)
-}
-
-type counter struct {
- pc prometheus.Counter
-}
-
-func (c *counter) Inc(vs ...float64) {
- if len(vs) == 0 {
- c.pc.Inc()
- }
-
- c.pc.Add(sumFloat64(vs...))
-}
-
-func (c *counter) Describe(ch chan<- *prometheus.Desc) {
- c.pc.Describe(ch)
-}
-
-func (c *counter) Collect(ch chan<- prometheus.Metric) {
- c.pc.Collect(ch)
-}
diff --git a/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/docker/go-metrics/docs.go
deleted file mode 100644
index 8fbdfc697d5..00000000000
--- a/vendor/github.com/docker/go-metrics/docs.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// This package is small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
-
-package metrics
diff --git a/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/docker/go-metrics/gauge.go
deleted file mode 100644
index 74296e87740..00000000000
--- a/vendor/github.com/docker/go-metrics/gauge.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-// Gauge is a metric that allows incrementing and decrementing a value
-type Gauge interface {
- Inc(...float64)
- Dec(...float64)
-
- // Add adds the provided value to the gauge's current value
- Add(float64)
-
- // Set replaces the gauge's current value with the provided value
- Set(float64)
-}
-
-// LabeledGauge describes a gauge the must have values populated before use.
-type LabeledGauge interface {
- WithValues(labels ...string) Gauge
-}
-
-type labeledGauge struct {
- pg *prometheus.GaugeVec
-}
-
-func (lg *labeledGauge) WithValues(labels ...string) Gauge {
- return &gauge{pg: lg.pg.WithLabelValues(labels...)}
-}
-
-func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) {
- lg.pg.Describe(c)
-}
-
-func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) {
- lg.pg.Collect(c)
-}
-
-type gauge struct {
- pg prometheus.Gauge
-}
-
-func (g *gauge) Inc(vs ...float64) {
- if len(vs) == 0 {
- g.pg.Inc()
- }
-
- g.Add(sumFloat64(vs...))
-}
-
-func (g *gauge) Dec(vs ...float64) {
- if len(vs) == 0 {
- g.pg.Dec()
- }
-
- g.Add(-sumFloat64(vs...))
-}
-
-func (g *gauge) Add(v float64) {
- g.pg.Add(v)
-}
-
-func (g *gauge) Set(v float64) {
- g.pg.Set(v)
-}
-
-func (g *gauge) Describe(c chan<- *prometheus.Desc) {
- g.pg.Describe(c)
-}
-
-func (g *gauge) Collect(c chan<- prometheus.Metric) {
- g.pg.Collect(c)
-}
diff --git a/vendor/github.com/docker/go-metrics/go.mod b/vendor/github.com/docker/go-metrics/go.mod
deleted file mode 100644
index 7e328f0cffe..00000000000
--- a/vendor/github.com/docker/go-metrics/go.mod
+++ /dev/null
@@ -1,5 +0,0 @@
-module github.com/docker/go-metrics
-
-go 1.11
-
-require github.com/prometheus/client_golang v1.1.0
diff --git a/vendor/github.com/docker/go-metrics/go.sum b/vendor/github.com/docker/go-metrics/go.sum
deleted file mode 100644
index b8fb9d079d3..00000000000
--- a/vendor/github.com/docker/go-metrics/go.sum
+++ /dev/null
@@ -1,67 +0,0 @@
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go
deleted file mode 100644
index 05601e9ecd2..00000000000
--- a/vendor/github.com/docker/go-metrics/handler.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package metrics
-
-import (
- "net/http"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promhttp"
-)
-
-// HTTPHandlerOpts describes a set of configurable options of http metrics
-type HTTPHandlerOpts struct {
- DurationBuckets []float64
- RequestSizeBuckets []float64
- ResponseSizeBuckets []float64
-}
-
-const (
- InstrumentHandlerResponseSize = iota
- InstrumentHandlerRequestSize
- InstrumentHandlerDuration
- InstrumentHandlerCounter
- InstrumentHandlerInFlight
-)
-
-type HTTPMetric struct {
- prometheus.Collector
- handlerType int
-}
-
-var (
- defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}
- defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G
- defaultResponseSizeBuckets = defaultRequestSizeBuckets
-)
-
-// Handler returns the global http.Handler that provides the prometheus
-// metrics format on GET requests. This handler is no longer instrumented.
-func Handler() http.Handler {
- return promhttp.Handler()
-}
-
-func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
- return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
-}
-
-func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
- var handler http.Handler
- handler = http.HandlerFunc(handlerFunc)
- for _, metric := range metrics {
- switch metric.handlerType {
- case InstrumentHandlerResponseSize:
- if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
- handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
- }
- case InstrumentHandlerRequestSize:
- if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
- handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
- }
- case InstrumentHandlerDuration:
- if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
- handler = promhttp.InstrumentHandlerDuration(collector, handler)
- }
- case InstrumentHandlerCounter:
- if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
- handler = promhttp.InstrumentHandlerCounter(collector, handler)
- }
- case InstrumentHandlerInFlight:
- if collector, ok := metric.Collector.(prometheus.Gauge); ok {
- handler = promhttp.InstrumentHandlerInFlight(collector, handler)
- }
- }
- }
- return handler.ServeHTTP
-}
diff --git a/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/docker/go-metrics/helpers.go
deleted file mode 100644
index 68b7f51b338..00000000000
--- a/vendor/github.com/docker/go-metrics/helpers.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package metrics
-
-func sumFloat64(vs ...float64) float64 {
- var sum float64
- for _, v := range vs {
- sum += v
- }
-
- return sum
-}
diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go
deleted file mode 100644
index 798315451a7..00000000000
--- a/vendor/github.com/docker/go-metrics/namespace.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "sync"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type Labels map[string]string
-
-// NewNamespace returns a namespaces that is responsible for managing a collection of
-// metrics for a particual namespace and subsystem
-//
-// labels allows const labels to be added to all metrics created in this namespace
-// and are commonly used for data like application version and git commit
-func NewNamespace(name, subsystem string, labels Labels) *Namespace {
- if labels == nil {
- labels = make(map[string]string)
- }
- return &Namespace{
- name: name,
- subsystem: subsystem,
- labels: labels,
- }
-}
-
-// Namespace describes a set of metrics that share a namespace and subsystem.
-type Namespace struct {
- name string
- subsystem string
- labels Labels
- mu sync.Mutex
- metrics []prometheus.Collector
-}
-
-// WithConstLabels returns a namespace with the provided set of labels merged
-// with the existing constant labels on the namespace.
-//
-// Only metrics created with the returned namespace will get the new constant
-// labels. The returned namespace must be registered separately.
-func (n *Namespace) WithConstLabels(labels Labels) *Namespace {
- n.mu.Lock()
- ns := &Namespace{
- name: n.name,
- subsystem: n.subsystem,
- labels: mergeLabels(n.labels, labels),
- }
- n.mu.Unlock()
- return ns
-}
-
-func (n *Namespace) NewCounter(name, help string) Counter {
- c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))}
- n.Add(c)
- return c
-}
-
-func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter {
- c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)}
- n.Add(c)
- return c
-}
-
-func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts {
- return prometheus.CounterOpts{
- Namespace: n.name,
- Subsystem: n.subsystem,
- Name: makeName(name, Total),
- Help: help,
- ConstLabels: prometheus.Labels(n.labels),
- }
-}
-
-func (n *Namespace) NewTimer(name, help string) Timer {
- t := &timer{
- m: prometheus.NewHistogram(n.newTimerOpts(name, help)),
- }
- n.Add(t)
- return t
-}
-
-func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer {
- t := &labeledTimer{
- m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels),
- }
- n.Add(t)
- return t
-}
-
-func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts {
- return prometheus.HistogramOpts{
- Namespace: n.name,
- Subsystem: n.subsystem,
- Name: makeName(name, Seconds),
- Help: help,
- ConstLabels: prometheus.Labels(n.labels),
- }
-}
-
-func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge {
- g := &gauge{
- pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)),
- }
- n.Add(g)
- return g
-}
-
-func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge {
- g := &labeledGauge{
- pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels),
- }
- n.Add(g)
- return g
-}
-
-func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts {
- return prometheus.GaugeOpts{
- Namespace: n.name,
- Subsystem: n.subsystem,
- Name: makeName(name, unit),
- Help: help,
- ConstLabels: prometheus.Labels(n.labels),
- }
-}
-
-func (n *Namespace) Describe(ch chan<- *prometheus.Desc) {
- n.mu.Lock()
- defer n.mu.Unlock()
-
- for _, metric := range n.metrics {
- metric.Describe(ch)
- }
-}
-
-func (n *Namespace) Collect(ch chan<- prometheus.Metric) {
- n.mu.Lock()
- defer n.mu.Unlock()
-
- for _, metric := range n.metrics {
- metric.Collect(ch)
- }
-}
-
-func (n *Namespace) Add(collector prometheus.Collector) {
- n.mu.Lock()
- n.metrics = append(n.metrics, collector)
- n.mu.Unlock()
-}
-
-func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc {
- name = makeName(name, unit)
- namespace := n.name
- if n.subsystem != "" {
- namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem)
- }
- name = fmt.Sprintf("%s_%s", namespace, name)
- return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels))
-}
-
-// mergeLabels merges two or more labels objects into a single map, favoring
-// the later labels.
-func mergeLabels(lbs ...Labels) Labels {
- merged := make(Labels)
-
- for _, target := range lbs {
- for k, v := range target {
- merged[k] = v
- }
- }
-
- return merged
-}
-
-func makeName(name string, unit Unit) string {
- if unit == "" {
- return name
- }
-
- return fmt.Sprintf("%s_%s", name, unit)
-}
-
-func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric {
- return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
- DurationBuckets: defaultDurationBuckets,
- RequestSizeBuckets: defaultResponseSizeBuckets,
- ResponseSizeBuckets: defaultResponseSizeBuckets,
- })
-}
-
-func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric {
- return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
- DurationBuckets: durationBuckets,
- RequestSizeBuckets: requestSizeBuckets,
- ResponseSizeBuckets: responseSizeBuckets,
- })
-}
-
-func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric {
- var httpMetrics []*HTTPMetric
- inFlightMetric := n.NewInFlightGaugeMetric(handlerName)
- requestTotalMetric := n.NewRequestTotalMetric(handlerName)
- requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets)
- requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets)
- responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets)
- httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric)
- return httpMetrics
-}
-
-func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric {
- labels := prometheus.Labels(n.labels)
- labels["handler"] = handlerName
- metric := prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: n.name,
- Subsystem: n.subsystem,
- Name: "in_flight_requests",
- Help: "The in-flight HTTP requests",
- ConstLabels: prometheus.Labels(labels),
- })
- httpMetric := &HTTPMetric{
- Collector: metric,
- handlerType: InstrumentHandlerInFlight,
- }
- n.Add(httpMetric)
- return httpMetric
-}
-
-func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric {
- labels := prometheus.Labels(n.labels)
- labels["handler"] = handlerName
- metric := prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: n.name,
- Subsystem: n.subsystem,
- Name: "requests_total",
- Help: "Total number of HTTP requests made.",
- ConstLabels: prometheus.Labels(labels),
- },
- []string{"code", "method"},
- )
- httpMetric := &HTTPMetric{
- Collector: metric,
- handlerType: InstrumentHandlerCounter,
- }
- n.Add(httpMetric)
- return httpMetric
-}
-func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric {
- if len(buckets) == 0 {
- panic("DurationBuckets must be provided")
- }
- labels := prometheus.Labels(n.labels)
- labels["handler"] = handlerName
- opts := prometheus.HistogramOpts{
- Namespace: n.name,
- Subsystem: n.subsystem,
- Name: "request_duration_seconds",
- Help: "The HTTP request latencies in seconds.",
- Buckets: buckets,
- ConstLabels: prometheus.Labels(labels),
- }
- metric := prometheus.NewHistogramVec(opts, []string{"method"})
- httpMetric := &HTTPMetric{
- Collector: metric,
- handlerType: InstrumentHandlerDuration,
- }
- n.Add(httpMetric)
- return httpMetric
-}
-
-func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
- if len(buckets) == 0 {
- panic("RequestSizeBuckets must be provided")
- }
- labels := prometheus.Labels(n.labels)
- labels["handler"] = handlerName
- opts := prometheus.HistogramOpts{
- Namespace: n.name,
- Subsystem: n.subsystem,
- Name: "request_size_bytes",
- Help: "The HTTP request sizes in bytes.",
- Buckets: buckets,
- ConstLabels: prometheus.Labels(labels),
- }
- metric := prometheus.NewHistogramVec(opts, []string{})
- httpMetric := &HTTPMetric{
- Collector: metric,
- handlerType: InstrumentHandlerRequestSize,
- }
- n.Add(httpMetric)
- return httpMetric
-}
-
-func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
- if len(buckets) == 0 {
- panic("ResponseSizeBuckets must be provided")
- }
- labels := prometheus.Labels(n.labels)
- labels["handler"] = handlerName
- opts := prometheus.HistogramOpts{
- Namespace: n.name,
- Subsystem: n.subsystem,
- Name: "response_size_bytes",
- Help: "The HTTP response sizes in bytes.",
- Buckets: buckets,
- ConstLabels: prometheus.Labels(labels),
- }
- metrics := prometheus.NewHistogramVec(opts, []string{})
- httpMetric := &HTTPMetric{
- Collector: metrics,
- handlerType: InstrumentHandlerResponseSize,
- }
- n.Add(httpMetric)
- return httpMetric
-}
diff --git a/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/docker/go-metrics/register.go
deleted file mode 100644
index 708358df01d..00000000000
--- a/vendor/github.com/docker/go-metrics/register.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-// Register adds all the metrics in the provided namespace to the global
-// metrics registry
-func Register(n *Namespace) {
- prometheus.MustRegister(n)
-}
-
-// Deregister removes all the metrics in the provided namespace from the
-// global metrics registry
-func Deregister(n *Namespace) {
- prometheus.Unregister(n)
-}
diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go
deleted file mode 100644
index 824c98739cf..00000000000
--- a/vendor/github.com/docker/go-metrics/timer.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package metrics
-
-import (
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// StartTimer begins a timer observation at the callsite. When the target
-// operation is completed, the caller should call the return done func().
-func StartTimer(timer Timer) (done func()) {
- start := time.Now()
- return func() {
- timer.Update(time.Since(start))
- }
-}
-
-// Timer is a metric that allows collecting the duration of an action in seconds
-type Timer interface {
- // Update records an observation, duration, and converts to the target
- // units.
- Update(duration time.Duration)
-
- // UpdateSince will add the duration from the provided starting time to the
- // timer's summary with the precisions that was used in creation of the timer
- UpdateSince(time.Time)
-}
-
-// LabeledTimer is a timer that must have label values populated before use.
-type LabeledTimer interface {
- WithValues(labels ...string) *labeledTimerObserver
-}
-
-type labeledTimer struct {
- m *prometheus.HistogramVec
-}
-
-type labeledTimerObserver struct {
- m prometheus.Observer
-}
-
-func (lbo *labeledTimerObserver) Update(duration time.Duration) {
- lbo.m.Observe(duration.Seconds())
-}
-
-func (lbo *labeledTimerObserver) UpdateSince(since time.Time) {
- lbo.m.Observe(time.Since(since).Seconds())
-}
-
-func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver {
- return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)}
-}
-
-func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) {
- lt.m.Describe(c)
-}
-
-func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) {
- lt.m.Collect(c)
-}
-
-type timer struct {
- m prometheus.Observer
-}
-
-func (t *timer) Update(duration time.Duration) {
- t.m.Observe(duration.Seconds())
-}
-
-func (t *timer) UpdateSince(since time.Time) {
- t.m.Observe(time.Since(since).Seconds())
-}
-
-func (t *timer) Describe(c chan<- *prometheus.Desc) {
- c <- t.m.(prometheus.Metric).Desc()
-}
-
-func (t *timer) Collect(c chan<- prometheus.Metric) {
- // Are there any observers that don't implement Collector? It is really
- // unclear what the point of the upstream change was, but we'll let this
- // panic if we get an observer that doesn't implement collector. In this
- // case, we should almost always see metricVec objects, so this should
- // never panic.
- t.m.(prometheus.Collector).Collect(c)
-}
diff --git a/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/docker/go-metrics/unit.go
deleted file mode 100644
index c96622f9031..00000000000
--- a/vendor/github.com/docker/go-metrics/unit.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package metrics
-
-// Unit represents the type or precision of a metric that is appended to
-// the metrics fully qualified name
-type Unit string
-
-const (
- Nanoseconds Unit = "nanoseconds"
- Seconds Unit = "seconds"
- Bytes Unit = "bytes"
- Total Unit = "total"
-)
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
index 85f6ab07155..c245a89513f 100644
--- a/vendor/github.com/docker/go-units/size.go
+++ b/vendor/github.com/docker/go-units/size.go
@@ -2,7 +2,6 @@ package units
import (
"fmt"
- "regexp"
"strconv"
"strings"
)
@@ -26,16 +25,17 @@ const (
PiB = 1024 * TiB
)
-type unitMap map[string]int64
+type unitMap map[byte]int64
var (
- decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
- binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
- sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`)
+ decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB}
+ binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB}
)
-var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
-var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+var (
+ decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+ binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+)
func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) {
i := 0
@@ -89,20 +89,66 @@ func RAMInBytes(size string) (int64, error) {
// Parses the human-readable size string into the amount it represents.
func parseSize(sizeStr string, uMap unitMap) (int64, error) {
- matches := sizeRegex.FindStringSubmatch(sizeStr)
- if len(matches) != 4 {
+ // TODO: rewrite to use strings.Cut if there's a space
+ // once Go < 1.18 is deprecated.
+ sep := strings.LastIndexAny(sizeStr, "01234567890. ")
+ if sep == -1 {
+ // There should be at least a digit.
return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
}
+ var num, sfx string
+ if sizeStr[sep] != ' ' {
+ num = sizeStr[:sep+1]
+ sfx = sizeStr[sep+1:]
+ } else {
+ // Omit the space separator.
+ num = sizeStr[:sep]
+ sfx = sizeStr[sep+1:]
+ }
- size, err := strconv.ParseFloat(matches[1], 64)
+ size, err := strconv.ParseFloat(num, 64)
if err != nil {
return -1, err
}
+ // Backward compatibility: reject negative sizes.
+ if size < 0 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ if len(sfx) == 0 {
+ return int64(size), nil
+ }
- unitPrefix := strings.ToLower(matches[3])
- if mul, ok := uMap[unitPrefix]; ok {
+ // Process the suffix.
+
+ if len(sfx) > 3 { // Too long.
+ goto badSuffix
+ }
+ sfx = strings.ToLower(sfx)
+ // Trivial case: b suffix.
+ if sfx[0] == 'b' {
+ if len(sfx) > 1 { // no extra characters allowed after b.
+ goto badSuffix
+ }
+ return int64(size), nil
+ }
+ // A suffix from the map.
+ if mul, ok := uMap[sfx[0]]; ok {
size *= float64(mul)
+ } else {
+ goto badSuffix
+ }
+
+ // The suffix may have extra "b" or "ib" (e.g. KiB or MB).
+ switch {
+ case len(sfx) == 2 && sfx[1] != 'b':
+ goto badSuffix
+ case len(sfx) == 3 && sfx[1:] != "ib":
+ goto badSuffix
}
return int64(size), nil
+
+badSuffix:
+ return -1, fmt.Errorf("invalid suffix: '%s'", sfx)
}
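
The rewritten `parseSize` drops the regexp in favor of manual suffix scanning, so the accepted grammar is easiest to see from the caller's side. A minimal sketch, assuming the public go-units API (`RAMInBytes` parses with the binary map, `FromHumanSize` with the decimal map); this is illustration only, not part of the patch:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// Binary map: 1 KiB = 1024 bytes. "32KiB", "32kb", and "32 k" all parse.
	n, err := units.RAMInBytes("32KiB")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 32768

	// Decimal map: 1 kB = 1000 bytes.
	m, err := units.FromHumanSize("32kB")
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // 32000

	// New in this version: negative sizes are rejected explicitly.
	if _, err := units.RAMInBytes("-1KiB"); err != nil {
		fmt.Println(err) // invalid size: '-1KiB'
	}
}
```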
diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap
new file mode 100644
index 00000000000..a04f2907fed
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.mailmap
@@ -0,0 +1,2 @@
+Chris Howey
+Nathan Youngman <4566+nathany@users.noreply.github.com>
diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml
deleted file mode 100644
index a9c30165cdd..00000000000
--- a/vendor/github.com/fsnotify/fsnotify/.travis.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-sudo: false
-language: go
-
-go:
- - "stable"
- - "1.11.x"
- - "1.10.x"
- - "1.9.x"
-
-matrix:
- include:
- - go: "stable"
- env: GOLINT=true
- allow_failures:
- - go: tip
- fast_finish: true
-
-
-before_install:
- - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi
-
-script:
- - go test --race ./...
-
-after_script:
- - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
- - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi
- - go vet ./...
-
-os:
- - linux
- - osx
- - windows
-
-notifications:
- email: false
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
index 5ab5d41c547..6cbabe5ef50 100644
--- a/vendor/github.com/fsnotify/fsnotify/AUTHORS
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -4,35 +4,44 @@
# You can update this list using the following command:
#
-# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS
# Please keep the list sorted.
Aaron L
Adrien Bustany
+Alexey Kazakov
Amit Krishnan
Anmol Sethi
Bjørn Erik Pedersen
+Brian Goff
Bruno Bigras
Caleb Spare
Case Nelson
-Chris Howey
+Chris Howey
Christoffer Buchholz
Daniel Wagner-Hall
Dave Cheney
+Eric Lin
Evan Phoenix
Francisco Souza
+Gautam Dey
Hari haran
-John C Barstow
+Ichinose Shogo
+Johannes Ebke
+John C Barstow
Kelvin Fo
Ken-ichirou MATSUZAWA
Matt Layher
+Matthias Stone
Nathan Youngman
Nickolai Zeldovich
+Oliver Bristow
Patrick
Paul Hammond
Pawel Knap
Pieter Droogendijk
+Pratik Shinde
Pursuit92
Riku Voipio
Rob Figueiredo
@@ -41,6 +50,7 @@ Slawek Ligus
Soge Zhang
Tiffany Jernigan
Tilak Sharma
+Tobias Klauser
Tom Payne
Travis Cline
Tudor Golubenco
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
index be4d7ea2c14..cc01c08f56d 100644
--- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -1,6 +1,46 @@
# Changelog
-## v1.4.7 / 2018-01-09
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.5.4] - 2022-04-25
+
+* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
+* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
+* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
+
+## [1.5.3] - 2022-04-22
+
+* This version is retracted. An incorrect branch was published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
+
+## [1.5.2] - 2022-04-21
+
+* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
+* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
+* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
+* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
+* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
+
+## [1.5.1] - 2021-08-24
+
+* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
+
+## [1.5.0] - 2021-08-20
+
+* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
+* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
+* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
+* CI: Use GitHub Actions for CI and cover go 1.12-1.17
+ [#378](https://github.com/fsnotify/fsnotify/pull/378)
+ [#381](https://github.com/fsnotify/fsnotify/pull/381)
+ [#385](https://github.com/fsnotify/fsnotify/pull/385)
+* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
+
+## [1.4.7] - 2018-01-09
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
* Tests: Fix missing verb on format string (thanks @rchiossi)
@@ -10,62 +50,62 @@
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
* Docs: replace references to OS X with macOS
-## v1.4.2 / 2016-10-10
+## [1.4.2] - 2016-10-10
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
-## v1.4.1 / 2016-10-04
+## [1.4.1] - 2016-10-04
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
-## v1.4.0 / 2016-10-01
+## [1.4.0] - 2016-10-01
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
-## v1.3.1 / 2016-06-28
+## [1.3.1] - 2016-06-28
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
-## v1.3.0 / 2016-04-19
+## [1.3.0] - 2016-04-19
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
-## v1.2.10 / 2016-03-02
+## [1.2.10] - 2016-03-02
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
-## v1.2.9 / 2016-01-13
+## [1.2.9] - 2016-01-13
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
-## v1.2.8 / 2015-12-17
+## [1.2.8] - 2015-12-17
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
* inotify: fix race in test
* enable race detection for continuous integration (Linux, Mac, Windows)
-## v1.2.5 / 2015-10-17
+## [1.2.5] - 2015-10-17
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
-## v1.2.1 / 2015-10-14
+## [1.2.1] - 2015-10-14
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
-## v1.2.0 / 2015-02-08
+## [1.2.0] - 2015-02-08
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
-## v1.1.1 / 2015-02-05
+## [1.1.1] - 2015-02-05
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
-## v1.1.0 / 2014-12-12
+## [1.1.0] - 2014-12-12
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
* add low-level functions
@@ -77,22 +117,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
-## v1.0.4 / 2014-09-07
+## [1.0.4] - 2014-09-07
* kqueue: add dragonfly to the build tags.
* Rename source code files, rearrange code so exported APIs are at the top.
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
-## v1.0.3 / 2014-08-19
+## [1.0.3] - 2014-08-19
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
-## v1.0.2 / 2014-08-17
+## [1.0.2] - 2014-08-17
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
-## v1.0.0 / 2014-08-15
+## [1.0.0] - 2014-08-15
* [API] Remove AddWatch on Windows, use Add.
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
@@ -146,51 +186,51 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* no tests for the current implementation
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
-## v0.9.3 / 2014-12-31
+## [0.9.3] - 2014-12-31
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
-## v0.9.2 / 2014-08-17
+## [0.9.2] - 2014-08-17
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
-## v0.9.1 / 2014-06-12
+## [0.9.1] - 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-## v0.9.0 / 2014-01-17
+## [0.9.0] - 2014-01-17
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
-## v0.8.12 / 2013-11-13
+## [0.8.12] - 2013-11-13
* [API] Remove FD_SET and friends from Linux adapter
-## v0.8.11 / 2013-11-02
+## [0.8.11] - 2013-11-02
* [Doc] Add Changelog [#72][] (thanks @nathany)
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
-## v0.8.10 / 2013-10-19
+## [0.8.10] - 2013-10-19
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
* [Doc] specify OS-specific limits in README (thanks @debrando)
-## v0.8.9 / 2013-09-08
+## [0.8.9] - 2013-09-08
* [Doc] Contributing (thanks @nathany)
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
* [Doc] GoCI badge in README (Linux only) [#60][]
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
-## v0.8.8 / 2013-06-17
+## [0.8.8] - 2013-06-17
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
-## v0.8.7 / 2013-06-03
+## [0.8.7] - 2013-06-03
* [API] Make syscall flags internal
* [Fix] inotify: ignore event changes
@@ -198,74 +238,74 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* [Fix] tests on Windows
* lower case error messages
-## v0.8.6 / 2013-05-23
+## [0.8.6] - 2013-05-23
* kqueue: Use EVT_ONLY flag on Darwin
* [Doc] Update README with full example
-## v0.8.5 / 2013-05-09
+## [0.8.5] - 2013-05-09
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
-## v0.8.4 / 2013-04-07
+## [0.8.4] - 2013-04-07
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
-## v0.8.3 / 2013-03-13
+## [0.8.3] - 2013-03-13
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
-## v0.8.2 / 2013-02-07
+## [0.8.2] - 2013-02-07
* [Doc] add Authors
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
-## v0.8.1 / 2013-01-09
+## [0.8.1] - 2013-01-09
* [Fix] Windows path separators
* [Doc] BSD License
-## v0.8.0 / 2012-11-09
+## [0.8.0] - 2012-11-09
* kqueue: directory watching improvements (thanks @vmirage)
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
-## v0.7.4 / 2012-10-09
+## [0.7.4] - 2012-10-09
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
* [Fix] kqueue: modify after recreation of file
-## v0.7.3 / 2012-09-27
+## [0.7.3] - 2012-09-27
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
* [Fix] kqueue: no longer get duplicate CREATE events
-## v0.7.2 / 2012-09-01
+## [0.7.2] - 2012-09-01
* kqueue: events for created directories
-## v0.7.1 / 2012-07-14
+## [0.7.1] - 2012-07-14
* [Fix] for renaming files
-## v0.7.0 / 2012-07-02
+## [0.7.0] - 2012-07-02
* [Feature] FSNotify flags
* [Fix] inotify: Added file name back to event path
-## v0.6.0 / 2012-06-06
+## [0.6.0] - 2012-06-06
* kqueue: watch files after directory created (thanks @tmc)
-## v0.5.1 / 2012-05-22
+## [0.5.1] - 2012-05-22
* [Fix] inotify: remove all watches before Close()
-## v0.5.0 / 2012-05-03
+## [0.5.0] - 2012-05-03
* [API] kqueue: return errors during watch instead of sending over channel
* kqueue: match symlink behavior on Linux
@@ -273,22 +313,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* [Fix] kqueue: handle EINTR (reported by @robfig)
* [Doc] Godoc example [#1][] (thanks @davecheney)
-## v0.4.0 / 2012-03-30
+## [0.4.0] - 2012-03-30
* Go 1 released: build with go tool
* [Feature] Windows support using winfsnotify
* Windows does not have attribute change notifications
* Roll attribute notifications into IsModify
-## v0.3.0 / 2012-02-19
+## [0.3.0] - 2012-02-19
* kqueue: add files when watch directory
-## v0.2.0 / 2011-12-30
+## [0.2.0] - 2011-12-30
* update to latest Go weekly code
-## v0.1.0 / 2011-10-19
+## [0.1.0] - 2011-10-19
* kqueue: add watch on file creation to match inotify
* kqueue: create file event
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
index 828a60b24ba..8a642563d71 100644
--- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -48,18 +48,6 @@ fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Win
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
-To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
-
-* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
-* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
-* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
-* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
-* When you're done, you will want to halt or destroy the Vagrant boxes.
-
-Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
-
-Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
-
### Maintainers
Help maintaining fsnotify is welcome. To be a maintainer:
@@ -67,11 +55,6 @@ Help maintaining fsnotify is welcome. To be a maintainer:
* Submit a pull request and sign the CLA as above.
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
-To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
-
All code changes should be internal pull requests.
Releases are tagged using [Semantic Versioning](http://semver.org/).
-
-[hub]: https://github.com/github/hub
-[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
index b2629e5229c..0731c5ef8ad 100644
--- a/vendor/github.com/fsnotify/fsnotify/README.md
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -1,37 +1,31 @@
# File system notifications for Go
-[](https://godoc.org/github.com/fsnotify/fsnotify) [](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+[](https://pkg.go.dev/github.com/fsnotify/fsnotify) [](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [](https://github.com/fsnotify/fsnotify/issues/413)
-fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
-
-```console
-go get -u golang.org/x/sys/...
-```
+fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library.
Cross platform: Windows, Linux, BSD and macOS.
| Adapter | OS | Status |
| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
-| inotify | Linux 2.6.27 or later, Android\* | Supported [](https://travis-ci.org/fsnotify/fsnotify) |
-| kqueue | BSD, macOS, iOS\* | Supported [](https://travis-ci.org/fsnotify/fsnotify) |
-| ReadDirectoryChangesW | Windows | Supported [](https://travis-ci.org/fsnotify/fsnotify) |
+| inotify | Linux 2.6.27 or later, Android\* | Supported |
+| kqueue | BSD, macOS, iOS\* | Supported |
+| ReadDirectoryChangesW | Windows | Supported |
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
-| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) |
-| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) |
+| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
+| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
\* Android and iOS are untested.
-Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability
-fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
-
-All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
-Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/).
## Usage
@@ -84,10 +78,6 @@ func main() {
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
-## Example
-
-See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
-
## FAQ
**When a file is moved to another directory is it still being watched?**
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
index ced39cb881e..b3ac3d8f55f 100644
--- a/vendor/github.com/fsnotify/fsnotify/fen.go
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build solaris
// +build solaris
package fsnotify
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index 89cab046d12..0f4ee52e8aa 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !plan9
// +build !plan9
// Package fsnotify provides a platform-independent interface for file system notifications.
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
new file mode 100644
index 00000000000..59688559836
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
@@ -0,0 +1,36 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+
+package fsnotify
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct{}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod
index ff11e13f224..48cfd07fe23 100644
--- a/vendor/github.com/fsnotify/fsnotify/go.mod
+++ b/vendor/github.com/fsnotify/fsnotify/go.mod
@@ -1,5 +1,10 @@
module github.com/fsnotify/fsnotify
-go 1.13
+go 1.16
-require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9
+require golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
+
+retract (
+ v1.5.3 // Published an incorrect branch accidentally https://github.com/fsnotify/fsnotify/issues/445
+ v1.5.0 // Contains symlink regression https://github.com/fsnotify/fsnotify/pull/394
+)
diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum
index f60af9855da..7f2d82d5c1b 100644
--- a/vendor/github.com/fsnotify/fsnotify/go.sum
+++ b/vendor/github.com/fsnotify/fsnotify/go.sum
@@ -1,2 +1,2 @@
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
index d9fd1b88a05..a6d0e0ec8c1 100644
--- a/vendor/github.com/fsnotify/fsnotify/inotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build linux
// +build linux
package fsnotify
@@ -162,6 +163,19 @@ func (w *Watcher) Remove(name string) error {
return nil
}
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.watches))
+ for pathname := range w.watches {
+ entries = append(entries, pathname)
+ }
+
+ return entries
+}
+
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
@@ -272,7 +286,7 @@ func (w *Watcher) readEvents() {
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
- bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
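
Each backend in this update gains the same `WatchList` accessor, so callers can enumerate active watches for the first time. A minimal usage sketch against the public fsnotify API; the watched path is only a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watches are non-recursive: add each directory of interest.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Added by this change: list everything currently being watched.
	fmt.Println(w.WatchList()) // e.g. [/tmp]
}
```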
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
index b33f2b4d4b7..b572a37c3f1 100644
--- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build linux
// +build linux
package fsnotify
@@ -37,7 +38,6 @@ func newFdPoller(fd int) (*fdPoller, error) {
poller.close()
}
}()
- poller.fd = fd
// Create epoll fd
poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
index 86e76a3d676..6fb8d8532e7 100644
--- a/vendor/github.com/fsnotify/fsnotify/kqueue.go
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
@@ -147,6 +148,19 @@ func (w *Watcher) Remove(name string) error {
return nil
}
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.watches))
+ for pathname := range w.watches {
+ entries = append(entries, pathname)
+ }
+
+ return entries
+}
+
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
index 2306c4620bf..36cc3845b6e 100644
--- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build freebsd || openbsd || netbsd || dragonfly
// +build freebsd openbsd netbsd dragonfly
package fsnotify
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
index 870c4d6d184..98cd8476ffb 100644
--- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build darwin
// +build darwin
package fsnotify
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
index 09436f31d82..02ce7deb0bb 100644
--- a/vendor/github.com/fsnotify/fsnotify/windows.go
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build windows
// +build windows
package fsnotify
@@ -11,6 +12,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "reflect"
"runtime"
"sync"
"syscall"
@@ -95,6 +97,21 @@ func (w *Watcher) Remove(name string) error {
return <-in.reply
}
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.watches))
+ for _, entry := range w.watches {
+ for _, watchEntry := range entry {
+ entries = append(entries, watchEntry.path)
+ }
+ }
+
+ return entries
+}
+
const (
// Options for AddWatch
sysFSONESHOT = 0x80000000
@@ -451,8 +468,16 @@ func (w *Watcher) readEvents() {
// Point "raw" to the event in the buffer
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
- buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
- name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+ // TODO: Consider using unsafe.Slice that is available from go1.17
+ // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang
+ // instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name
+ size := int(raw.FileNameLength / 2)
+ var buf []uint16
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
+ sh.Len = size
+ sh.Cap = size
+ name := syscall.UTF16ToString(buf)
fullname := filepath.Join(watch.path, name)
var mask uint64
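
The TODO in this hunk points at `unsafe.Slice` (available since Go 1.17) as a cleaner replacement for the `reflect.SliceHeader` construction once the minimum Go version allows it. A hypothetical sketch of that variant, using a stand-in UTF-16 buffer rather than the real `raw.FileName` field:

```go
package main

import (
	"fmt"
	"unicode/utf16"
	"unsafe"
)

func main() {
	// Stand-in for the event buffer: a UTF-16 encoded file name.
	name16 := [5]uint16{'h', 'e', 'l', 'l', 'o'}
	size := len(name16) // plays the role of raw.FileNameLength / 2

	// unsafe.Slice replaces the SliceHeader dance: view `size` uint16
	// values starting at &name16[0] as a []uint16, without reflect.
	buf := unsafe.Slice(&name16[0], size)
	fmt.Println(string(utf16.Decode(buf))) // hello
}
```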
diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go
new file mode 100644
index 00000000000..60e82caa9a2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go
@@ -0,0 +1,524 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonpb
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/encoding/protojson"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapJSONUnmarshalV2 = false
+
+// UnmarshalNext unmarshals the next JSON object from d into m.
+func UnmarshalNext(d *json.Decoder, m proto.Message) error {
+ return new(Unmarshaler).UnmarshalNext(d, m)
+}
+
+// Unmarshal unmarshals a JSON object from r into m.
+func Unmarshal(r io.Reader, m proto.Message) error {
+ return new(Unmarshaler).Unmarshal(r, m)
+}
+
+// UnmarshalString unmarshals a JSON object from s into m.
+func UnmarshalString(s string, m proto.Message) error {
+ return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
+}
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+ // AllowUnknownFields specifies whether to allow messages to contain
+ // unknown JSON fields, as opposed to failing to unmarshal.
+ AllowUnknownFields bool
+
+ // AnyResolver is used to resolve the google.protobuf.Any well-known type.
+ // If unset, the global registry is used by default.
+ AnyResolver AnyResolver
+}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
+// they are unmarshaled from JSON. Messages that implement this should also
+// implement JSONPBMarshaler so that the custom format can be produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+//
+// Deprecated: Custom types should implement protobuf reflection instead.
+type JSONPBUnmarshaler interface {
+ UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Unmarshal unmarshals a JSON object from r into m.
+func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
+ return u.UnmarshalNext(json.NewDecoder(r), m)
+}
+
+// UnmarshalNext unmarshals the next JSON object from d into m.
+func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
+ if m == nil {
+ return errors.New("invalid nil message")
+ }
+
+ // Parse the next JSON object from the stream.
+ raw := json.RawMessage{}
+ if err := d.Decode(&raw); err != nil {
+ return err
+ }
+
+ // Check for custom unmarshalers first since they may not properly
+ // implement protobuf reflection that the logic below relies on.
+ if jsu, ok := m.(JSONPBUnmarshaler); ok {
+ return jsu.UnmarshalJSONPB(u, raw)
+ }
+
+ mr := proto.MessageReflect(m)
+
+ // NOTE: For historical reasons, a top-level null is treated as a noop.
+ // This is incorrect, but kept for compatibility.
+ if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
+ return nil
+ }
+
+ if wrapJSONUnmarshalV2 {
+ // NOTE: If input message is non-empty, we need to preserve merge semantics
+ // of the old jsonpb implementation. These semantics are not supported by
+ // the protobuf JSON specification.
+ isEmpty := true
+ mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
+ isEmpty = false // at least one iteration implies non-empty
+ return false
+ })
+ if !isEmpty {
+ // Perform unmarshaling into a newly allocated, empty message.
+ mr = mr.New()
+
+ // Use a defer to copy all unmarshaled fields into the original message.
+ dst := proto.MessageReflect(m)
+ defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ dst.Set(fd, v)
+ return true
+ })
+ }
+
+ // Unmarshal using the v2 JSON unmarshaler.
+ opts := protojson.UnmarshalOptions{
+ DiscardUnknown: u.AllowUnknownFields,
+ }
+ if u.AnyResolver != nil {
+ opts.Resolver = anyResolver{u.AnyResolver}
+ }
+ return opts.Unmarshal(raw, mr.Interface())
+ } else {
+ if err := u.unmarshalMessage(mr, raw); err != nil {
+ return err
+ }
+ return protoV2.CheckInitialized(mr.Interface())
+ }
+}
+
+func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
+ return jsu.UnmarshalJSONPB(u, in)
+ }
+
+ if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
+ return nil
+ }
+
+ switch wellKnownType(md.FullName()) {
+ case "Any":
+ var jsonObject map[string]json.RawMessage
+ if err := json.Unmarshal(in, &jsonObject); err != nil {
+ return err
+ }
+
+ rawTypeURL, ok := jsonObject["@type"]
+ if !ok {
+ return errors.New("Any JSON doesn't have '@type'")
+ }
+ typeURL, err := unquoteString(string(rawTypeURL))
+ if err != nil {
+ return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
+ }
+ m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
+
+ var m2 protoreflect.Message
+ if u.AnyResolver != nil {
+ mi, err := u.AnyResolver.Resolve(typeURL)
+ if err != nil {
+ return err
+ }
+ m2 = proto.MessageReflect(mi)
+ } else {
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
+ if err != nil {
+ if err == protoregistry.NotFound {
+ return fmt.Errorf("could not resolve Any message type: %v", typeURL)
+ }
+ return err
+ }
+ m2 = mt.New()
+ }
+
+ if wellKnownType(m2.Descriptor().FullName()) != "" {
+ rawValue, ok := jsonObject["value"]
+ if !ok {
+ return errors.New("Any JSON doesn't have 'value'")
+ }
+ if err := u.unmarshalMessage(m2, rawValue); err != nil {
+ return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
+ }
+ } else {
+ delete(jsonObject, "@type")
+ rawJSON, err := json.Marshal(jsonObject)
+ if err != nil {
+ return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+ }
+ if err = u.unmarshalMessage(m2, rawJSON); err != nil {
+ return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
+ }
+ }
+
+ rawWire, err := protoV2.Marshal(m2.Interface())
+ if err != nil {
+ return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
+ }
+ m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
+ return nil
+ case "BoolValue", "BytesValue", "StringValue",
+ "Int32Value", "UInt32Value", "FloatValue",
+ "Int64Value", "UInt64Value", "DoubleValue":
+ fd := fds.ByNumber(1)
+ v, err := u.unmarshalValue(m.NewField(fd), in, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ return nil
+ case "Duration":
+ v, err := unquoteString(string(in))
+ if err != nil {
+ return err
+ }
+ d, err := time.ParseDuration(v)
+ if err != nil {
+ return fmt.Errorf("bad Duration: %v", err)
+ }
+
+ sec := d.Nanoseconds() / 1e9
+ nsec := d.Nanoseconds() % 1e9
+ m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
+ m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
+ return nil
+ case "Timestamp":
+ v, err := unquoteString(string(in))
+ if err != nil {
+ return err
+ }
+ t, err := time.Parse(time.RFC3339Nano, v)
+ if err != nil {
+ return fmt.Errorf("bad Timestamp: %v", err)
+ }
+
+ sec := t.Unix()
+ nsec := t.Nanosecond()
+ m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
+ m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
+ return nil
+ case "Value":
+ switch {
+ case string(in) == "null":
+ m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
+ case string(in) == "true":
+ m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
+ case string(in) == "false":
+ m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
+ case hasPrefixAndSuffix('"', in, '"'):
+ s, err := unquoteString(string(in))
+ if err != nil {
+ return fmt.Errorf("unrecognized type for Value %q", in)
+ }
+ m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
+ case hasPrefixAndSuffix('[', in, ']'):
+ v := m.Mutable(fds.ByNumber(6))
+ return u.unmarshalMessage(v.Message(), in)
+ case hasPrefixAndSuffix('{', in, '}'):
+ v := m.Mutable(fds.ByNumber(5))
+ return u.unmarshalMessage(v.Message(), in)
+ default:
+ f, err := strconv.ParseFloat(string(in), 0)
+ if err != nil {
+ return fmt.Errorf("unrecognized type for Value %q", in)
+ }
+ m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
+ }
+ return nil
+ case "ListValue":
+ var jsonArray []json.RawMessage
+ if err := json.Unmarshal(in, &jsonArray); err != nil {
+ return fmt.Errorf("bad ListValue: %v", err)
+ }
+
+ lv := m.Mutable(fds.ByNumber(1)).List()
+ for _, raw := range jsonArray {
+ ve := lv.NewElement()
+ if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
+ return err
+ }
+ lv.Append(ve)
+ }
+ return nil
+ case "Struct":
+ var jsonObject map[string]json.RawMessage
+ if err := json.Unmarshal(in, &jsonObject); err != nil {
+ return fmt.Errorf("bad StructValue: %v", err)
+ }
+
+ mv := m.Mutable(fds.ByNumber(1)).Map()
+ for key, raw := range jsonObject {
+ kv := protoreflect.ValueOf(key).MapKey()
+ vv := mv.NewValue()
+ if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
+ return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
+ }
+ mv.Set(kv, vv)
+ }
+ return nil
+ }
+
+ var jsonObject map[string]json.RawMessage
+ if err := json.Unmarshal(in, &jsonObject); err != nil {
+ return err
+ }
+
+ // Handle known fields.
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if fd.IsWeak() && fd.Message().IsPlaceholder() {
+ continue // weak reference is not linked in
+ }
+
+ // Search for any raw JSON value associated with this field.
+ var raw json.RawMessage
+ name := string(fd.Name())
+ if fd.Kind() == protoreflect.GroupKind {
+ name = string(fd.Message().Name())
+ }
+ if v, ok := jsonObject[name]; ok {
+ delete(jsonObject, name)
+ raw = v
+ }
+ name = string(fd.JSONName())
+ if v, ok := jsonObject[name]; ok {
+ delete(jsonObject, name)
+ raw = v
+ }
+
+ field := m.NewField(fd)
+ // Unmarshal the field value.
+ if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
+ continue
+ }
+ v, err := u.unmarshalValue(field, raw, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ }
+
+ // Handle extension fields.
+ for name, raw := range jsonObject {
+ if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
+ continue
+ }
+
+ // Resolve the extension field by name.
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
+ xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+ if xt == nil && isMessageSet(md) {
+ xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+ }
+ if xt == nil {
+ continue
+ }
+ delete(jsonObject, name)
+ fd := xt.TypeDescriptor()
+ if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+ return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
+ }
+
+ field := m.NewField(fd)
+ // Unmarshal the field value.
+ if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
+ continue
+ }
+ v, err := u.unmarshalValue(field, raw, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ }
+
+ if !u.AllowUnknownFields && len(jsonObject) > 0 {
+ for name := range jsonObject {
+ return fmt.Errorf("unknown field %q in %v", name, md.FullName())
+ }
+ }
+ return nil
+}
+
+func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
+ if md := fd.Message(); md != nil {
+ return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
+ }
+ return false
+}
+
+func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
+ if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
+ _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
+ return ok
+ }
+ return false
+}
+
+func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ switch {
+ case fd.IsList():
+ var jsonArray []json.RawMessage
+ if err := json.Unmarshal(in, &jsonArray); err != nil {
+ return v, err
+ }
+ lv := v.List()
+ for _, raw := range jsonArray {
+ ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(ve)
+ }
+ return v, nil
+ case fd.IsMap():
+ var jsonObject map[string]json.RawMessage
+ if err := json.Unmarshal(in, &jsonObject); err != nil {
+ return v, err
+ }
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := v.Map()
+ for key, raw := range jsonObject {
+ var kv protoreflect.MapKey
+ if kfd.Kind() == protoreflect.StringKind {
+ kv = protoreflect.ValueOf(key).MapKey()
+ } else {
+ v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
+ if err != nil {
+ return v, err
+ }
+ kv = v.MapKey()
+ }
+
+ vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
+ if err != nil {
+ return v, err
+ }
+ mv.Set(kv, vv)
+ }
+ return v, nil
+ default:
+ return u.unmarshalSingularValue(v, in, fd)
+ }
+}
+
+var nonFinite = map[string]float64{
+ `"NaN"`: math.NaN(),
+ `"Infinity"`: math.Inf(+1),
+ `"-Infinity"`: math.Inf(-1),
+}
+
+func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ return unmarshalValue(in, new(bool))
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ return unmarshalValue(trimQuote(in), new(int32))
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return unmarshalValue(trimQuote(in), new(int64))
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ return unmarshalValue(trimQuote(in), new(uint32))
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return unmarshalValue(trimQuote(in), new(uint64))
+ case protoreflect.FloatKind:
+ if f, ok := nonFinite[string(in)]; ok {
+ return protoreflect.ValueOfFloat32(float32(f)), nil
+ }
+ return unmarshalValue(trimQuote(in), new(float32))
+ case protoreflect.DoubleKind:
+ if f, ok := nonFinite[string(in)]; ok {
+ return protoreflect.ValueOfFloat64(float64(f)), nil
+ }
+ return unmarshalValue(trimQuote(in), new(float64))
+ case protoreflect.StringKind:
+ return unmarshalValue(in, new(string))
+ case protoreflect.BytesKind:
+ return unmarshalValue(in, new([]byte))
+ case protoreflect.EnumKind:
+ if hasPrefixAndSuffix('"', in, '"') {
+ vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
+ if vd == nil {
+ return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
+ }
+ return protoreflect.ValueOfEnum(vd.Number()), nil
+ }
+ return unmarshalValue(in, new(protoreflect.EnumNumber))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ err := u.unmarshalMessage(v.Message(), in)
+ return v, err
+ default:
+ panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+ }
+}
+
+func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
+ err := json.Unmarshal(in, v)
+ return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
+}
+
+func unquoteString(in string) (out string, err error) {
+ err = json.Unmarshal([]byte(in), &out)
+ return out, err
+}
+
+func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
+ if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
+ return true
+ }
+ return false
+}
+
+// trimQuote is like unquoteString but simply strips surrounding quotes.
+// This is incorrect, but matches the behavior of the legacy implementation.
+func trimQuote(in []byte) []byte {
+ if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
+ in = in[1 : len(in)-1]
+ }
+ return in
+}
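A minimal sketch of how this decoder is typically driven from user code;
google.protobuf.Struct is used as the target message because it accepts
arbitrary JSON objects, and the sample input is illustrative:

	package main

	import (
		"fmt"
		"strings"

		"github.com/golang/protobuf/jsonpb"
		structpb "github.com/golang/protobuf/ptypes/struct"
	)

	func main() {
		var s structpb.Struct
		u := jsonpb.Unmarshaler{AllowUnknownFields: true}
		if err := u.Unmarshal(strings.NewReader(`{"name": "buildah"}`), &s); err != nil {
			panic(err)
		}
		fmt.Println(s.Fields["name"].GetStringValue()) // buildah
	}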
diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go
new file mode 100644
index 00000000000..685c80a62bc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go
@@ -0,0 +1,559 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonpb
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/encoding/protojson"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapJSONMarshalV2 = false
+
+// Marshaler is a configurable object for marshaling protocol buffer messages
+// to the specified JSON representation.
+type Marshaler struct {
+ // OrigName specifies whether to use the original protobuf name for fields.
+ OrigName bool
+
+ // EnumsAsInts specifies whether to render enum values as integers,
+ // as opposed to string values.
+ EnumsAsInts bool
+
+ // EmitDefaults specifies whether to render fields with zero values.
+ EmitDefaults bool
+
+ // Indent controls whether the output is compact or not.
+ // If empty, the output is compact JSON. Otherwise, every JSON object
+ // entry and JSON array value will be on its own line.
+ // Each line will be preceded by repeated copies of Indent, where the
+ // number of copies is the current indentation depth.
+ Indent string
+
+ // AnyResolver is used to resolve the google.protobuf.Any well-known type.
+ // If unset, the global registry is used by default.
+ AnyResolver AnyResolver
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should also
+// implement JSONPBUnmarshaler so that the custom format can be parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+//
+// Deprecated: Custom types should implement protobuf reflection instead.
+type JSONPBMarshaler interface {
+ MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// Marshal serializes a protobuf message as JSON into w.
+func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
+ b, err := jm.marshal(m)
+ if len(b) > 0 {
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// MarshalToString serializes a protobuf message as JSON in string form.
+func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
+ b, err := jm.marshal(m)
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
+ v := reflect.ValueOf(m)
+ if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, errors.New("Marshal called with nil")
+ }
+
+ // Check for custom marshalers first since they may not properly
+ // implement protobuf reflection that the logic below relies on.
+ if jsm, ok := m.(JSONPBMarshaler); ok {
+ return jsm.MarshalJSONPB(jm)
+ }
+
+ if wrapJSONMarshalV2 {
+ opts := protojson.MarshalOptions{
+ UseProtoNames: jm.OrigName,
+ UseEnumNumbers: jm.EnumsAsInts,
+ EmitUnpopulated: jm.EmitDefaults,
+ Indent: jm.Indent,
+ }
+ if jm.AnyResolver != nil {
+ opts.Resolver = anyResolver{jm.AnyResolver}
+ }
+ return opts.Marshal(proto.MessageReflect(m).Interface())
+ } else {
+ // Check for unpopulated required fields first.
+ m2 := proto.MessageReflect(m)
+ if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
+ return nil, err
+ }
+
+ w := jsonWriter{Marshaler: jm}
+ err := w.marshalMessage(m2, "", "")
+ return w.buf, err
+ }
+}
+
+type jsonWriter struct {
+ *Marshaler
+ buf []byte
+}
+
+func (w *jsonWriter) write(s string) {
+ w.buf = append(w.buf, s...)
+}
+
+func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
+ if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
+ b, err := jsm.MarshalJSONPB(w.Marshaler)
+ if err != nil {
+ return err
+ }
+ if typeURL != "" {
+ // we are marshaling this object to an Any type
+ var js map[string]*json.RawMessage
+ if err = json.Unmarshal(b, &js); err != nil {
+ return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
+ }
+ turl, err := json.Marshal(typeURL)
+ if err != nil {
+ return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+ }
+ js["@type"] = (*json.RawMessage)(&turl)
+ if b, err = json.Marshal(js); err != nil {
+ return err
+ }
+ }
+ w.write(string(b))
+ return nil
+ }
+
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ // Handle well-known types.
+ const secondInNanos = int64(time.Second / time.Nanosecond)
+ switch wellKnownType(md.FullName()) {
+ case "Any":
+ return w.marshalAny(m, indent)
+ case "BoolValue", "BytesValue", "StringValue",
+ "Int32Value", "UInt32Value", "FloatValue",
+ "Int64Value", "UInt64Value", "DoubleValue":
+ fd := fds.ByNumber(1)
+ return w.marshalValue(fd, m.Get(fd), indent)
+ case "Duration":
+ const maxSecondsInDuration = 315576000000
+ // "Generated output always contains 0, 3, 6, or 9 fractional digits,
+ // depending on required precision."
+ s := m.Get(fds.ByNumber(1)).Int()
+ ns := m.Get(fds.ByNumber(2)).Int()
+ if s < -maxSecondsInDuration || s > maxSecondsInDuration {
+ return fmt.Errorf("seconds out of range %v", s)
+ }
+ if ns <= -secondInNanos || ns >= secondInNanos {
+ return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+ }
+ if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+ return errors.New("signs of seconds and nanos do not match")
+ }
+ var sign string
+ if s < 0 || ns < 0 {
+ sign, s, ns = "-", -1*s, -1*ns
+ }
+ x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ w.write(fmt.Sprintf(`"%vs"`, x))
+ return nil
+ case "Timestamp":
+ // "RFC 3339, where generated output will always be Z-normalized
+ // and uses 0, 3, 6 or 9 fractional digits."
+ s := m.Get(fds.ByNumber(1)).Int()
+ ns := m.Get(fds.ByNumber(2)).Int()
+ if ns < 0 || ns >= secondInNanos {
+ return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+ }
+ t := time.Unix(s, ns).UTC()
+ // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+ x := t.Format("2006-01-02T15:04:05.000000000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ w.write(fmt.Sprintf(`"%vZ"`, x))
+ return nil
+ case "Value":
+ // JSON value; which is a null, number, string, bool, object, or array.
+ od := md.Oneofs().Get(0)
+ fd := m.WhichOneof(od)
+ if fd == nil {
+ return errors.New("nil Value")
+ }
+ return w.marshalValue(fd, m.Get(fd), indent)
+ case "Struct", "ListValue":
+ // JSON object or array.
+ fd := fds.ByNumber(1)
+ return w.marshalValue(fd, m.Get(fd), indent)
+ }
+
+ w.write("{")
+ if w.Indent != "" {
+ w.write("\n")
+ }
+
+ firstField := true
+ if typeURL != "" {
+ if err := w.marshalTypeURL(indent, typeURL); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ if fd == nil {
+ continue
+ }
+ } else {
+ i++
+ }
+
+ v := m.Get(fd)
+
+ if !m.Has(fd) {
+ if !w.EmitDefaults || fd.ContainingOneof() != nil {
+ continue
+ }
+ if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
+ v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
+ }
+ }
+
+ if !firstField {
+ w.writeComma()
+ }
+ if err := w.marshalField(fd, v, indent); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ // Handle proto2 extensions.
+ if md.ExtensionRanges().Len() > 0 {
+ // Collect a sorted list of all extension descriptor and values.
+ type ext struct {
+ desc protoreflect.FieldDescriptor
+ val protoreflect.Value
+ }
+ var exts []ext
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ exts = append(exts, ext{fd, v})
+ }
+ return true
+ })
+ sort.Slice(exts, func(i, j int) bool {
+ return exts[i].desc.Number() < exts[j].desc.Number()
+ })
+
+ for _, ext := range exts {
+ if !firstField {
+ w.writeComma()
+ }
+ if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
+ return err
+ }
+ firstField = false
+ }
+ }
+
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ }
+ w.write("}")
+ return nil
+}
+
+func (w *jsonWriter) writeComma() {
+ if w.Indent != "" {
+ w.write(",\n")
+ } else {
+ w.write(",")
+ }
+}
+
+func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
+ // "If the Any contains a value that has a special JSON mapping,
+ // it will be converted as follows: {"@type": xxx, "value": yyy}.
+ // Otherwise, the value will be converted into a JSON object,
+ // and the "@type" field will be inserted to indicate the actual data type."
+ md := m.Descriptor()
+ typeURL := m.Get(md.Fields().ByNumber(1)).String()
+ rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
+
+ var m2 protoreflect.Message
+ if w.AnyResolver != nil {
+ mi, err := w.AnyResolver.Resolve(typeURL)
+ if err != nil {
+ return err
+ }
+ m2 = proto.MessageReflect(mi)
+ } else {
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
+ if err != nil {
+ return err
+ }
+ m2 = mt.New()
+ }
+
+ if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
+ return err
+ }
+
+ if wellKnownType(m2.Descriptor().FullName()) == "" {
+ return w.marshalMessage(m2, indent, typeURL)
+ }
+
+ w.write("{")
+ if w.Indent != "" {
+ w.write("\n")
+ }
+ if err := w.marshalTypeURL(indent, typeURL); err != nil {
+ return err
+ }
+ w.writeComma()
+ if w.Indent != "" {
+ w.write(indent)
+ w.write(w.Indent)
+ w.write(`"value": `)
+ } else {
+ w.write(`"value":`)
+ }
+ if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
+ return err
+ }
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ }
+ w.write("}")
+ return nil
+}
+
+func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
+ if w.Indent != "" {
+ w.write(indent)
+ w.write(w.Indent)
+ }
+ w.write(`"@type":`)
+ if w.Indent != "" {
+ w.write(" ")
+ }
+ b, err := json.Marshal(typeURL)
+ if err != nil {
+ return err
+ }
+ w.write(string(b))
+ return nil
+}
+
+// marshalField writes field description and value to the Writer.
+func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+ if w.Indent != "" {
+ w.write(indent)
+ w.write(w.Indent)
+ }
+ w.write(`"`)
+ switch {
+ case fd.IsExtension():
+ // For message set, use the fname of the message as the extension name.
+ name := string(fd.FullName())
+ if isMessageSet(fd.ContainingMessage()) {
+ name = strings.TrimSuffix(name, ".message_set_extension")
+ }
+
+ w.write("[" + name + "]")
+ case w.OrigName:
+ name := string(fd.Name())
+ if fd.Kind() == protoreflect.GroupKind {
+ name = string(fd.Message().Name())
+ }
+ w.write(name)
+ default:
+ w.write(string(fd.JSONName()))
+ }
+ w.write(`":`)
+ if w.Indent != "" {
+ w.write(" ")
+ }
+ return w.marshalValue(fd, v, indent)
+}
+
+func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+ switch {
+ case fd.IsList():
+ w.write("[")
+ comma := ""
+ lv := v.List()
+ for i := 0; i < lv.Len(); i++ {
+ w.write(comma)
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ w.write(w.Indent)
+ w.write(w.Indent)
+ }
+ if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
+ return err
+ }
+ comma = ","
+ }
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ w.write(w.Indent)
+ }
+ w.write("]")
+ return nil
+ case fd.IsMap():
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := v.Map()
+
+ // Collect a sorted list of all map keys and values.
+ type entry struct{ key, val protoreflect.Value }
+ var entries []entry
+ mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ entries = append(entries, entry{k.Value(), v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ switch kfd.Kind() {
+ case protoreflect.BoolKind:
+ return !entries[i].key.Bool() && entries[j].key.Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return entries[i].key.Int() < entries[j].key.Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return entries[i].key.Uint() < entries[j].key.Uint()
+ case protoreflect.StringKind:
+ return entries[i].key.String() < entries[j].key.String()
+ default:
+ panic("invalid kind")
+ }
+ })
+
+ w.write(`{`)
+ comma := ""
+ for _, entry := range entries {
+ w.write(comma)
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ w.write(w.Indent)
+ w.write(w.Indent)
+ }
+
+ s := fmt.Sprint(entry.key.Interface())
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ w.write(string(b))
+
+ w.write(`:`)
+ if w.Indent != "" {
+ w.write(` `)
+ }
+
+ if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
+ return err
+ }
+ comma = ","
+ }
+ if w.Indent != "" {
+ w.write("\n")
+ w.write(indent)
+ w.write(w.Indent)
+ }
+ w.write(`}`)
+ return nil
+ default:
+ return w.marshalSingularValue(fd, v, indent)
+ }
+}
+
+func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+ switch {
+ case !v.IsValid():
+ w.write("null")
+ return nil
+ case fd.Message() != nil:
+ return w.marshalMessage(v.Message(), indent+w.Indent, "")
+ case fd.Enum() != nil:
+ if fd.Enum().FullName() == "google.protobuf.NullValue" {
+ w.write("null")
+ return nil
+ }
+
+ vd := fd.Enum().Values().ByNumber(v.Enum())
+ if vd == nil || w.EnumsAsInts {
+ w.write(strconv.Itoa(int(v.Enum())))
+ } else {
+ w.write(`"` + string(vd.Name()) + `"`)
+ }
+ return nil
+ default:
+ switch v.Interface().(type) {
+ case float32, float64:
+ switch {
+ case math.IsInf(v.Float(), +1):
+ w.write(`"Infinity"`)
+ return nil
+ case math.IsInf(v.Float(), -1):
+ w.write(`"-Infinity"`)
+ return nil
+ case math.IsNaN(v.Float()):
+ w.write(`"NaN"`)
+ return nil
+ }
+ case int64, uint64:
+ w.write(fmt.Sprintf(`"%d"`, v.Interface()))
+ return nil
+ }
+
+ b, err := json.Marshal(v.Interface())
+ if err != nil {
+ return err
+ }
+ w.write(string(b))
+ return nil
+ }
+}
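A short sketch of the Marshaler knobs defined above; msg stands in for any
github.com/golang/protobuf/proto.Message and is an assumption of the example:

	m := jsonpb.Marshaler{
		OrigName:     true, // original proto field names instead of lowerCamelCase
		EnumsAsInts:  false,
		EmitDefaults: true, // render zero-valued fields instead of omitting them
		Indent:       "  ", // pretty-print with two spaces per level
	}
	out, err := m.MarshalToString(msg)

With wrapJSONMarshalV2 enabled these options map directly onto
protojson.MarshalOptions, as shown in marshal above; either path honors the
same four fields.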
diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go
new file mode 100644
index 00000000000..480e2448de6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/json.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jsonpb provides functionality to marshal and unmarshal between a
+// protocol buffer message and JSON. It follows the specification at
+// https://developers.google.com/protocol-buffers/docs/proto3#json.
+//
+// Do not rely on the default behavior of the standard encoding/json package
+// when called on generated message types as it does not operate correctly.
+//
+// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
+// package instead.
+package jsonpb
+
+import (
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// AnyResolver takes a type URL, present in an Any message,
+// and resolves it into an instance of the associated message.
+type AnyResolver interface {
+ Resolve(typeURL string) (proto.Message, error)
+}
+
+type anyResolver struct{ AnyResolver }
+
+func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+ return r.FindMessageByURL(string(message))
+}
+
+func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+ m, err := r.Resolve(url)
+ if err != nil {
+ return nil, err
+ }
+ return protoimpl.X.MessageTypeOf(m), nil
+}
+
+func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ return protoregistry.GlobalTypes.FindExtensionByName(field)
+}
+
+func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+}
+
+func wellKnownType(s protoreflect.FullName) string {
+ if s.Parent() == "google.protobuf" {
+ switch s.Name() {
+ case "Empty", "Any",
+ "BoolValue", "BytesValue", "StringValue",
+ "Int32Value", "UInt32Value", "FloatValue",
+ "Int64Value", "UInt64Value", "DoubleValue",
+ "Duration", "Timestamp",
+ "NullValue", "Struct", "Value", "ListValue":
+ return string(s.Name())
+ }
+ }
+ return ""
+}
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+ ms, ok := md.(interface{ IsMessageSet() bool })
+ return ok && ms.IsMessageSet()
+}
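A hedged sketch of plugging a custom AnyResolver into the Marshaler or
Unmarshaler; the single-URL resolver, its type URL, and the message value are
illustrative:

	type oneTypeResolver struct {
		url string
		msg proto.Message
	}

	func (r oneTypeResolver) Resolve(typeURL string) (proto.Message, error) {
		if typeURL != r.url {
			return nil, fmt.Errorf("unknown type URL %q", typeURL)
		}
		// Hand back a fresh copy so callers can unmarshal into it safely.
		return proto.Clone(r.msg), nil
	}

	// u := jsonpb.Unmarshaler{AnyResolver: oneTypeResolver{url: "...", msg: someMsg}}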
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/google/go-containerregistry/LICENSE
similarity index 99%
rename from vendor/github.com/prometheus/procfs/LICENSE
rename to vendor/github.com/google/go-containerregistry/LICENSE
index 261eeb9e9f8..7a4a3ea2424 100644
--- a/vendor/github.com/prometheus/procfs/LICENSE
+++ b/vendor/github.com/google/go-containerregistry/LICENSE
@@ -1,3 +1,4 @@
+
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -198,4 +199,4 @@
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
- limitations under the License.
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/README.md b/vendor/github.com/google/go-containerregistry/pkg/name/README.md
new file mode 100644
index 00000000000..4889b8446a4
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/README.md
@@ -0,0 +1,3 @@
+# `name`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/name)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/vendor/github.com/google/go-containerregistry/pkg/name/check.go
new file mode 100644
index 00000000000..e9a240a3e5a
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/check.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+// stripRunesFn returns a function which returns -1 (i.e. a value which
+// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise.
+func stripRunesFn(runes string) func(rune) rune {
+ return func(r rune) rune {
+ if strings.ContainsRune(runes, r) {
+ return -1
+ }
+ return r
+ }
+}
+
+// checkElement checks that a given named element matches character and length restrictions.
+// It returns nil if the element adheres to the given restrictions, and an ErrBadName otherwise.
+func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error {
+ numRunes := utf8.RuneCountInString(element)
+ if (numRunes < minRunes) || (maxRunes < numRunes) {
+ return newErrBadName("%s must be between %d and %d characters in length: %s", name, minRunes, maxRunes, element)
+ } else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 {
+ return newErrBadName("%s can only contain the characters `%s`: %s", name, allowedRunes, element)
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
new file mode 100644
index 00000000000..c4a2e693e33
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
@@ -0,0 +1,93 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ _ "crypto/sha256" // Recommended by go-digest.
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const digestDelim = "@"
+
+// Digest stores a digest name in a structured form.
+type Digest struct {
+ Repository
+ digest string
+ original string
+}
+
+// Ensure Digest implements Reference
+var _ Reference = (*Digest)(nil)
+
+// Context implements Reference.
+func (d Digest) Context() Repository {
+ return d.Repository
+}
+
+// Identifier implements Reference.
+func (d Digest) Identifier() string {
+ return d.DigestStr()
+}
+
+// DigestStr returns the digest component of the Digest.
+func (d Digest) DigestStr() string {
+ return d.digest
+}
+
+// Name returns the name from which the Digest was derived.
+func (d Digest) Name() string {
+ return d.Repository.Name() + digestDelim + d.DigestStr()
+}
+
+// String returns the original input string.
+func (d Digest) String() string {
+ return d.original
+}
+
+// NewDigest returns a new Digest representing the given name.
+func NewDigest(name string, opts ...Option) (Digest, error) {
+ // Split on "@"
+ parts := strings.Split(name, digestDelim)
+ if len(parts) != 2 {
+ return Digest{}, newErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name)
+ }
+ base := parts[0]
+ dig := parts[1]
+ prefix := digest.Canonical.String() + ":"
+ if !strings.HasPrefix(dig, prefix) {
+ return Digest{}, newErrBadName("unsupported digest algorithm: %s", dig)
+ }
+ hex := strings.TrimPrefix(dig, prefix)
+ if err := digest.Canonical.Validate(hex); err != nil {
+ return Digest{}, err
+ }
+
+ tag, err := NewTag(base, opts...)
+ if err == nil {
+ base = tag.Repository.Name()
+ }
+
+ repo, err := NewRepository(base, opts...)
+ if err != nil {
+ return Digest{}, err
+ }
+ return Digest{
+ Repository: repo,
+ digest: dig,
+ original: name,
+ }, nil
+}
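A usage sketch for NewDigest; the repository name is illustrative and the
digest hex is a dummy 64-character value that passes Canonical.Validate:

	d, err := name.NewDigest("gcr.io/example/app@sha256:" + strings.Repeat("a", 64))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.Context().Name()) // gcr.io/example/app
	fmt.Println(d.Identifier())     // sha256:aaa...a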
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/doc.go b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go
new file mode 100644
index 00000000000..b294794dc12
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go
@@ -0,0 +1,42 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package name defines structured types for representing image references.
+//
+// What's in a name? For image references, not nearly enough!
+//
+// Image references look a lot like URLs, but they differ in that they don't
+// contain the scheme (http or https), they can end with a :tag or a @digest
+// (the latter being validated), and they perform defaulting for missing
+// components.
+//
+// Since image references don't contain the scheme, we do our best to infer
+// if we use http or https from the given hostname. We allow http fallback for
+// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in
+// ".local", or is in the "private" address space per RFC 1918. For everything
+// else, we assume https only. To override this heuristic, use the Insecure
+// option.
+//
+// Image references with a digest signal to us that we should verify the content
+// of the image matches the digest. E.g. when pulling a Digest reference, we'll
+// calculate the sha256 of the manifest returned by the registry and error out
+// if it doesn't match what we asked for.
+//
+// For defaulting, we interpret "ubuntu" as
+// "index.docker.io/library/ubuntu:latest" because we add the missing repo
+// "library", the missing registry "index.docker.io", and the missing tag
+// "latest". To disable this defaulting, use the StrictValidation option. This
+// is useful e.g. to only allow image references that explicitly set a tag or
+// digest, so that you don't accidentally pull "latest".
+package name
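The defaulting described above is easiest to see through ParseReference; a
small sketch:

	ref, _ := name.ParseReference("ubuntu")
	fmt.Println(ref.Name()) // index.docker.io/library/ubuntu:latest

	_, err := name.ParseReference("ubuntu", name.StrictValidation)
	// err != nil: strict mode rejects the implicit registry, repo, and tag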
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go
new file mode 100644
index 00000000000..035e3506939
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go
@@ -0,0 +1,42 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrBadName is an error for when a bad docker name is supplied.
+type ErrBadName struct {
+ info string
+}
+
+func (e *ErrBadName) Error() string {
+ return e.info
+}
+
+// newErrBadName returns an ErrBadName whose Error() returns the given formatted string.
+func newErrBadName(fmtStr string, args ...interface{}) *ErrBadName {
+ return &ErrBadName{fmt.Sprintf(fmtStr, args...)}
+}
+
+// IsErrBadName returns true if the given error is an ErrBadName.
+//
+// Deprecated: Use errors.As, which is what this helper wraps.
+func IsErrBadName(err error) bool {
+ var berr *ErrBadName
+ return errors.As(err, &berr)
+}
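Callers are expected to detect bad names with errors.As rather than the
deprecated helper; a sketch using an intentionally invalid (uppercase)
repository name:

	if _, err := name.NewTag("UPPERCASE/repo:v1"); err != nil {
		var berr *name.ErrBadName
		if errors.As(err, &berr) {
			fmt.Println("bad reference:", berr)
		}
	}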
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/options.go b/vendor/github.com/google/go-containerregistry/pkg/name/options.go
new file mode 100644
index 00000000000..d14fedcdadf
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/options.go
@@ -0,0 +1,83 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+const (
+ // DefaultRegistry is the registry name that will be used if no registry
+ // provided and the default is not overridden.
+ DefaultRegistry = "index.docker.io"
+ defaultRegistryAlias = "docker.io"
+
+ // DefaultTag is the tag name that will be used if no tag provided and the
+ // default is not overridden.
+ DefaultTag = "latest"
+)
+
+type options struct {
+ strict bool // weak by default
+ insecure bool // secure by default
+ defaultRegistry string
+ defaultTag string
+}
+
+func makeOptions(opts ...Option) options {
+ opt := options{
+ defaultRegistry: DefaultRegistry,
+ defaultTag: DefaultTag,
+ }
+ for _, o := range opts {
+ o(&opt)
+ }
+ return opt
+}
+
+// Option is a functional option for name parsing.
+type Option func(*options)
+
+// StrictValidation is an Option that requires image references to be fully
+// specified; i.e. no defaulting for registry (dockerhub), repo (library),
+// or tag (latest).
+func StrictValidation(opts *options) {
+ opts.strict = true
+}
+
+// WeakValidation is an Option that sets defaults when parsing names, see
+// StrictValidation.
+func WeakValidation(opts *options) {
+ opts.strict = false
+}
+
+// Insecure is an Option that allows image references to be fetched without TLS.
+func Insecure(opts *options) {
+ opts.insecure = true
+}
+
+// OptionFn is a function that returns an option.
+type OptionFn func() Option
+
+// WithDefaultRegistry sets the default registry that will be used if one is not
+// provided.
+func WithDefaultRegistry(r string) Option {
+ return func(opts *options) {
+ opts.defaultRegistry = r
+ }
+}
+
+// WithDefaultTag sets the default tag that will be used if one is not provided.
+func WithDefaultTag(t string) Option {
+ return func(opts *options) {
+ opts.defaultTag = t
+ }
+}
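A sketch of the functional options in combination; the mirror hostname is an
illustrative assumption:

	t, err := name.NewTag("ubuntu",
		name.WithDefaultRegistry("registry.example.com"),
		name.WithDefaultTag("stable"),
	)
	// t.Name() == "registry.example.com/ubuntu:stable"; no "library/" prefix is
	// added because the implicit namespace applies only to index.docker.io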
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go
new file mode 100644
index 00000000000..955c59a7bb6
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go
@@ -0,0 +1,75 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "fmt"
+)
+
+// Reference defines the interface that consumers use when they can
+// take either a tag or a digest.
+type Reference interface {
+ fmt.Stringer
+
+ // Context accesses the Repository context of the reference.
+ Context() Repository
+
+ // Identifier accesses the type-specific portion of the reference.
+ Identifier() string
+
+ // Name is the fully-qualified reference name.
+ Name() string
+
+ // Scope is the scope needed to access this reference.
+ Scope(string) string
+}
+
+// ParseReference parses the string as a reference, either by tag or digest.
+func ParseReference(s string, opts ...Option) (Reference, error) {
+ if t, err := NewTag(s, opts...); err == nil {
+ return t, nil
+ }
+ if d, err := NewDigest(s, opts...); err == nil {
+ return d, nil
+ }
+ return nil, newErrBadName("could not parse reference: " + s)
+}
+
+type stringConst string
+
+// MustParseReference behaves like ParseReference, but panics instead of
+// returning an error. It's intended for use in tests, or when a value is
+// expected to be valid at code authoring time.
+//
+// To discourage its use in scenarios where the value is not known at code
+// authoring time, it must be passed a string constant:
+//
+// const str = "valid/string"
+// MustParseReference(str)
+// MustParseReference("another/valid/string")
+// MustParseReference(str + "/and/more")
+//
+// These will not compile:
+//
+// var str = "valid/string"
+// MustParseReference(str)
+// MustParseReference(strings.Join([]string{"valid", "string"}, "/"))
+func MustParseReference(s stringConst, opts ...Option) Reference {
+ ref, err := ParseReference(string(s), opts...)
+ if err != nil {
+ panic(err)
+ }
+ return ref
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go
new file mode 100644
index 00000000000..2a26b66d046
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go
@@ -0,0 +1,136 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "net"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// Detect more complex forms of local references.
+var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`)
+
+// Detect the loopback IP (127.0.0.1)
+var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1"))
+
+// Detect the loopback IPV6 (::1)
+var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1"))
+
+// Registry stores a docker registry name in a structured form.
+type Registry struct {
+ insecure bool
+ registry string
+}
+
+// RegistryStr returns the registry component of the Registry.
+func (r Registry) RegistryStr() string {
+ return r.registry
+}
+
+// Name returns the name from which the Registry was derived.
+func (r Registry) Name() string {
+ return r.RegistryStr()
+}
+
+func (r Registry) String() string {
+ return r.Name()
+}
+
+// Scope returns the scope required to access the registry.
+func (r Registry) Scope(string) string {
+ // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z
+ return "registry:catalog:*"
+}
+
+func (r Registry) isRFC1918() bool {
+ ipStr := strings.Split(r.Name(), ":")[0]
+ ip := net.ParseIP(ipStr)
+ if ip == nil {
+ return false
+ }
+ for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} {
+ _, block, _ := net.ParseCIDR(cidr)
+ if block.Contains(ip) {
+ return true
+ }
+ }
+ return false
+}
+
+// Scheme returns "https" for all endpoints except local ones or registries explicitly marked insecure, which get "http".
+func (r Registry) Scheme() string {
+ if r.insecure {
+ return "http"
+ }
+ if r.isRFC1918() {
+ return "http"
+ }
+ if strings.HasPrefix(r.Name(), "localhost:") {
+ return "http"
+ }
+ if reLocal.MatchString(r.Name()) {
+ return "http"
+ }
+ if reLoopback.MatchString(r.Name()) {
+ return "http"
+ }
+ if reipv6Loopback.MatchString(r.Name()) {
+ return "http"
+ }
+ return "https"
+}
+
+func checkRegistry(name string) error {
+ // Per RFC 3986, registries (authorities) are required to be prefixed with "//"
+ // url.Host == hostname[:port] == authority
+ if url, err := url.Parse("//" + name); err != nil || url.Host != name {
+ return newErrBadName("registries must be valid RFC 3986 URI authorities: %s", name)
+ }
+ return nil
+}
+
+// NewRegistry returns a Registry based on the given name.
+// Strict validation requires explicit, valid RFC 3986 URI authorities to be given.
+func NewRegistry(name string, opts ...Option) (Registry, error) {
+ opt := makeOptions(opts...)
+ if opt.strict && len(name) == 0 {
+ return Registry{}, newErrBadName("strict validation requires the registry to be explicitly defined")
+ }
+
+ if err := checkRegistry(name); err != nil {
+ return Registry{}, err
+ }
+
+ if name == "" {
+ name = opt.defaultRegistry
+ }
+ // Rewrite "docker.io" to "index.docker.io".
+ // See: https://github.com/google/go-containerregistry/issues/68
+ if name == defaultRegistryAlias {
+ name = DefaultRegistry
+ }
+
+ return Registry{registry: name, insecure: opt.insecure}, nil
+}
+
+// NewInsecureRegistry returns an Insecure Registry based on the given name.
+//
+// Deprecated: Use the Insecure Option with NewRegistry instead.
+func NewInsecureRegistry(name string, opts ...Option) (Registry, error) {
+ opts = append(opts, Insecure)
+ return NewRegistry(name, opts...)
+}
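The Scheme heuristic above, sketched with illustrative hostnames:

	r1, _ := name.NewRegistry("localhost:5000")
	r2, _ := name.NewRegistry("10.0.0.7:5000") // RFC 1918 address
	r3, _ := name.NewRegistry("ghcr.io")
	r4, _ := name.NewRegistry("ghcr.io", name.Insecure)
	// r1.Scheme() == "http", r2.Scheme() == "http"
	// r3.Scheme() == "https", r4.Scheme() == "http"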
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go
new file mode 100644
index 00000000000..9250e362525
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go
@@ -0,0 +1,121 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ defaultNamespace = "library"
+ repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./"
+ regRepoDelimiter = "/"
+)
+
+// Repository stores a docker repository name in a structured form.
+type Repository struct {
+ Registry
+ repository string
+}
+
+// See https://docs.docker.com/docker-hub/official_repos
+func hasImplicitNamespace(repo string, reg Registry) bool {
+ return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry
+}
+
+// RepositoryStr returns the repository component of the Repository.
+func (r Repository) RepositoryStr() string {
+ if hasImplicitNamespace(r.repository, r.Registry) {
+ return fmt.Sprintf("%s/%s", defaultNamespace, r.repository)
+ }
+ return r.repository
+}
+
+// Name returns the name from which the Repository was derived.
+func (r Repository) Name() string {
+ regName := r.Registry.Name()
+ if regName != "" {
+ return regName + regRepoDelimiter + r.RepositoryStr()
+ }
+ // TODO: As far as I can tell, this is unreachable.
+ return r.RepositoryStr()
+}
+
+func (r Repository) String() string {
+ return r.Name()
+}
+
+// Scope returns the scope required to perform the given action on the registry.
+// TODO(jonjohnsonjr): consider moving scopes to a separate package.
+func (r Repository) Scope(action string) string {
+ return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action)
+}
+
+func checkRepository(repository string) error {
+ return checkElement("repository", repository, repositoryChars, 2, 255)
+}
+
+// NewRepository returns a new Repository representing the given name, according to the given strictness.
+func NewRepository(name string, opts ...Option) (Repository, error) {
+ opt := makeOptions(opts...)
+ if len(name) == 0 {
+ return Repository{}, newErrBadName("a repository name must be specified")
+ }
+
+ var registry string
+ repo := name
+ parts := strings.SplitN(name, regRepoDelimiter, 2)
+ if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) {
+ // The first part of the repository is treated as the registry domain
+ // iff it contains a '.' or ':' character, otherwise it is all repository
+ // and the domain defaults to Docker Hub.
+ registry = parts[0]
+ repo = parts[1]
+ }
+
+ if err := checkRepository(repo); err != nil {
+ return Repository{}, err
+ }
+
+ reg, err := NewRegistry(registry, opts...)
+ if err != nil {
+ return Repository{}, err
+ }
+ if hasImplicitNamespace(repo, reg) && opt.strict {
+ return Repository{}, newErrBadName("strict validation requires the full repository path (missing 'library')")
+ }
+ return Repository{reg, repo}, nil
+}
+
+// Tag returns a Tag in this Repository.
+func (r Repository) Tag(identifier string) Tag {
+ t := Tag{
+ tag: identifier,
+ Repository: r,
+ }
+ t.original = t.Name()
+ return t
+}
+
+// Digest returns a Digest in this Repository.
+func (r Repository) Digest(identifier string) Digest {
+ d := Digest{
+ digest: identifier,
+ Repository: r,
+ }
+ d.original = d.Name()
+ return d
+}
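The registry-detection rule in NewRepository hinges on a '.' or ':' in the
first path component; a short sketch:

	r1, _ := name.NewRepository("busybox")
	fmt.Println(r1.Name()) // index.docker.io/library/busybox

	r2, _ := name.NewRepository("quay.io/podman/stable")
	// "quay.io" contains a '.', so it is parsed as the registry:
	fmt.Println(r2.RegistryStr(), r2.RepositoryStr()) // quay.io podman/stable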
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go
new file mode 100644
index 00000000000..66bd1bec3d1
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go
@@ -0,0 +1,108 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "strings"
+)
+
+const (
+ // TODO(dekkagaijin): use the docker/distribution regexes for validation.
+ tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ tagDelim = ":"
+)
+
+// Tag stores a docker tag name in a structured form.
+type Tag struct {
+ Repository
+ tag string
+ original string
+}
+
+// Ensure Tag implements Reference
+var _ Reference = (*Tag)(nil)
+
+// Context implements Reference.
+func (t Tag) Context() Repository {
+ return t.Repository
+}
+
+// Identifier implements Reference.
+func (t Tag) Identifier() string {
+ return t.TagStr()
+}
+
+// TagStr returns the tag component of the Tag.
+func (t Tag) TagStr() string {
+ return t.tag
+}
+
+// Name returns the name from which the Tag was derived.
+func (t Tag) Name() string {
+ return t.Repository.Name() + tagDelim + t.TagStr()
+}
+
+// String returns the original input string.
+func (t Tag) String() string {
+ return t.original
+}
+
+// Scope returns the scope required to perform the given action on the tag.
+func (t Tag) Scope(action string) string {
+ return t.Repository.Scope(action)
+}
+
+func checkTag(name string) error {
+ return checkElement("tag", name, tagChars, 1, 128)
+}
+
+// NewTag returns a new Tag representing the given name, according to the given strictness.
+func NewTag(name string, opts ...Option) (Tag, error) {
+ opt := makeOptions(opts...)
+ base := name
+ tag := ""
+
+ // Split on ":"
+ parts := strings.Split(name, tagDelim)
+ // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation.
+ if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) {
+ base = strings.Join(parts[:len(parts)-1], tagDelim)
+ tag = parts[len(parts)-1]
+ }
+
+ // We don't require a tag, but if we get one check it's valid,
+ // even when not being strict.
+ // If we are being strict, we want to validate the tag regardless in case
+ // it's empty.
+ if tag != "" || opt.strict {
+ if err := checkTag(tag); err != nil {
+ return Tag{}, err
+ }
+ }
+
+ if tag == "" {
+ tag = opt.defaultTag
+ }
+
+ repo, err := NewRepository(base, opts...)
+ if err != nil {
+ return Tag{}, err
+ }
+ return Tag{
+ Repository: repo,
+ tag: tag,
+ original: name,
+ }, nil
+}
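
A small usage sketch of the parsing behavior above. It assumes the package's StrictValidation option and default tag, both of which are defined elsewhere in this vendored package (not in this hunk):

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// Weak validation: a missing tag falls back to the configured default.
	t, err := name.NewTag("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Name()) // index.docker.io/library/ubuntu:latest

	// Strict validation: an empty tag is rejected by checkTag.
	if _, err := name.NewTag("ubuntu", name.StrictValidation); err != nil {
		fmt.Println("strict parse failed:", err)
	}
}
```
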
diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go
new file mode 100644
index 00000000000..d7fcbf28651
--- /dev/null
+++ b/vendor/github.com/google/uuid/null.go
@@ -0,0 +1,118 @@
+// Copyright 2021 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+)
+
+var jsonNull = []byte("null")
+
+// NullUUID represents a UUID that may be null.
+// NullUUID implements the SQL driver.Scanner interface so
+// it can be used as a scan destination:
+//
+// var u uuid.NullUUID
+// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
+// ...
+// if u.Valid {
+// // use u.UUID
+// } else {
+// // NULL value
+// }
+//
+type NullUUID struct {
+ UUID UUID
+ Valid bool // Valid is true if UUID is not NULL
+}
+
+// Scan implements the SQL driver.Scanner interface.
+func (nu *NullUUID) Scan(value interface{}) error {
+ if value == nil {
+ nu.UUID, nu.Valid = Nil, false
+ return nil
+ }
+
+ err := nu.UUID.Scan(value)
+ if err != nil {
+ nu.Valid = false
+ return err
+ }
+
+ nu.Valid = true
+ return nil
+}
+
+// Value implements the driver Valuer interface.
+func (nu NullUUID) Value() (driver.Value, error) {
+ if !nu.Valid {
+ return nil, nil
+ }
+ // Delegate to UUID Value function
+ return nu.UUID.Value()
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (nu NullUUID) MarshalBinary() ([]byte, error) {
+ if nu.Valid {
+ return nu.UUID[:], nil
+ }
+
+ return []byte(nil), nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (nu *NullUUID) UnmarshalBinary(data []byte) error {
+ if len(data) != 16 {
+ return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+ }
+ copy(nu.UUID[:], data)
+ nu.Valid = true
+ return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (nu NullUUID) MarshalText() ([]byte, error) {
+ if nu.Valid {
+ return nu.UUID.MarshalText()
+ }
+
+ return jsonNull, nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (nu *NullUUID) UnmarshalText(data []byte) error {
+ id, err := ParseBytes(data)
+ if err != nil {
+ nu.Valid = false
+ return err
+ }
+ nu.UUID = id
+ nu.Valid = true
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (nu NullUUID) MarshalJSON() ([]byte, error) {
+ if nu.Valid {
+ return json.Marshal(nu.UUID)
+ }
+
+ return jsonNull, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (nu *NullUUID) UnmarshalJSON(data []byte) error {
+ if bytes.Equal(data, jsonNull) {
+ *nu = NullUUID{}
+ return nil // valid null UUID
+ }
+ err := json.Unmarshal(data, &nu.UUID)
+ nu.Valid = err == nil
+ return err
+}
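
To illustrate the null-handling contract above, a short JSON round-trip sketch (the UUID literal is just an example value):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	var nu uuid.NullUUID

	// JSON null leaves the zero value and marks it invalid.
	if err := json.Unmarshal([]byte(`null`), &nu); err != nil {
		panic(err)
	}
	fmt.Println(nu.Valid) // false

	// A well-formed UUID string parses and sets Valid.
	if err := json.Unmarshal([]byte(`"c56a4180-65aa-42ec-a945-5fd21dec0538"`), &nu); err != nil {
		panic(err)
	}
	fmt.Println(nu.Valid, nu.UUID)
}
```
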
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index 60d26bb50c6..a57207aeb6f 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -12,6 +12,7 @@ import (
"fmt"
"io"
"strings"
+ "sync"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
@@ -33,7 +34,15 @@ const (
Future // Reserved for future definition.
)
-var rander = rand.Reader // random function
+const randPoolSize = 16 * 16
+
+var (
+ rander = rand.Reader // random function
+ poolEnabled = false
+ poolMu sync.Mutex
+ poolPos = randPoolSize // protected with poolMu
+ pool [randPoolSize]byte // protected with poolMu
+)
type invalidLengthError struct{ len int }
@@ -41,6 +50,12 @@ func (err invalidLengthError) Error() string {
return fmt.Sprintf("invalid UUID length: %d", err.len)
}
+// IsInvalidLengthError is a matcher function for the custom error invalidLengthError.
+func IsInvalidLengthError(err error) bool {
+ _, ok := err.(invalidLengthError)
+ return ok
+}
+
// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
@@ -249,3 +264,31 @@ func SetRand(r io.Reader) {
}
rander = r
}
+
+// EnableRandPool enables an internal randomness pool used for Random
+// (Version 4) UUID generation. The pool contains random bytes read from
+// the random number generator on demand in batches. Enabling the pool
+// may improve the UUID generation throughput significantly.
+//
+// Since the pool is stored on the Go heap, this feature may be a bad fit
+// for security sensitive applications.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func EnableRandPool() {
+ poolEnabled = true
+}
+
+// DisableRandPool disables the randomness pool if it was previously
+// enabled with EnableRandPool.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func DisableRandPool() {
+ poolEnabled = false
+ defer poolMu.Unlock()
+ poolMu.Lock()
+ poolPos = randPoolSize
+}
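
Per the doc comments above, the pool is an opt-in, process-global switch; a sketch of the intended call pattern:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Opt in before any concurrent v4 generation starts.
	uuid.EnableRandPool()

	// Each New() now drains 16 bytes from the 256-byte pool, so the
	// underlying reader is hit once per 16 UUIDs instead of once per UUID.
	for i := 0; i < 4; i++ {
		fmt.Println(uuid.New())
	}

	// Disabling also discards the buffered random bytes.
	uuid.DisableRandPool()
}
```
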
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
index 86160fbd072..7697802e4d1 100644
--- a/vendor/github.com/google/uuid/version4.go
+++ b/vendor/github.com/google/uuid/version4.go
@@ -27,6 +27,8 @@ func NewString() string {
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
@@ -35,7 +37,10 @@ func NewString() string {
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
- return NewRandomFromReader(rander)
+ if !poolEnabled {
+ return NewRandomFromReader(rander)
+ }
+ return newRandomFromPool()
}
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
@@ -49,3 +54,23 @@ func NewRandomFromReader(r io.Reader) (UUID, error) {
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
+
+func newRandomFromPool() (UUID, error) {
+ var uuid UUID
+ poolMu.Lock()
+ if poolPos == randPoolSize {
+ _, err := io.ReadFull(rander, pool[:])
+ if err != nil {
+ poolMu.Unlock()
+ return Nil, err
+ }
+ poolPos = 0
+ }
+ copy(uuid[:], pool[poolPos:(poolPos+16)])
+ poolPos += 16
+ poolMu.Unlock()
+
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid, nil
+}
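
Since NewRandom only consults the pool when it is enabled, callers needing reproducible output can still bypass it entirely with NewRandomFromReader. A test-style sketch using a seeded, non-cryptographic reader:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/uuid"
)

func main() {
	// *rand.Rand implements io.Reader; fine for tests, not for security.
	r := rand.New(rand.NewSource(1))

	u, err := uuid.NewRandomFromReader(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // deterministic for a fixed seed
}
```
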
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
index a733bef18c0..44e368e5692 100644
--- a/vendor/github.com/hashicorp/errwrap/errwrap.go
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -44,6 +44,8 @@ func Wrap(outer, inner error) error {
//
// format is the format of the error message. The string '{{err}}' will
// be replaced with the original error message.
+//
+// Deprecated: Use fmt.Errorf()
func Wrapf(format string, err error) error {
outerMsg := ""
if err != nil {
@@ -148,6 +150,9 @@ func Walk(err error, cb WalkFunc) {
for _, err := range e.WrappedErrors() {
Walk(err, cb)
}
+ case interface{ Unwrap() error }:
+ cb(err)
+ Walk(e.Unwrap(), cb)
default:
cb(err)
}
@@ -167,3 +172,7 @@ func (w *wrappedError) Error() string {
func (w *wrappedError) WrappedErrors() []error {
return []error{w.Outer, w.Inner}
}
+
+func (w *wrappedError) Unwrap() error {
+ return w.Inner
+}
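
The new Unwrap case means Walk (and helpers built on it, such as Contains) now descends through errors wrapped with the standard library's %w verb, not just errwrap's own types. A sketch:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/errwrap"
)

func main() {
	cause := fmt.Errorf("permission denied")
	wrapped := fmt.Errorf("opening config: %w", cause) // stdlib wrapping

	// Walk now visits both the wrapper and the unwrapped cause.
	errwrap.Walk(wrapped, func(err error) {
		fmt.Println("visited:", err)
	})

	// Contains therefore matches messages behind %w chains too.
	fmt.Println(errwrap.Contains(wrapped, "permission denied")) // true
}
```

Conversely, the added (*wrappedError).Unwrap method lets the standard errors.Is and errors.As traverse errwrap-produced wrappers.
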
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
index aa8cbd7ce6d..7e6f7aeee82 100644
--- a/vendor/github.com/imdario/mergo/README.md
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -8,8 +8,7 @@
[![Coverage Status][9]][10]
[![Sourcegraph][11]][12]
[![FOSSA Status][13]][14]
-
-[![GoCenter Kudos][15]][16]
+[![Become my sponsor][15]][16]
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
@@ -25,8 +24,8 @@
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
-[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
-[16]: https://search.gocenter.io/github.com/imdario/mergo
+[15]: https://img.shields.io/github/sponsors/imdario
+[16]: https://github.com/sponsors/imdario
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
@@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
## Status
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
### Important note
-Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules.
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
@@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
-[](https://beerpay.io/imdario/mergo)
-[](https://beerpay.io/imdario/mergo)
+
### Mergo in the wild
+- [cli/cli](https://github.com/cli/cli)
- [moby/moby](https://github.com/moby/moby)
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [vmware/dispatch](https://github.com/vmware/dispatch)
@@ -98,6 +97,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [containerssh/containerssh](https://github.com/containerssh/containerssh)
+- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+- [tjpnz/structbot](https://github.com/tjpnz/structbot)
## Install
@@ -168,7 +169,7 @@ func main() {
Note: if tests are failing due to a missing package, please execute:
- go get gopkg.in/yaml.v2
+ go get gopkg.in/yaml.v3
### Transformers
@@ -218,7 +219,6 @@ func main() {
}
```
-
## Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
@@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don
Written by [Dario Castañé](http://dario.im).
-## Top Contributors
-
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
-
-
## License
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
diff --git a/vendor/github.com/imdario/mergo/go.mod b/vendor/github.com/imdario/mergo/go.mod
index 3d689d93eb3..6f476d6ff91 100644
--- a/vendor/github.com/imdario/mergo/go.mod
+++ b/vendor/github.com/imdario/mergo/go.mod
@@ -2,4 +2,4 @@ module github.com/imdario/mergo
go 1.13
-require gopkg.in/yaml.v2 v2.3.0
+require gopkg.in/yaml.v3 v3.0.0
diff --git a/vendor/github.com/imdario/mergo/go.sum b/vendor/github.com/imdario/mergo/go.sum
index 168980da5f7..223abcb4231 100644
--- a/vendor/github.com/imdario/mergo/go.sum
+++ b/vendor/github.com/imdario/mergo/go.sum
@@ -1,4 +1,4 @@
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
index 8c2a8fcd901..8b4e2f47a08 100644
--- a/vendor/github.com/imdario/mergo/merge.go
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
visited[h] = &visit{addr, typ, seen}
}
- if config.Transformers != nil && !isEmptyValue(dst) {
+ if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src)
return
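
For reference, this is the kind of Transformers hook the revised guard above now reaches whenever dst is valid and non-nil (rather than only when non-empty). The time.Time transformer below is adapted from the package README's canonical pattern and is a sketch, not part of this diff:

```go
package main

import (
	"fmt"
	"reflect"
	"time"

	"github.com/imdario/mergo"
)

// timeTransformer keeps a non-zero time.Time in dst instead of overwriting it.
type timeTransformer struct{}

func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ != reflect.TypeOf(time.Time{}) {
		return nil
	}
	return func(dst, src reflect.Value) error {
		if dst.CanSet() && dst.Interface().(time.Time).IsZero() {
			dst.Set(src)
		}
		return nil
	}
}

func main() {
	type Event struct{ At time.Time }
	dst := Event{}
	src := Event{At: time.Now()}
	if err := mergo.Merge(&dst, src, mergo.WithTransformers(timeTransformer{})); err != nil {
		panic(err)
	}
	fmt.Println(!dst.At.IsZero()) // true
}
```
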
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
index 3cc926c7f62..9fe362d476a 100644
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -17,7 +17,7 @@ import (
var (
ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
- ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer")
@@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
return
}
vDst = reflect.ValueOf(dst).Elem()
- if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported
return
}
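
With reflect.Slice added to the accepted destination kinds, Merge no longer returns ErrNotSupported for a top-level slice. A sketch of both the usual struct case and the newly allowed slice case, assuming the default semantics where empty destination values are filled from src:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Config struct {
	Host string
	Port int
}

func main() {
	dst := Config{Host: "localhost"}
	if err := mergo.Merge(&dst, Config{Host: "ignored", Port: 8080}); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst) // {Host:localhost Port:8080}

	// Previously rejected with ErrNotSupported; now a valid destination.
	var s []string
	if err := mergo.Merge(&s, []string{"a", "b"}); err != nil {
		panic(err)
	}
	fmt.Println(s) // [a b]
}
```
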
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
index 52b111d5f36..c589addf98c 100644
--- a/vendor/github.com/json-iterator/go/README.md
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -8,8 +8,6 @@
A high-performance 100% compatible drop-in replacement of "encoding/json"
-You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
-
# Benchmark

diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod
index e05c42ff58b..e817cccbf6f 100644
--- a/vendor/github.com/json-iterator/go/go.mod
+++ b/vendor/github.com/json-iterator/go/go.mod
@@ -6,6 +6,6 @@ require (
github.com/davecgh/go-spew v1.1.1
github.com/google/gofuzz v1.0.0
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421
- github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742
+ github.com/modern-go/reflect2 v1.0.2
github.com/stretchr/testify v1.3.0
)
diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum
index be00a6df969..4b7bb8a2952 100644
--- a/vendor/github.com/json-iterator/go/go.sum
+++ b/vendor/github.com/json-iterator/go/go.sum
@@ -5,11 +5,10 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
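
The README's "drop-in replacement" claim maps to a one-line swap in practice; a minimal sketch using the standard-library-compatible config:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Shadow the usual encoding/json import with a compatible API object.
	var json = jsoniter.ConfigCompatibleWithStandardLibrary

	b, err := json.Marshal(map[string]int{"answer": 42})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"answer":42}
}
```
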
diff --git a/vendor/github.com/juju/ansiterm/LICENSE b/vendor/github.com/juju/ansiterm/LICENSE
deleted file mode 100644
index ade9307b390..00000000000
--- a/vendor/github.com/juju/ansiterm/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-All files in this repository are licensed as follows. If you contribute
-to this repository, it is assumed that you license your contribution
-under the same license unless you state otherwise.
-
-All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/vendor/github.com/juju/ansiterm/Makefile b/vendor/github.com/juju/ansiterm/Makefile
deleted file mode 100644
index 212fdcbe523..00000000000
--- a/vendor/github.com/juju/ansiterm/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2016 Canonical Ltd.
-# Licensed under the LGPLv3, see LICENCE file for details.
-
-default: check
-
-check:
- go test
-
-docs:
- godoc2md github.com/juju/ansiterm > README.md
- sed -i 's|\[godoc-link-here\]|[](https://godoc.org/github.com/juju/ansiterm)|' README.md
-
-
-.PHONY: default check docs
diff --git a/vendor/github.com/juju/ansiterm/README.md b/vendor/github.com/juju/ansiterm/README.md
deleted file mode 100644
index 567438721db..00000000000
--- a/vendor/github.com/juju/ansiterm/README.md
+++ /dev/null
@@ -1,323 +0,0 @@
-
-# ansiterm
- import "github.com/juju/ansiterm"
-
-Package ansiterm provides a Writer that writes out the ANSI escape
-codes for color and styles.
-
-
-
-
-
-
-
-## type Color
-``` go
-type Color int
-```
-Color represents one of the standard 16 ANSI colors.
-
-
-
-``` go
-const (
- Default Color
- Black
- Red
- Green
- Yellow
- Blue
- Magenta
- Cyan
- Gray
- DarkGray
- BrightRed
- BrightGreen
- BrightYellow
- BrightBlue
- BrightMagenta
- BrightCyan
- White
-)
-```
-
-
-
-
-
-
-
-
-### func (Color) String
-``` go
-func (c Color) String() string
-```
-String returns the name of the color.
-
-
-
-## type Context
-``` go
-type Context struct {
- Foreground Color
- Background Color
- Styles []Style
-}
-```
-Context provides a way to specify both foreground and background colors
-along with other styles and write text to a Writer with those colors and
-styles.
-
-
-
-
-
-
-
-
-
-### func Background
-``` go
-func Background(color Color) *Context
-```
-Background is a convenience function that creates a Context with the
-specified color as the background color.
-
-
-### func Foreground
-``` go
-func Foreground(color Color) *Context
-```
-Foreground is a convenience function that creates a Context with the
-specified color as the foreground color.
-
-
-### func Styles
-``` go
-func Styles(styles ...Style) *Context
-```
-Styles is a convenience function that creates a Context with the
-specified styles set.
-
-
-
-
-### func (\*Context) Fprint
-``` go
-func (c *Context) Fprint(w sgrWriter, args ...interface{})
-```
-Fprint will set the sgr values of the writer to the specified foreground,
-background and styles, then formats using the default formats for its
-operands and writes to w. Spaces are added between operands when neither is
-a string. It returns the number of bytes written and any write error
-encountered.
-
-
-
-### func (\*Context) Fprintf
-``` go
-func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{})
-```
-Fprintf will set the sgr values of the writer to the specified
-foreground, background and styles, then write the formatted string,
-then reset the writer.
-
-
-
-### func (\*Context) SetBackground
-``` go
-func (c *Context) SetBackground(color Color) *Context
-```
-SetBackground sets the background to the specified color.
-
-
-
-### func (\*Context) SetForeground
-``` go
-func (c *Context) SetForeground(color Color) *Context
-```
-SetForeground sets the foreground to the specified color.
-
-
-
-### func (\*Context) SetStyle
-``` go
-func (c *Context) SetStyle(styles ...Style) *Context
-```
-SetStyle replaces the styles with the new values.
-
-
-
-## type Style
-``` go
-type Style int
-```
-
-
-``` go
-const (
- Bold Style
- Faint
- Italic
- Underline
- Blink
- Reverse
- Strikethrough
- Conceal
-)
-```
-
-
-
-
-
-
-
-
-### func (Style) String
-``` go
-func (s Style) String() string
-```
-
-
-## type TabWriter
-``` go
-type TabWriter struct {
- Writer
- // contains filtered or unexported fields
-}
-```
-TabWriter is a filter that inserts padding around tab-delimited
-columns in its input to align them in the output.
-
-It also setting of colors and styles over and above the standard
-tabwriter package.
-
-
-
-
-
-
-
-
-
-### func NewTabWriter
-``` go
-func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
-```
-NewTabWriter returns a writer that is able to set colors and styels.
-The ansi escape codes are stripped for width calculations.
-
-
-
-
-### func (\*TabWriter) Flush
-``` go
-func (t *TabWriter) Flush() error
-```
-Flush should be called after the last call to Write to ensure
-that any data buffered in the Writer is written to output. Any
-incomplete escape sequence at the end is considered
-complete for formatting purposes.
-
-
-
-### func (\*TabWriter) Init
-``` go
-func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
-```
-A Writer must be initialized with a call to Init. The first parameter (output)
-specifies the filter output. The remaining parameters control the formatting:
-
-
- minwidth minimal cell width including any padding
- tabwidth width of tab characters (equivalent number of spaces)
- padding padding added to a cell before computing its width
- padchar ASCII char used for padding
- if padchar == '\t', the Writer will assume that the
- width of a '\t' in the formatted output is tabwidth,
- and cells are left-aligned independent of align_left
- (for correct-looking results, tabwidth must correspond
- to the tab width in the viewer displaying the result)
- flags formatting control
-
-
-
-## type Writer
-``` go
-type Writer struct {
- io.Writer
- // contains filtered or unexported fields
-}
-```
-Writer allows colors and styles to be specified. If the io.Writer
-is not a terminal capable of color, all attempts to set colors or
-styles are no-ops.
-
-
-
-
-
-
-
-
-
-### func NewWriter
-``` go
-func NewWriter(w io.Writer) *Writer
-```
-NewWriter returns a Writer that allows the caller to specify colors and
-styles. If the io.Writer is not a terminal capable of color, all attempts
-to set colors or styles are no-ops.
-
-
-
-
-### func (\*Writer) ClearStyle
-``` go
-func (w *Writer) ClearStyle(s Style)
-```
-ClearStyle clears the text style.
-
-
-
-### func (\*Writer) Reset
-``` go
-func (w *Writer) Reset()
-```
-Reset returns the default foreground and background colors with no styles.
-
-
-
-### func (\*Writer) SetBackground
-``` go
-func (w *Writer) SetBackground(c Color)
-```
-SetBackground sets the background color.
-
-
-
-### func (\*Writer) SetForeground
-``` go
-func (w *Writer) SetForeground(c Color)
-```
-SetForeground sets the foreground color.
-
-
-
-### func (\*Writer) SetStyle
-``` go
-func (w *Writer) SetStyle(s Style)
-```
-SetStyle sets the text style.
-
-
-
-
-
-
-
-
-
-- - -
-Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/vendor/github.com/juju/ansiterm/attribute.go b/vendor/github.com/juju/ansiterm/attribute.go
deleted file mode 100644
index f2daa48132f..00000000000
--- a/vendor/github.com/juju/ansiterm/attribute.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2016 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package ansiterm
-
-import (
- "fmt"
- "sort"
- "strings"
-)
-
-type attribute int
-
-const (
- unknownAttribute attribute = -1
- reset attribute = 0
-)
-
-// sgr returns the escape sequence for the Select Graphic Rendition
-// for the attribute.
-func (a attribute) sgr() string {
- if a < 0 {
- return ""
- }
- return fmt.Sprintf("\x1b[%dm", a)
-}
-
-type attributes []attribute
-
-func (a attributes) Len() int { return len(a) }
-func (a attributes) Less(i, j int) bool { return a[i] < a[j] }
-func (a attributes) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-// sgr returns the combined escape sequence for the Select Graphic Rendition
-// for the sequence of attributes.
-func (a attributes) sgr() string {
- switch len(a) {
- case 0:
- return ""
- case 1:
- return a[0].sgr()
- default:
- sort.Sort(a)
- var values []string
- for _, attr := range a {
- values = append(values, fmt.Sprint(attr))
- }
- return fmt.Sprintf("\x1b[%sm", strings.Join(values, ";"))
- }
-}
diff --git a/vendor/github.com/juju/ansiterm/color.go b/vendor/github.com/juju/ansiterm/color.go
deleted file mode 100644
index 0a97de31e53..00000000000
--- a/vendor/github.com/juju/ansiterm/color.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2016 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package ansiterm
-
-const (
- _ Color = iota
- Default
- Black
- Red
- Green
- Yellow
- Blue
- Magenta
- Cyan
- Gray
- DarkGray
- BrightRed
- BrightGreen
- BrightYellow
- BrightBlue
- BrightMagenta
- BrightCyan
- White
-)
-
-// Color represents one of the standard 16 ANSI colors.
-type Color int
-
-// String returns the name of the color.
-func (c Color) String() string {
- switch c {
- case Default:
- return "default"
- case Black:
- return "black"
- case Red:
- return "red"
- case Green:
- return "green"
- case Yellow:
- return "yellow"
- case Blue:
- return "blue"
- case Magenta:
- return "magenta"
- case Cyan:
- return "cyan"
- case Gray:
- return "gray"
- case DarkGray:
- return "darkgray"
- case BrightRed:
- return "brightred"
- case BrightGreen:
- return "brightgreen"
- case BrightYellow:
- return "brightyellow"
- case BrightBlue:
- return "brightblue"
- case BrightMagenta:
- return "brightmagenta"
- case BrightCyan:
- return "brightcyan"
- case White:
- return "white"
- default:
- return ""
- }
-}
-
-func (c Color) foreground() attribute {
- switch c {
- case Default:
- return 39
- case Black:
- return 30
- case Red:
- return 31
- case Green:
- return 32
- case Yellow:
- return 33
- case Blue:
- return 34
- case Magenta:
- return 35
- case Cyan:
- return 36
- case Gray:
- return 37
- case DarkGray:
- return 90
- case BrightRed:
- return 91
- case BrightGreen:
- return 92
- case BrightYellow:
- return 93
- case BrightBlue:
- return 94
- case BrightMagenta:
- return 95
- case BrightCyan:
- return 96
- case White:
- return 97
- default:
- return unknownAttribute
- }
-}
-
-func (c Color) background() attribute {
- value := c.foreground()
- if value != unknownAttribute {
- return value + 10
- }
- return value
-}
diff --git a/vendor/github.com/juju/ansiterm/context.go b/vendor/github.com/juju/ansiterm/context.go
deleted file mode 100644
index e61a867ffff..00000000000
--- a/vendor/github.com/juju/ansiterm/context.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2016 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package ansiterm
-
-import (
- "fmt"
- "io"
-)
-
-// Context provides a way to specify both foreground and background colors
-// along with other styles and write text to a Writer with those colors and
-// styles.
-type Context struct {
- Foreground Color
- Background Color
- Styles []Style
-}
-
-// Foreground is a convenience function that creates a Context with the
-// specified color as the foreground color.
-func Foreground(color Color) *Context {
- return &Context{Foreground: color}
-}
-
-// Background is a convenience function that creates a Context with the
-// specified color as the background color.
-func Background(color Color) *Context {
- return &Context{Background: color}
-}
-
-// Styles is a convenience function that creates a Context with the
-// specified styles set.
-func Styles(styles ...Style) *Context {
- return &Context{Styles: styles}
-}
-
-// SetForeground sets the foreground to the specified color.
-func (c *Context) SetForeground(color Color) *Context {
- c.Foreground = color
- return c
-}
-
-// SetBackground sets the background to the specified color.
-func (c *Context) SetBackground(color Color) *Context {
- c.Background = color
- return c
-}
-
-// SetStyle replaces the styles with the new values.
-func (c *Context) SetStyle(styles ...Style) *Context {
- c.Styles = styles
- return c
-}
-
-type sgrWriter interface {
- io.Writer
- writeSGR(value sgr)
-}
-
-// Fprintf will set the sgr values of the writer to the specified
-// foreground, background and styles, then write the formatted string,
-// then reset the writer.
-func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{}) {
- w.writeSGR(c)
- fmt.Fprintf(w, format, args...)
- w.writeSGR(reset)
-}
-
-// Fprint will set the sgr values of the writer to the specified foreground,
-// background and styles, then formats using the default formats for its
-// operands and writes to w. Spaces are added between operands when neither is
-// a string. It returns the number of bytes written and any write error
-// encountered.
-func (c *Context) Fprint(w sgrWriter, args ...interface{}) {
- w.writeSGR(c)
- fmt.Fprint(w, args...)
- w.writeSGR(reset)
-}
-
-func (c *Context) sgr() string {
- var values attributes
- if foreground := c.Foreground.foreground(); foreground != unknownAttribute {
- values = append(values, foreground)
- }
- if background := c.Background.background(); background != unknownAttribute {
- values = append(values, background)
- }
- for _, style := range c.Styles {
- if value := style.enable(); value != unknownAttribute {
- values = append(values, value)
- }
- }
- return values.sgr()
-}
diff --git a/vendor/github.com/juju/ansiterm/doc.go b/vendor/github.com/juju/ansiterm/doc.go
deleted file mode 100644
index 78270077956..00000000000
--- a/vendor/github.com/juju/ansiterm/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2016 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-// Package ansiterm provides a Writer that writes out the ANSI escape
-// codes for color and styles.
-package ansiterm
diff --git a/vendor/github.com/juju/ansiterm/style.go b/vendor/github.com/juju/ansiterm/style.go
deleted file mode 100644
index 0be42da56ce..00000000000
--- a/vendor/github.com/juju/ansiterm/style.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2016 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package ansiterm
-
-const (
- _ Style = iota
- Bold
- Faint
- Italic
- Underline
- Blink
- Reverse
- Strikethrough
- Conceal
-)
-
-type Style int
-
-func (s Style) String() string {
- switch s {
- case Bold:
- return "bold"
- case Faint:
- return "faint"
- case Italic:
- return "italic"
- case Underline:
- return "underline"
- case Blink:
- return "blink"
- case Reverse:
- return "reverse"
- case Strikethrough:
- return "strikethrough"
- case Conceal:
- return "conceal"
- default:
- return ""
- }
-}
-
-func (s Style) enable() attribute {
- switch s {
- case Bold:
- return 1
- case Faint:
- return 2
- case Italic:
- return 3
- case Underline:
- return 4
- case Blink:
- return 5
- case Reverse:
- return 7
- case Conceal:
- return 8
- case Strikethrough:
- return 9
- default:
- return unknownAttribute
- }
-}
-
-func (s Style) disable() attribute {
- value := s.enable()
- if value != unknownAttribute {
- return value + 20
- }
- return value
-}
diff --git a/vendor/github.com/juju/ansiterm/tabwriter.go b/vendor/github.com/juju/ansiterm/tabwriter.go
deleted file mode 100644
index 1ff6faaaf24..00000000000
--- a/vendor/github.com/juju/ansiterm/tabwriter.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2016 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package ansiterm
-
-import (
- "io"
-
- "github.com/juju/ansiterm/tabwriter"
-)
-
-// NewTabWriter returns a writer that is able to set colors and styels.
-// The ansi escape codes are stripped for width calculations.
-func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
- return new(TabWriter).Init(output, minwidth, tabwidth, padding, padchar, flags)
-}
-
-// TabWriter is a filter that inserts padding around tab-delimited
-// columns in its input to align them in the output.
-//
-// It also setting of colors and styles over and above the standard
-// tabwriter package.
-type TabWriter struct {
- Writer
- tw tabwriter.Writer
-}
-
-// Flush should be called after the last call to Write to ensure
-// that any data buffered in the Writer is written to output. Any
-// incomplete escape sequence at the end is considered
-// complete for formatting purposes.
-//
-func (t *TabWriter) Flush() error {
- return t.tw.Flush()
-}
-
-// SetColumnAlignRight will mark a particular column as align right.
-// This is reset on the next flush.
-func (t *TabWriter) SetColumnAlignRight(column int) {
- t.tw.SetColumnAlignRight(column)
-}
-
-// A Writer must be initialized with a call to Init. The first parameter (output)
-// specifies the filter output. The remaining parameters control the formatting:
-//
-// minwidth minimal cell width including any padding
-// tabwidth width of tab characters (equivalent number of spaces)
-// padding padding added to a cell before computing its width
-// padchar ASCII char used for padding
-// if padchar == '\t', the Writer will assume that the
-// width of a '\t' in the formatted output is tabwidth,
-// and cells are left-aligned independent of align_left
-// (for correct-looking results, tabwidth must correspond
-// to the tab width in the viewer displaying the result)
-// flags formatting control
-//
-func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
- writer, colorCapable := colorEnabledWriter(output)
- t.Writer = Writer{
- Writer: t.tw.Init(writer, minwidth, tabwidth, padding, padchar, flags),
- noColor: !colorCapable,
- }
- return t
-}
diff --git a/vendor/github.com/juju/ansiterm/tabwriter/LICENSE b/vendor/github.com/juju/ansiterm/tabwriter/LICENSE
deleted file mode 100644
index 74487567632..00000000000
--- a/vendor/github.com/juju/ansiterm/tabwriter/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go b/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
deleted file mode 100644
index 98949d036a3..00000000000
--- a/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
+++ /dev/null
@@ -1,587 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file is mostly a copy of the go standard library text/tabwriter. With
-// the additional stripping of ansi control characters for width calculations.
-
-// Package tabwriter implements a write filter (tabwriter.Writer) that
-// translates tabbed columns in input into properly aligned text.
-//
-// The package is using the Elastic Tabstops algorithm described at
-// http://nickgravgaard.com/elastictabstops/index.html.
-//
-package tabwriter
-
-import (
- "bytes"
- "io"
- "unicode/utf8"
-
- "github.com/lunixbochs/vtclean"
-)
-
-// ----------------------------------------------------------------------------
-// Filter implementation
-
-// A cell represents a segment of text terminated by tabs or line breaks.
-// The text itself is stored in a separate buffer; cell only describes the
-// segment's size in bytes, its width in runes, and whether it's an htab
-// ('\t') terminated cell.
-//
-type cell struct {
- size int // cell size in bytes
- width int // cell width in runes
- htab bool // true if the cell is terminated by an htab ('\t')
-}
-
-// A Writer is a filter that inserts padding around tab-delimited
-// columns in its input to align them in the output.
-//
-// The Writer treats incoming bytes as UTF-8 encoded text consisting
-// of cells terminated by (horizontal or vertical) tabs or line
-// breaks (newline or formfeed characters). Cells in adjacent lines
-// constitute a column. The Writer inserts padding as needed to
-// make all cells in a column have the same width, effectively
-// aligning the columns. It assumes that all characters have the
-// same width except for tabs for which a tabwidth must be specified.
-// Note that cells are tab-terminated, not tab-separated: trailing
-// non-tab text at the end of a line does not form a column cell.
-//
-// The Writer assumes that all Unicode code points have the same width;
-// this may not be true in some fonts.
-//
-// If DiscardEmptyColumns is set, empty columns that are terminated
-// entirely by vertical (or "soft") tabs are discarded. Columns
-// terminated by horizontal (or "hard") tabs are not affected by
-// this flag.
-//
-// If a Writer is configured to filter HTML, HTML tags and entities
-// are passed through. The widths of tags and entities are
-// assumed to be zero (tags) and one (entities) for formatting purposes.
-//
-// A segment of text may be escaped by bracketing it with Escape
-// characters. The tabwriter passes escaped text segments through
-// unchanged. In particular, it does not interpret any tabs or line
-// breaks within the segment. If the StripEscape flag is set, the
-// Escape characters are stripped from the output; otherwise they
-// are passed through as well. For the purpose of formatting, the
-// width of the escaped text is always computed excluding the Escape
-// characters.
-//
-// The formfeed character ('\f') acts like a newline but it also
-// terminates all columns in the current line (effectively calling
-// Flush). Cells in the next line start new columns. Unless found
-// inside an HTML tag or inside an escaped text segment, formfeed
-// characters appear as newlines in the output.
-//
-// The Writer must buffer input internally, because proper spacing
-// of one line may depend on the cells in future lines. Clients must
-// call Flush when done calling Write.
-//
-type Writer struct {
- // configuration
- output io.Writer
- minwidth int
- tabwidth int
- padding int
- padbytes [8]byte
- flags uint
-
- // current state
- buf bytes.Buffer // collected text excluding tabs or line breaks
- pos int // buffer position up to which cell.width of incomplete cell has been computed
- cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
- endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
- lines [][]cell // list of lines; each line is a list of cells
- widths []int // list of column widths in runes - re-used during formatting
- alignment map[int]uint // column alignment
-}
-
-func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
-
-// Reset the current state.
-func (b *Writer) reset() {
- b.buf.Reset()
- b.pos = 0
- b.cell = cell{}
- b.endChar = 0
- b.lines = b.lines[0:0]
- b.widths = b.widths[0:0]
- b.alignment = make(map[int]uint)
- b.addLine()
-}
-
-// Internal representation (current state):
-//
-// - all text written is appended to buf; tabs and line breaks are stripped away
-// - at any given time there is a (possibly empty) incomplete cell at the end
-// (the cell starts after a tab or line break)
-// - cell.size is the number of bytes belonging to the cell so far
-// - cell.width is text width in runes of that cell from the start of the cell to
-// position pos; html tags and entities are excluded from this width if html
-// filtering is enabled
-// - the sizes and widths of processed text are kept in the lines list
-// which contains a list of cells for each line
-// - the widths list is a temporary list with current widths used during
-// formatting; it is kept in Writer because it's re-used
-//
-// |<---------- size ---------->|
-// | |
-// |<- width ->|<- ignored ->| |
-// | | | |
-// [---processed---tab------------......]
-// ^ ^ ^
-// | | |
-// buf start of incomplete cell pos
-
-// Formatting can be controlled with these flags.
-const (
- // Ignore html tags and treat entities (starting with '&'
- // and ending in ';') as single characters (width = 1).
- FilterHTML uint = 1 << iota
-
- // Strip Escape characters bracketing escaped text segments
- // instead of passing them through unchanged with the text.
- StripEscape
-
- // Force right-alignment of cell content.
- // Default is left-alignment.
- AlignRight
-
- // Handle empty columns as if they were not present in
- // the input in the first place.
- DiscardEmptyColumns
-
- // Always use tabs for indentation columns (i.e., padding of
- // leading empty cells on the left) independent of padchar.
- TabIndent
-
- // Print a vertical bar ('|') between columns (after formatting).
- // Discarded columns appear as zero-width columns ("||").
- Debug
-)
-
-// A Writer must be initialized with a call to Init. The first parameter (output)
-// specifies the filter output. The remaining parameters control the formatting:
-//
-// minwidth minimal cell width including any padding
-// tabwidth width of tab characters (equivalent number of spaces)
-// padding padding added to a cell before computing its width
-// padchar ASCII char used for padding
-// if padchar == '\t', the Writer will assume that the
-// width of a '\t' in the formatted output is tabwidth,
-// and cells are left-aligned independent of align_left
-// (for correct-looking results, tabwidth must correspond
-// to the tab width in the viewer displaying the result)
-// flags formatting control
-//
-func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
- if minwidth < 0 || tabwidth < 0 || padding < 0 {
- panic("negative minwidth, tabwidth, or padding")
- }
- b.output = output
- b.minwidth = minwidth
- b.tabwidth = tabwidth
- b.padding = padding
- for i := range b.padbytes {
- b.padbytes[i] = padchar
- }
- if padchar == '\t' {
- // tab padding enforces left-alignment
- flags &^= AlignRight
- }
- b.flags = flags
-
- b.reset()
-
- return b
-}
-
-// debugging support (keep code around)
-func (b *Writer) dump() {
- pos := 0
- for i, line := range b.lines {
- print("(", i, ") ")
- for _, c := range line {
- print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]")
- pos += c.size
- }
- print("\n")
- }
- print("\n")
-}
-
-// local error wrapper so we can distinguish errors we want to return
-// as errors from genuine panics (which we don't want to return as errors)
-type osError struct {
- err error
-}
-
-func (b *Writer) write0(buf []byte) {
- n, err := b.output.Write(buf)
- if n != len(buf) && err == nil {
- err = io.ErrShortWrite
- }
- if err != nil {
- panic(osError{err})
- }
-}
-
-func (b *Writer) writeN(src []byte, n int) {
- for n > len(src) {
- b.write0(src)
- n -= len(src)
- }
- b.write0(src[0:n])
-}
-
-var (
- newline = []byte{'\n'}
- tabs = []byte("\t\t\t\t\t\t\t\t")
-)
-
-func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
- if b.padbytes[0] == '\t' || useTabs {
- // padding is done with tabs
- if b.tabwidth == 0 {
- return // tabs have no width - can't do any padding
- }
- // make cellw the smallest multiple of b.tabwidth
- cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
- n := cellw - textw // amount of padding
- if n < 0 {
- panic("internal error")
- }
- b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
- return
- }
-
- // padding is done with non-tab characters
- b.writeN(b.padbytes[0:], cellw-textw)
-}
-
-var vbar = []byte{'|'}
-
-func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
- pos = pos0
- for i := line0; i < line1; i++ {
- line := b.lines[i]
-
- // if TabIndent is set, use tabs to pad leading empty cells
- useTabs := b.flags&TabIndent != 0
-
- for j, c := range line {
- if j > 0 && b.flags&Debug != 0 {
- // indicate column break
- b.write0(vbar)
- }
-
- if c.size == 0 {
- // empty cell
- if j < len(b.widths) {
- b.writePadding(c.width, b.widths[j], useTabs)
- }
- } else {
- // non-empty cell
- useTabs = false
- alignColumnRight := b.alignment[j] == AlignRight
- if (b.flags&AlignRight == 0) && !alignColumnRight { // align left
- b.write0(b.buf.Bytes()[pos : pos+c.size])
- pos += c.size
- if j < len(b.widths) {
- b.writePadding(c.width, b.widths[j], false)
- }
- } else if alignColumnRight && j < len(b.widths) {
- // just this column
- internalSize := b.widths[j] - b.padding
- if j < len(b.widths) {
- b.writePadding(c.width, internalSize, false)
- }
- b.write0(b.buf.Bytes()[pos : pos+c.size])
- if b.padding > 0 {
- b.writePadding(0, b.padding, false)
- }
- pos += c.size
- } else { // align right
- if j < len(b.widths) {
- b.writePadding(c.width, b.widths[j], false)
- }
- b.write0(b.buf.Bytes()[pos : pos+c.size])
- pos += c.size
- }
- }
- }
-
- if i+1 == len(b.lines) {
- // last buffered line - we don't have a newline, so just write
- // any outstanding buffered data
- b.write0(b.buf.Bytes()[pos : pos+b.cell.size])
- pos += b.cell.size
- } else {
- // not the last line - write newline
- b.write0(newline)
- }
- }
- return
-}
-
-// Format the text between line0 and line1 (excluding line1); pos
-// is the buffer position corresponding to the beginning of line0.
-// Returns the buffer position corresponding to the beginning of
-// line1.
-//
-func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
- pos = pos0
- column := len(b.widths)
- for this := line0; this < line1; this++ {
- line := b.lines[this]
-
- if column < len(line)-1 {
- // cell exists in this column => this line
- // has more cells than the previous line
- // (the last cell per line is ignored because cells are
- // tab-terminated; the last cell per line describes the
- // text before the newline/formfeed and does not belong
- // to a column)
-
- // print unprinted lines until beginning of block
- pos = b.writeLines(pos, line0, this)
- line0 = this
-
- // column block begin
- width := b.minwidth // minimal column width
- discardable := true // true if all cells in this column are empty and "soft"
- for ; this < line1; this++ {
- line = b.lines[this]
- if column < len(line)-1 {
- // cell exists in this column
- c := line[column]
- // update width
- if w := c.width + b.padding; w > width {
- width = w
- }
- // update discardable
- if c.width > 0 || c.htab {
- discardable = false
- }
- } else {
- break
- }
- }
- // column block end
-
- // discard empty columns if necessary
- if discardable && b.flags&DiscardEmptyColumns != 0 {
- width = 0
- }
-
- // format and print all columns to the right of this column
- // (we know the widths of this column and all columns to the left)
- b.widths = append(b.widths, width) // push width
- pos = b.format(pos, line0, this)
- b.widths = b.widths[0 : len(b.widths)-1] // pop width
- line0 = this
- }
- }
-
- // print unprinted lines until end
- return b.writeLines(pos, line0, line1)
-}
-
-// Append text to current cell.
-func (b *Writer) append(text []byte) {
- b.buf.Write(text)
- b.cell.size += len(text)
-}
-
-// Update the cell width.
-func (b *Writer) updateWidth() {
- // ---- Changes here -----
- newChars := b.buf.Bytes()[b.pos:b.buf.Len()]
- cleaned := vtclean.Clean(string(newChars), false) // false to strip colors
- b.cell.width += utf8.RuneCount([]byte(cleaned))
- // --- end of changes ----
- b.pos = b.buf.Len()
-}
-
-// To escape a text segment, bracket it with Escape characters.
-// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
-// does not terminate a cell and constitutes a single character of
-// width one for formatting purposes.
-//
-// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
-//
-const Escape = '\xff'
-
-// Start escaped mode.
-func (b *Writer) startEscape(ch byte) {
- switch ch {
- case Escape:
- b.endChar = Escape
- case '<':
- b.endChar = '>'
- case '&':
- b.endChar = ';'
- }
-}
-
-// Terminate escaped mode. If the escaped text was an HTML tag, its width
-// is assumed to be zero for formatting purposes; if it was an HTML entity,
-// its width is assumed to be one. In all other cases, the width is the
-// unicode width of the text.
-//
-func (b *Writer) endEscape() {
- switch b.endChar {
- case Escape:
- b.updateWidth()
- if b.flags&StripEscape == 0 {
- b.cell.width -= 2 // don't count the Escape chars
- }
- case '>': // tag of zero width
- case ';':
- b.cell.width++ // entity, count as one rune
- }
- b.pos = b.buf.Len()
- b.endChar = 0
-}
-
-// Terminate the current cell by adding it to the list of cells of the
-// current line. Returns the number of cells in that line.
-//
-func (b *Writer) terminateCell(htab bool) int {
- b.cell.htab = htab
- line := &b.lines[len(b.lines)-1]
- *line = append(*line, b.cell)
- b.cell = cell{}
- return len(*line)
-}
-
-func handlePanic(err *error, op string) {
- if e := recover(); e != nil {
- if nerr, ok := e.(osError); ok {
- *err = nerr.err
- return
- }
- panic("tabwriter: panic during " + op)
- }
-}
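The osError/handlePanic pair above is a common Go pattern: deep helpers panic with a private wrapper type, and exported entry points recover it back into an ordinary error while letting genuine panics propagate. A minimal self-contained sketch of the same technique (names here are illustrative, not part of the vendored package):

```go
package main

import (
	"errors"
	"fmt"
)

// localError marks errors we raised ourselves, so recover() can tell
// them apart from genuine panics.
type localError struct{ err error }

func deepHelper(fail bool) {
	if fail {
		panic(localError{errors.New("write failed")}) // no error plumbing needed
	}
}

// Run is the exported boundary: our own panics become plain errors,
// anything else keeps panicking.
func Run(fail bool) (err error) {
	defer func() {
		if e := recover(); e != nil {
			if le, ok := e.(localError); ok {
				err = le.err
				return
			}
			panic(e)
		}
	}()
	deepHelper(fail)
	return nil
}

func main() {
	fmt.Println(Run(true))  // write failed
	fmt.Println(Run(false)) // <nil>
}
```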
-
-// Flush should be called after the last call to Write to ensure
-// that any data buffered in the Writer is written to output. Any
-// incomplete escape sequence at the end is considered
-// complete for formatting purposes.
-//
-func (b *Writer) Flush() (err error) {
- defer b.reset() // even in the presence of errors
- defer handlePanic(&err, "Flush")
-
- // add current cell if not empty
- if b.cell.size > 0 {
- if b.endChar != 0 {
- // inside escape - terminate it even if incomplete
- b.endEscape()
- }
- b.terminateCell(false)
- }
-
- // format contents of buffer
- b.format(0, 0, len(b.lines))
-
- return
-}
-
-var hbar = []byte("---\n")
-
-// SetColumnAlignRight will mark a particular column as align right.
-// This is reset on the next flush.
-func (b *Writer) SetColumnAlignRight(column int) {
- b.alignment[column] = AlignRight
-}
-
-// Write writes buf to the writer b.
-// The only errors returned are ones encountered
-// while writing to the underlying output stream.
-//
-func (b *Writer) Write(buf []byte) (n int, err error) {
- defer handlePanic(&err, "Write")
-
- // split text into cells
- n = 0
- for i, ch := range buf {
- if b.endChar == 0 {
- // outside escape
- switch ch {
- case '\t', '\v', '\n', '\f':
- // end of cell
- b.append(buf[n:i])
- b.updateWidth()
- n = i + 1 // ch consumed
- ncells := b.terminateCell(ch == '\t')
- if ch == '\n' || ch == '\f' {
- // terminate line
- b.addLine()
- if ch == '\f' || ncells == 1 {
-					// A '\f' always forces a flush. Otherwise, if the previous
-					// line has only one cell, that cell does not affect the
-					// formatting of the following lines (the last cell per
-					// line is ignored by format()), so we can flush the
-					// Writer contents.
- if err = b.Flush(); err != nil {
- return
- }
- if ch == '\f' && b.flags&Debug != 0 {
- // indicate section break
- b.write0(hbar)
- }
- }
- }
-
- case Escape:
- // start of escaped sequence
- b.append(buf[n:i])
- b.updateWidth()
- n = i
- if b.flags&StripEscape != 0 {
- n++ // strip Escape
- }
- b.startEscape(Escape)
-
- case '<', '&':
- // possibly an html tag/entity
- if b.flags&FilterHTML != 0 {
- // begin of tag/entity
- b.append(buf[n:i])
- b.updateWidth()
- n = i
- b.startEscape(ch)
- }
- }
-
- } else {
- // inside escape
- if ch == b.endChar {
- // end of tag/entity
- j := i + 1
- if ch == Escape && b.flags&StripEscape != 0 {
- j = i // strip Escape
- }
- b.append(buf[n:j])
- n = i + 1 // ch consumed
- b.endEscape()
- }
- }
- }
-
- // append leftover text
- b.append(buf[n:])
- n = len(buf)
- return
-}
-
-// NewWriter allocates and initializes a new tabwriter.Writer.
-// The parameters are the same as for the Init function.
-//
-func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
- return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
-}
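The file above is a fork of the standard library's text/tabwriter, extended to count cell widths after stripping ANSI sequences via vtclean. A minimal usage sketch against the stdlib API it mirrors, including the Escape bracketing described in its comments:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Same parameter order as Init/NewWriter above:
	// output, minwidth, tabwidth, padding, padchar, flags.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprintln(w, "file\tsize")
	fmt.Fprintln(w, "deflate.go\t31337")
	// A tab bracketed by Escape bytes is cell content, not a terminator.
	fmt.Fprintf(w, "a%c\t%cb\tc\n", tabwriter.Escape, tabwriter.Escape)
	w.Flush() // nothing reaches os.Stdout until Flush
}
```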
diff --git a/vendor/github.com/juju/ansiterm/terminal.go b/vendor/github.com/juju/ansiterm/terminal.go
deleted file mode 100644
index 96fd11c5100..00000000000
--- a/vendor/github.com/juju/ansiterm/terminal.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package ansiterm
-
-import (
- "io"
- "os"
-
- "github.com/mattn/go-colorable"
- "github.com/mattn/go-isatty"
-)
-
-// colorEnabledWriter returns a writer that can handle the ansi color codes
-// and true if the writer passed in is a terminal capable of color. If the
-// TERM environment variable is set to "dumb", the terminal is not considered
-// color capable.
-func colorEnabledWriter(w io.Writer) (io.Writer, bool) {
- f, ok := w.(*os.File)
- if !ok {
- return w, false
- }
- // Check the TERM environment variable specifically
- // to check for "dumb" terminals.
- if os.Getenv("TERM") == "dumb" {
- return w, false
- }
- if !isatty.IsTerminal(f.Fd()) {
- return w, false
- }
- return colorable.NewColorable(f), true
-}
diff --git a/vendor/github.com/juju/ansiterm/writer.go b/vendor/github.com/juju/ansiterm/writer.go
deleted file mode 100644
index 32437bb27bc..00000000000
--- a/vendor/github.com/juju/ansiterm/writer.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2016 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package ansiterm
-
-import (
- "fmt"
- "io"
-)
-
-// Writer allows colors and styles to be specified. If the io.Writer
-// is not a terminal capable of color, all attempts to set colors or
-// styles are no-ops.
-type Writer struct {
- io.Writer
-
- noColor bool
-}
-
-// NewWriter returns a Writer that allows the caller to specify colors and
-// styles. If the io.Writer is not a terminal capable of color, all attempts
-// to set colors or styles are no-ops.
-func NewWriter(w io.Writer) *Writer {
- writer, colorCapable := colorEnabledWriter(w)
- return &Writer{
- Writer: writer,
- noColor: !colorCapable,
- }
-}
-
-// SetColorCapable forces the writer to either write the ANSI escape color
-// if capable is true, or to not write them if capable is false.
-func (w *Writer) SetColorCapable(capable bool) {
- w.noColor = !capable
-}
-
-// SetForeground sets the foreground color.
-func (w *Writer) SetForeground(c Color) {
- w.writeSGR(c.foreground())
-}
-
-// SetBackground sets the background color.
-func (w *Writer) SetBackground(c Color) {
- w.writeSGR(c.background())
-}
-
-// SetStyle sets the text style.
-func (w *Writer) SetStyle(s Style) {
- w.writeSGR(s.enable())
-}
-
-// ClearStyle clears the text style.
-func (w *Writer) ClearStyle(s Style) {
- w.writeSGR(s.disable())
-}
-
-// Reset returns the default foreground and background colors with no styles.
-func (w *Writer) Reset() {
- w.writeSGR(reset)
-}
-
-type sgr interface {
- // sgr returns the combined escape sequence for the Select Graphic Rendition.
- sgr() string
-}
-
-// writeSGR takes the appropriate integer SGR parameters
-// and writes out the ANSI escape code.
-func (w *Writer) writeSGR(value sgr) {
- if w.noColor {
- return
- }
- fmt.Fprint(w, value.sgr())
-}
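For reference, the deleted Writer was used along these lines (a sketch; ansiterm.Red comes from the package's color definitions, removed elsewhere in this change):

```go
package main

import (
	"fmt"
	"os"

	"github.com/juju/ansiterm"
)

func main() {
	w := ansiterm.NewWriter(os.Stdout)
	// No-ops unless stdout is a color-capable terminal (see terminal.go).
	w.SetForeground(ansiterm.Red)
	fmt.Fprintln(w, "error: something broke")
	w.Reset()
}
```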
diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore
index b35f8449bf2..d31b3781527 100644
--- a/vendor/github.com/klauspost/compress/.gitignore
+++ b/vendor/github.com/klauspost/compress/.gitignore
@@ -23,3 +23,10 @@ _testmain.go
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe
+
+# Linux perf files
+perf.data
+perf.data.old
+
+# gdb history
+.gdb_history
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index c9014ce1da2..0af08e65e68 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -3,6 +3,7 @@
before:
hooks:
- ./gen.sh
+ - go install mvdan.cc/garble@latest
builds:
-
@@ -31,6 +32,7 @@ builds:
- mips64le
goarm:
- 7
+ gobinary: garble
-
id: "s2d"
binary: s2d
@@ -57,6 +59,7 @@ builds:
- mips64le
goarm:
- 7
+ gobinary: garble
-
id: "s2sx"
binary: s2sx
@@ -84,6 +87,7 @@ builds:
- mips64le
goarm:
- 7
+ gobinary: garble
archives:
-
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
index 6cd1e962761..87d55747778 100644
--- a/vendor/github.com/klauspost/compress/LICENSE
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -290,3 +290,15 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 235dc7cc68a..3c00c1af968 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,12 +17,147 @@ This package provides various compression algorithms.
# changelog
+* Sept 16, 2022 (v1.15.10)
+
+ * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+ * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
+ * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+  * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
+ * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+ * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+ * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+ * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
+* July 21, 2022 (v1.15.9)
+
+ * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+ * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+ * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+
+* July 13, 2022 (v1.15.8)
+
+ * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
+ * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638
+ * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636
+ * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637
+ * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634
+ * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640
+ * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639
+
+* June 29, 2022 (v1.15.7)
+
+ * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
+ * zip: Merge upstream https://github.com/klauspost/compress/pull/631
+ * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
+ * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
+ * flate: Faster histograms https://github.com/klauspost/compress/pull/620
+ * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
+
+* June 3, 2022 (v1.15.6)
+ * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
+ * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
+ * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
+ * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
+ * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
+ * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
+ * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609
+ * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607
+ * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614
+ * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610
+
+* May 25, 2022 (v1.15.5)
+ * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
+ * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
+ * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
+ * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
+ * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
+ * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
+ * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
+ * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+ * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+ * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+ * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+ * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+ * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+ * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+ * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+ * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+ * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+ * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+ * Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+ * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+ * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+ * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+ * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+ * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+ * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)
+ * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster in asynchronous mode, since the goroutine allocation splits the workload much more effectively. On typical streams this will fully use 2 cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
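A minimal sketch of the synchronous mode described above, using the zstd package's WithEncoderConcurrency and WithDecoderConcurrency options:

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	// Concurrency 1 keeps the encoder goroutine-free.
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	enc.Write([]byte("hello hello hello"))
	enc.Close()

	// Same for the decoder; a nil reader is fine when only DecodeAll is used.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()
	out, err := dec.DecodeAll(buf.Bytes(), nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", out)
}
```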
+
+<details>
+	<summary>See changes to v1.14.x</summary>
+
+* Feb 22, 2022 (v1.14.4)
+ * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+ * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+ * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
+ * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+ * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+ * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+ * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+ * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+ * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+ * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+ * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+ * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+ * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
+* Jan 11, 2022 (v1.14.1)
+ * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
+ * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
+ * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
+ * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
+  * Add garble-built binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+</details>
+
+<details>
+	<summary>See changes to v1.13.x</summary>
+
+* Aug 30, 2021 (v1.13.5)
+ * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
+ * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
+ * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426)
+ * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421)
+
* Aug 12, 2021 (v1.13.4)
* Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy).
* zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415)
* Aug 3, 2021 (v1.13.3)
-
* zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
* zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
* gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
@@ -31,7 +166,6 @@ This package provides various compression algorithms.
* zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
* Jun 14, 2021 (v1.13.1)
-
* s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
* zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
* gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
@@ -41,7 +175,12 @@ This package provides various compression algorithms.
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+</details>
+
+<details>
+	<summary>See changes to v1.12.x</summary>
+
* May 25, 2021 (v1.12.3)
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -63,7 +202,11 @@ This package provides various compression algorithms.
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+</details>
+
+<details>
+	<summary>See changes to v1.11.x</summary>
+
* Mar 26, 2021 (v1.11.13)
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
@@ -118,9 +261,10 @@ This package provides various compression algorithms.
* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
+</details>
+
 <details>
-	<summary>See changes prior to v1.11.0</summary>
+	<summary>See changes to v1.10.x</summary>
* July 8, 2020 (v1.10.11)
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
@@ -282,11 +426,6 @@ This package provides various compression algorithms.
# deflate usage
-* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
-* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
-* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
-
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
| old import | new import | Documentation
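For example, switching gzip only requires changing the import path (a sketch; the table lists the full mapping):

```go
package main

import (
	"bytes"

	gzip "github.com/klauspost/compress/gzip" // was: "compress/gzip"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf) // identical API to the stdlib writer
	zw.Write([]byte("some data"))
	zw.Close()
}
```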
@@ -308,6 +447,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range.
If you expect to have a lot of concurrently allocated Writers consider using
the stateless compress described below.
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
# Stateless compression
This package offers stateless compression as a special option for gzip/deflate.
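A short sketch, assuming the flate package's NewStatelessWriter (each Write is compressed and flushed independently, so no window state is kept between calls):

```go
package main

import (
	"bytes"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	w := flate.NewStatelessWriter(&buf)
	// Every Write becomes a fully flushed deflate block: minimal memory,
	// at some cost in compression ratio.
	w.Write([]byte("stateless payload"))
	w.Close()
}
```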
@@ -424,6 +565,13 @@ For more information see my blog post on [Fast Linear Time Compression](http://b
This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
+# Other packages
+
+Here are other pure Go packages of good quality (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
# license
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 5283ac5a538..07265ddede8 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -6,6 +6,7 @@
package flate
import (
+ "encoding/binary"
"fmt"
"io"
"math"
@@ -37,15 +38,17 @@ const (
maxMatchLength = 258 // The longest match for the compressor
minOffsetSize = 1 // The shortest offset that makes any sense
-	// The maximum number of tokens we put into a single flate block, just to
-	// stop things from getting too large.
- maxFlateBlockTokens = 1 << 14
+	// The maximum number of tokens we will encode at a time.
+	// Smaller sizes usually create less optimal blocks.
+	// Bigger sizes can make context switching slow.
+	// We use this for levels 7-9, so we make it big.
+ maxFlateBlockTokens = 1 << 15
maxStoreBlockSize = 65535
hashBits = 17 // After 17 performance degrades
hashSize = 1 << hashBits
hashMask = (1 << hashBits) - 1
hashShift = (hashBits + minMatchLength - 1) / minMatchLength
- maxHashOffset = 1 << 24
+ maxHashOffset = 1 << 28
skipNever = math.MaxInt32
@@ -70,9 +73,9 @@ var levels = []compressionLevel{
{0, 0, 0, 0, 0, 6},
// Levels 7-9 use increasingly more lazy matching
// and increasingly stringent conditions for "good enough".
- {8, 8, 24, 16, skipNever, 7},
- {10, 16, 24, 64, skipNever, 8},
- {32, 258, 258, 4096, skipNever, 9},
+ {8, 12, 16, 24, skipNever, 7},
+ {16, 30, 40, 64, skipNever, 8},
+ {32, 258, 258, 1024, skipNever, 9},
}
// advancedState contains state for the advanced levels, with bigger hash tables, etc.
@@ -81,28 +84,29 @@ type advancedState struct {
length int
offset int
maxInsertIndex int
+ chainHead int
+ hashOffset int
+
+ ii uint16 // position of last match, intended to overflow to reset.
+
+ // input window: unprocessed data is window[index:windowEnd]
+ index int
+ estBitsPerByte int
+ hashMatch [maxMatchLength + minMatchLength]uint32
// Input hash chains
// hashHead[hashValue] contains the largest inputIndex with the specified hash value
// If hashHead[hashValue] is within the current window, then
// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
// with the same hash value.
- chainHead int
- hashHead [hashSize]uint32
- hashPrev [windowSize]uint32
- hashOffset int
-
- // input window: unprocessed data is window[index:windowEnd]
- index int
- hashMatch [maxMatchLength + minMatchLength]uint32
-
- hash uint32
- ii uint16 // position of last match, intended to overflow to reset.
+ hashHead [hashSize]uint32
+ hashPrev [windowSize]uint32
}
type compressor struct {
compressionLevel
+ h *huffmanEncoder
w *huffmanBitWriter
// compression algorithm
@@ -127,7 +131,8 @@ func (d *compressor) fillDeflate(b []byte) int {
s := d.state
if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
// shift the window by windowSize
- copy(d.window[:], d.window[windowSize:2*windowSize])
+ //copy(d.window[:], d.window[windowSize:2*windowSize])
+ *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
s.index -= windowSize
d.windowEnd -= windowSize
if d.blockStart >= windowSize {
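The `*(*[windowSize]byte)(...)` form above is Go 1.17's slice-to-array-pointer conversion; assigning through it gives the compiler a copy of compile-time-constant size (the "arrays for constant size copies" change from the changelog). A standalone sketch:

```go
package main

import "fmt"

const windowSize = 4 // tiny stand-in for the real constant

func main() {
	window := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	// Equivalent to copy(window[:windowSize], window[windowSize:]),
	// but with a length the compiler knows statically.
	*(*[windowSize]byte)(window) = *(*[windowSize]byte)(window[windowSize:])
	fmt.Println(window) // [5 6 7 8 5 6 7 8]
}
```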
@@ -170,7 +175,8 @@ func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
window = d.window[d.blockStart:index]
}
d.blockStart = index
- d.w.writeBlock(tok, eof, window)
+ //d.w.writeBlock(tok, eof, window)
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
return d.w.err
}
return nil
@@ -253,7 +259,6 @@ func (d *compressor) fillWindow(b []byte) {
// Set the head of the hash chain to us.
s.hashHead[newH] = uint32(di + s.hashOffset)
}
- s.hash = newH
}
// Update window information.
d.windowEnd += n
@@ -263,7 +268,7 @@ func (d *compressor) fillWindow(b []byte) {
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
-func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
minMatchLook := maxMatchLength
if lookahead < minMatchLook {
minMatchLook = lookahead
@@ -279,36 +284,75 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
// If we've got a match that's good enough, only look in 1/4 the chain.
tries := d.chain
- length = prevLength
- if length >= d.good {
- tries >>= 2
- }
+ length = minMatchLength - 1
wEnd := win[pos+length]
wPos := win[pos:]
minIndex := pos - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+ offset = 0
+
+ cGain := 0
+ if d.chain < 100 {
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:i+minMatchLook], wPos)
+ if n > length {
+ length = n
+ offset = pos - i
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ if i <= minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+ if i < minIndex {
+ break
+ }
+ }
+ return
+ }
+ // Some like it higher (CSV), some like it lower (JSON)
+ const baseCost = 6
+	// A base match is 4 bytes, with an additional encoding cost.
+ // Matches must be better than this.
for i := prevHead; tries > 0; tries-- {
if wEnd == win[i+length] {
n := matchLen(win[i:i+minMatchLook], wPos)
-
- if n > length && (n > minMatchLength || pos-i <= 4096) {
- length = n
- offset = pos - i
- ok = true
- if n >= nice {
- // The match is good enough that we don't try to find a better one.
- break
+ if n > length {
+ // Calculate gain. Estimate
+ newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
+
+ //fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]))
+ if newGain > cGain {
+ length = n
+ offset = pos - i
+ cGain = newGain
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
}
- wEnd = win[pos+n]
}
}
- if i == minIndex {
+ if i <= minIndex {
// hashPrev[i & windowMask] has already been overwritten, so stop now.
break
}
i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
- if i < minIndex || i < 0 {
+ if i < minIndex {
break
}
}
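In words: with long chains the encoder no longer keeps the longest match unconditionally; it keeps the candidate whose estimated gain (literal bits saved, minus the match's offset/length extra bits and the fixed baseCost) is highest. A toy sketch of that comparison (cost numbers are illustrative, not the real Huffman estimates):

```go
package main

import "fmt"

// estimatedGain mirrors the shape of the check above: bits we would have
// spent emitting the matched bytes as literals, minus the extra bits the
// match itself needs. Positive means the match is worth taking.
func estimatedGain(litBits, offsetExtra, lengthExtra int) int {
	const baseCost = 6 // fixed per-match overhead, as in the diff
	return litBits - offsetExtra - lengthExtra - baseCost
}

func main() {
	fmt.Println(estimatedGain(40, 5, 2)) // 27: take the match
	fmt.Println(estimatedGain(7, 10, 3)) // -12: keep the literals
}
```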
@@ -327,8 +371,13 @@ func (d *compressor) writeStoredBlock(buf []byte) error {
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
- b = b[:4]
- return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
+ return hash4u(binary.LittleEndian.Uint32(b), hashBits)
+}
+
+// hash4 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+ return (u * prime4bytes) >> (32 - h)
}
// bulkHash4 will compute hashes using the same
@@ -337,11 +386,12 @@ func bulkHash4(b []byte, dst []uint32) {
if len(b) < 4 {
return
}
- hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ hb := binary.LittleEndian.Uint32(b)
+
dst[0] = hash4u(hb, hashBits)
end := len(b) - 4 + 1
for i := 1; i < end; i++ {
- hb = (hb << 8) | uint32(b[i+3])
+ hb = (hb >> 8) | uint32(b[i+3])<<24
dst[i] = hash4u(hb, hashBits)
}
}
@@ -358,7 +408,6 @@ func (d *compressor) initDeflate() {
s.hashOffset = 1
s.length = minMatchLength - 1
s.offset = 0
- s.hash = 0
s.chainHead = -1
}
@@ -374,11 +423,19 @@ func (d *compressor) deflateLazy() {
if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
return
}
+ if d.windowEnd != s.index && d.chain > 100 {
+ // Get literal huffman coder.
+ if d.h == nil {
+ d.h = newHuffmanEncoder(maxFlateBlockTokens)
+ }
+ var tmp [256]uint16
+ for _, v := range d.window[s.index:d.windowEnd] {
+ tmp[v]++
+ }
+ d.h.generate(tmp[:], 15)
+ }
s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if s.index < s.maxInsertIndex {
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
- }
for {
if sanity && s.index > d.windowEnd {
@@ -410,11 +467,11 @@ func (d *compressor) deflateLazy() {
}
if s.index < s.maxInsertIndex {
// Update the hash
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
- ch := s.hashHead[s.hash&hashMask]
+ hash := hash4(d.window[s.index:])
+ ch := s.hashHead[hash]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
- s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
+ s.hashHead[hash] = uint32(s.index + s.hashOffset)
}
prevLength := s.length
prevOffset := s.offset
@@ -426,12 +483,37 @@ func (d *compressor) deflateLazy() {
}
if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
- if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
+ if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
s.length = newLength
s.offset = newOffset
}
}
+
if prevLength >= minMatchLength && s.length <= prevLength {
+ // Check for better match at end...
+ //
+ // checkOff must be >=2 since we otherwise risk checking s.index
+ // Offset of 2 seems to yield best results.
+ const checkOff = 2
+ prevIndex := s.index - 1
+ if prevIndex+prevLength+checkOff < s.maxInsertIndex {
+ end := lookahead
+ if lookahead > maxMatchLength {
+ end = maxMatchLength
+ }
+ end += prevIndex
+ idx := prevIndex + prevLength - (4 - checkOff)
+ h := hash4(d.window[idx:])
+ ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + (4 - checkOff)
+ if ch2 > minIndex {
+ length := matchLen(d.window[prevIndex:end], d.window[ch2:])
+ // It seems like a pure length metric is best.
+ if length > prevLength {
+ prevLength = length
+ prevOffset = prevIndex - ch2
+ }
+ }
+ }
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
@@ -466,7 +548,6 @@ func (d *compressor) deflateLazy() {
// Set the head of the hash chain to us.
s.hashHead[newH] = uint32(di + s.hashOffset)
}
- s.hash = newH
}
s.index = newIndex
@@ -479,6 +560,7 @@ func (d *compressor) deflateLazy() {
}
d.tokens.Reset()
}
+ s.ii = 0
} else {
// Reset, if we got a match this run.
if s.length >= minMatchLength {
@@ -498,13 +580,12 @@ func (d *compressor) deflateLazy() {
// If we have a long run of no matches, skip additional bytes
// Resets when s.ii overflows after 64KB.
- if s.ii > 31 {
- n := int(s.ii >> 5)
+ if n := int(s.ii) - d.chain; n > 0 {
+ n = 1 + int(n>>6)
for j := 0; j < n; j++ {
if s.index >= d.windowEnd-1 {
break
}
-
d.tokens.AddLiteral(d.window[s.index-1])
if d.tokens.n == maxFlateBlockTokens {
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
@@ -512,6 +593,14 @@ func (d *compressor) deflateLazy() {
}
d.tokens.Reset()
}
+ // Index...
+ if s.index < s.maxInsertIndex {
+ h := hash4(d.window[s.index:])
+ ch := s.hashHead[h]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[h] = uint32(s.index + s.hashOffset)
+ }
s.index++
}
// Flush last byte
@@ -611,7 +700,9 @@ func (d *compressor) write(b []byte) (n int, err error) {
}
n = len(b)
for len(b) > 0 {
- d.step(d)
+ if d.windowEnd == len(d.window) || d.sync {
+ d.step(d)
+ }
b = b[d.fill(d, b):]
if d.err != nil {
return 0, d.err
@@ -652,13 +743,13 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
level = 5
fallthrough
case level >= 1 && level <= 6:
- d.w.logNewTablePenalty = 8
+ d.w.logNewTablePenalty = 7
d.fast = newFastEnc(level)
d.window = make([]byte, maxStoreBlockSize)
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeFast
case 7 <= level && level <= 9:
- d.w.logNewTablePenalty = 10
+ d.w.logNewTablePenalty = 8
d.state = &advancedState{}
d.compressionLevel = levels[level]
d.initDeflate()
@@ -702,7 +793,6 @@ func (d *compressor) reset(w io.Writer) {
d.tokens.Reset()
s.length = minMatchLength - 1
s.offset = 0
- s.hash = 0
s.ii = 0
s.maxInsertIndex = 0
}
diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
index 71c75a065ea..bb36351a5af 100644
--- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go
+++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -7,19 +7,19 @@ package flate
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
-// * Literal insertions: Runs of one or more symbols are inserted into the data
-// stream as is. This is accomplished through the writeByte method for a
-// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
-// Any valid stream must start with a literal insertion if no preset dictionary
-// is used.
+// - Literal insertions: Runs of one or more symbols are inserted into the data
+// stream as is. This is accomplished through the writeByte method for a
+// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+// Any valid stream must start with a literal insertion if no preset dictionary
+// is used.
//
-// * Backward copies: Runs of one or more symbols are copied from previously
-// emitted data. Backward copies come as the tuple (dist, length) where dist
-// determines how far back in the stream to copy from and length determines how
-// many bytes to copy. Note that it is valid for the length to be greater than
-// the distance. Since LZ77 uses forward copies, that situation is used to
-// perform a form of run-length encoding on repeated runs of symbols.
-// The writeCopy and tryWriteCopy are used to implement this command.
+// - Backward copies: Runs of one or more symbols are copied from previously
+// emitted data. Backward copies come as the tuple (dist, length) where dist
+// determines how far back in the stream to copy from and length determines how
+// many bytes to copy. Note that it is valid for the length to be greater than
+// the distance. Since LZ77 uses forward copies, that situation is used to
+// perform a form of run-length encoding on repeated runs of symbols.
+// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
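The length-greater-than-distance case in the rewritten comment is what gives LZ77 its run-length behavior: the copy overlaps its own output. A minimal illustration (not the dictDecoder API):

```go
package main

import "fmt"

// lz77Copy appends length bytes, each copied from dist bytes back.
// When length > dist the source overlaps the destination, repeating
// the last dist bytes - the run-length case described above.
func lz77Copy(dst []byte, dist, length int) []byte {
	for i := 0; i < length; i++ {
		dst = append(dst, dst[len(dst)-dist])
	}
	return dst
}

func main() {
	out := lz77Copy([]byte("ab"), 2, 6)
	fmt.Println(string(out)) // abababab
}
```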
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index 347ac2c902e..24caf5f70b0 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -58,17 +58,6 @@ const (
prime8bytes = 0xcf1bbcdcb7a56463
)
-func load32(b []byte, i int) uint32 {
- // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
- b = b[i:]
- b = b[:4]
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load64(b []byte, i int) uint64 {
- return binary.LittleEndian.Uint64(b[i:])
-}
-
func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
@@ -77,10 +66,6 @@ func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
-func hash(u uint32) uint32 {
- return (u * 0x1e35a7bd) >> tableShift
-}
-
type tableEntry struct {
offset int32
}
@@ -104,7 +89,8 @@ func (e *fastGen) addBlock(src []byte) int32 {
}
// Move down
offset := int32(len(e.hist)) - maxMatchOffset
- copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ // copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
e.cur += offset
e.hist = e.hist[:maxMatchOffset]
}
@@ -114,39 +100,36 @@ func (e *fastGen) addBlock(src []byte) int32 {
return s
}
-// hash4 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4u(u uint32, h uint8) uint32 {
- return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32)
-}
-
type tableEntryPrev struct {
Cur tableEntry
Prev tableEntry
}
-// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4x64(u uint64, h uint8) uint32 {
- return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32)
-}
-
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
}
-// hash8 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash8(u uint64, h uint8) uint32 {
- return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64))
-}
-
-// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash6(u uint64, h uint8) uint32 {
- return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64))
+// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+ switch mls {
+ case 3:
+ return (uint32(u<<8) * prime3bytes) >> (32 - length)
+ case 5:
+ return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+ case 6:
+ return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+ case 7:
+ return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+ case 8:
+ return uint32((u * prime8bytes) >> (64 - length))
+ default:
+ return (uint32(u) * prime4bytes) >> (32 - length)
+ }
}
// matchlen will return the match length between offsets s and t in src.
@@ -179,7 +162,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
// matchlenLong will return the match length between offsets s and t in src.
// It is assumed that s > t, that t >= 0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
- if debugDecode {
+ if debugDeflate {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
@@ -213,26 +196,15 @@ func (e *fastGen) Reset() {
// matchLen returns the maximum length.
// 'a' must be the shorter of the two.
func matchLen(a, b []byte) int {
- b = b[:len(a)]
var checked int
- if len(a) >= 4 {
- // Try 4 bytes first
- if diff := binary.LittleEndian.Uint32(a) ^ binary.LittleEndian.Uint32(b); diff != 0 {
- return bits.TrailingZeros32(diff) >> 3
- }
- // Switch to 8 byte matching.
- checked = 4
- a = a[4:]
- b = b[4:]
- for len(a) >= 8 {
- b = b[:len(a)]
- if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
- return checked + (bits.TrailingZeros64(diff) >> 3)
- }
- checked += 8
- a = a[8:]
- b = b[8:]
+
+ for len(a) >= 8 {
+ if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+ return checked + (bits.TrailingZeros64(diff) >> 3)
}
+ checked += 8
+ a = a[8:]
+ b = b[8:]
}
b = b[:len(a)]
for i := range a {
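The rewritten matchLen compares 8 bytes at a time: XOR the two words, and if the result is nonzero, the index of the first differing byte is TrailingZeros64(diff)/8 (little endian). A self-contained version:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the length of the common prefix of a and b;
// a must not be longer than b, as in the code above.
func matchLen(a, b []byte) int {
	var checked int
	for len(a) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			// First nonzero bit / 8 = first mismatching byte (little endian).
			return checked + bits.TrailingZeros64(diff)>>3
		}
		checked += 8
		a, b = a[8:], b[8:]
	}
	for i := range a {
		if a[i] != b[i] {
			return checked + i
		}
	}
	return checked + len(a)
}

func main() {
	fmt.Println(matchLen([]byte("compression!"), []byte("compressible"))) // 9
}
```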
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 3ad5e980724..89a5dd89f98 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -8,6 +8,7 @@ import (
"encoding/binary"
"fmt"
"io"
+ "math"
)
const (
@@ -24,6 +25,10 @@ const (
codegenCodeCount = 19
badCode = 255
+	// maxPredefinedTokens is the maximum number of tokens
+	// up to which we check whether the fixed encoding is smaller.
+ maxPredefinedTokens = 250
+
// bufferFlushSize indicates the buffer size
// after which bytes are flushed to the writer.
// Should preferably be a multiple of 6, since
@@ -36,8 +41,11 @@ const (
bufferSize = bufferFlushSize + 8
)
+// Minimum length code that emits bits.
+const lengthExtraBitsMinCode = 8
+
// The number of extra bits needed by length code X - LENGTH_CODES_START.
-var lengthExtraBits = [32]int8{
+var lengthExtraBits = [32]uint8{
/* 257 */ 0, 0, 0,
/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
@@ -51,19 +59,22 @@ var lengthBase = [32]uint8{
64, 80, 96, 112, 128, 160, 192, 224, 255,
}
+// Minimum offset code that emits bits.
+const offsetExtraBitsMinCode = 4
+
// offset code word extra bits.
-var offsetExtraBits = [64]int8{
+var offsetExtraBits = [32]int8{
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
/* extended window */
- 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
+ 14, 14,
}
var offsetCombined = [32]uint32{}
func init() {
- var offsetBase = [64]uint32{
+ var offsetBase = [32]uint32{
/* normal deflate */
0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
@@ -73,17 +84,15 @@ func init() {
0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
/* extended window */
- 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
- 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
- 0x100000, 0x180000, 0x200000, 0x300000,
+ 0x008000, 0x00c000,
}
for i := range offsetCombined[:] {
// Don't use extended window values...
- if offsetBase[i] > 0x006000 {
+ if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
continue
}
- offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i])
+ offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
}
}
@@ -99,7 +108,7 @@ type huffmanBitWriter struct {
// Data waiting to be written is bytes[0:nbytes]
// and then the low nbits of bits.
bits uint64
- nbits uint16
+ nbits uint8
nbytes uint8
lastHuffMan bool
literalEncoding *huffmanEncoder
@@ -155,37 +164,33 @@ func (w *huffmanBitWriter) reset(writer io.Writer) {
w.lastHuffMan = false
}
-func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) {
- offsets, lits = true, true
+func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
a := t.offHist[:offsetCodeCount]
- b := w.offsetFreq[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- offsets = false
- break
+ b := w.offsetEncoding.codes
+ b = b[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].zero() {
+ return false
}
}
a = t.extraHist[:literalCount-256]
- b = w.literalFreq[256:literalCount]
+ b = w.literalEncoding.codes[256:literalCount]
b = b[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- lits = false
- break
+ for i, v := range a {
+ if v != 0 && b[i].zero() {
+ return false
}
}
- if lits {
- a = t.litHist[:]
- b = w.literalFreq[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- lits = false
- break
- }
+
+ a = t.litHist[:256]
+ b = w.literalEncoding.codes[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].zero() {
+ return false
}
}
- return
+ return true
}
func (w *huffmanBitWriter) flush() {
@@ -221,8 +226,8 @@ func (w *huffmanBitWriter) write(b []byte) {
_, w.err = w.writer.Write(b)
}
-func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
- w.bits |= uint64(b) << w.nbits
+func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
+ w.bits |= uint64(b) << (w.nbits & 63)
w.nbits += nb
if w.nbits >= 48 {
w.writeOutBits()
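writeBits above accumulates codes LSB-first into a 64-bit register and spills once 48 bits are pending, so flushes always write whole bytes. A stripped-down sketch of the same accumulator:

```go
package main

import (
	"bytes"
	"fmt"
)

type bitWriter struct {
	out   bytes.Buffer
	bits  uint64 // pending bits, LSB-first as in DEFLATE
	nbits uint8
}

func (w *bitWriter) writeBits(v uint64, nb uint8) {
	w.bits |= v << (w.nbits & 63)
	w.nbits += nb
	if w.nbits >= 48 {
		// Spill 6 whole bytes; the remainder stays in the register.
		var tmp [6]byte
		for i := range tmp {
			tmp[i] = byte(w.bits >> (8 * i))
		}
		w.out.Write(tmp[:])
		w.bits >>= 48
		w.nbits -= 48
	}
}

func (w *bitWriter) flush() {
	for w.nbits > 0 {
		w.out.WriteByte(byte(w.bits))
		w.bits >>= 8
		if w.nbits >= 8 {
			w.nbits -= 8
		} else {
			w.nbits = 0
		}
	}
}

func main() {
	var w bitWriter
	w.writeBits(0b101, 3)
	w.writeBits(0b11, 2)
	w.flush()
	fmt.Printf("%08b\n", w.out.Bytes()) // [00011101]
}
```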
@@ -260,9 +265,9 @@ func (w *huffmanBitWriter) writeBytes(bytes []byte) {
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker
//
-// numLiterals The number of literals in literalEncoding
-// numOffsets The number of offsets in offsetEncoding
-// litenc, offenc The literal and offset encoder to use
+// numLiterals The number of literals in literalEncoding
+// numOffsets The number of offsets in offsetEncoding
+// litenc, offenc The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
for i := range w.codegenFreq {
w.codegenFreq[i] = 0
@@ -275,12 +280,12 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE
// Copy the concatenated code sizes to codegen. Put a marker at the end.
cgnl := codegen[:numLiterals]
for i := range cgnl {
- cgnl[i] = uint8(litEnc.codes[i].len)
+ cgnl[i] = litEnc.codes[i].len()
}
cgnl = codegen[numLiterals : numLiterals+numOffsets]
for i := range cgnl {
- cgnl[i] = uint8(offEnc.codes[i].len)
+ cgnl[i] = offEnc.codes[i].len()
}
codegen[numLiterals+numOffsets] = badCode
@@ -423,8 +428,8 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
func (w *huffmanBitWriter) writeCode(c hcode) {
// The function does not get inlined if we "& 63" the shift.
- w.bits |= uint64(c.code) << w.nbits
- w.nbits += c.len
+ w.bits |= c.code64() << (w.nbits & 63)
+ w.nbits += c.len()
if w.nbits >= 48 {
w.writeOutBits()
}
@@ -455,9 +460,9 @@ func (w *huffmanBitWriter) writeOutBits() {
// Write the header of a dynamic Huffman block to the output stream.
//
-// numLiterals The number of literals specified in codegen
-// numOffsets The number of offsets specified in codegen
-// numCodegens The number of codegens used in codegen
+// numLiterals The number of literals specified in codegen
+// numOffsets The number of offsets specified in codegen
+// numCodegens The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
if w.err != nil {
return
@@ -472,7 +477,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n
w.writeBits(int32(numCodegens-4), 4)
for i := 0; i < numCodegens; i++ {
- value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
+ value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
w.writeBits(int32(value), 3)
}
@@ -566,7 +571,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
w.lastHeader = 0
}
numLiterals, numOffsets := w.indexTokens(tokens, false)
- w.generate(tokens)
+ w.generate()
var extraBits int
storedSize, storable := w.storedSize(input)
if storable {
@@ -577,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
// Fixed Huffman baseline.
var literalEncoding = fixedLiteralEncoding
var offsetEncoding = fixedOffsetEncoding
- var size = w.fixedSize(extraBits)
+ var size = math.MaxInt32
+ if tokens.n < maxPredefinedTokens {
+ size = w.fixedSize(extraBits)
+ }
// Dynamic Huffman?
var numCodegens int
@@ -595,7 +603,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
}
// Stored bytes?
- if storable && storedSize < size {
+ if storable && storedSize <= size {
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
return
@@ -634,22 +642,39 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
w.lastHeader = 0
w.lastHuffMan = false
}
- if !sync {
- tokens.Fill()
+
+ // fillReuse enables filling of empty values.
+ // This will make encodings always reusable without testing.
+	// However, this does not appear to benefit most cases.
+ const fillReuse = false
+
+ // Check if we can reuse...
+ if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
}
+
numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+ extraBits := 0
+ ssize, storable := w.storedSize(input)
+
+ const usePrefs = true
+ if storable || w.lastHeader > 0 {
+ extraBits = w.extraBitSize()
+ }
var size int
+
// Check if we should reuse.
if w.lastHeader > 0 {
// Estimate size for using a new table.
// Use the previous header size as the best estimate.
newSize := w.lastHeader + tokens.EstimatedBits()
- newSize += newSize >> w.logNewTablePenalty
+ newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty
// The estimated size is calculated as an optimal table.
// We add a penalty to make it more realistic and re-use a bit more.
- reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize()
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
// Check if a new table is better.
if newSize < reuseSize {
@@ -660,35 +685,83 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
} else {
size = reuseSize
}
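
Editorial note: the reuse heuristic above compares an optimistic estimate for a fresh table (EstimatedBits plus the previous header size as a proxy, plus one EOB code) against the exact cost of reusing the current codes, and inflates the fresh estimate by roughly 2^-logNewTablePenalty to bias toward reuse. A hedged sketch of the penalty arithmetic:

// penalized mirrors newSize += newSize >> logNewTablePenalty: the raw
// estimate is an optimal-table lower bound, so add a fraction of it
// before comparing against the exact reuse cost.
func penalized(newSize, logNewTablePenalty int) int {
	return newSize + newSize>>logNewTablePenalty
}

// e.g. a penalty of 4 adds ~6.25%: penalized(1600, 4) == 1700.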
+
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
+ }
// Check if we get a reasonable size decrease.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ if storable && ssize <= size {
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
- w.lastHeader = 0
return
}
}
// We want a new block/table
if w.lastHeader == 0 {
- w.generate(tokens)
+ if fillReuse && !sync {
+ w.fillTokens()
+ numLiterals, numOffsets = maxNumLit, maxNumDist
+ } else {
+ w.literalFreq[endBlockMarker] = 1
+ }
+
+ w.generate()
// Generate codegen and codegenFrequencies, which indicates how to encode
// the literalEncoding and the offsetEncoding.
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
w.codegenEncoding.generate(w.codegenFreq[:], 7)
+
var numCodegens int
- size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize())
- // Store bytes, if we don't get a reasonable improvement.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ if fillReuse && !sync {
+ // Reindex for accurate size...
+ w.indexTokens(tokens, true)
+ }
+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ // Store predefined, if we don't get a reasonable improvement.
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+ // Store bytes, if we don't get an improvement.
+ if storable && ssize <= preSize {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
+ }
+
+ if storable && ssize <= size {
+ // Store bytes, if we don't get an improvement.
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
- w.lastHeader = 0
return
}
// Write Huffman table.
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- w.lastHeader, _ = w.headerSize()
+ if !sync {
+ w.lastHeader, _ = w.headerSize()
+ }
w.lastHuffMan = false
}
@@ -699,14 +772,29 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
}
+func (w *huffmanBitWriter) fillTokens() {
+ for i, v := range w.literalFreq[:literalCount] {
+ if v == 0 {
+ w.literalFreq[i] = 1
+ }
+ }
+ for i, v := range w.offsetFreq[:offsetCodeCount] {
+ if v == 0 {
+ w.offsetFreq[i] = 1
+ }
+ }
+}
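
Editorial note: fillTokens gives every literal and offset symbol a frequency of at least one, so the table generated from the filled histograms assigns a code to every symbol and a later block can never hit the zero-length bailout in canReuseBits. The same idea as a standalone sketch (hypothetical helper, not from the patch):

// ensureNonZero bumps empty histogram slots to 1 so a Huffman table
// built from the histogram covers the whole alphabet and stays
// reusable for blocks containing symbols this block lacked.
func ensureNonZero(hist []uint16) {
	for i, v := range hist {
		if v == 0 {
			hist[i] = 1
		}
	}
}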
+
// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
- copy(w.literalFreq[:], t.litHist[:])
- copy(w.literalFreq[256:], t.extraHist[:])
- copy(w.offsetFreq[:], t.offHist[:offsetCodeCount])
+ //copy(w.literalFreq[:], t.litHist[:])
+ *(*[256]uint16)(w.literalFreq[:]) = t.litHist
+ //copy(w.literalFreq[256:], t.extraHist[:])
+ *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
+ w.offsetFreq = t.offHist
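
Editorial note: the `*(*[256]uint16)(w.literalFreq[:]) = t.litHist` form relies on the slice-to-array-pointer conversion added in Go 1.17: the conversion panics if the slice is too short, and the assignment then compiles to a single fixed-size block copy instead of the generic copy loop. A minimal sketch, assuming Go 1.17+:

package main

import "fmt"

func main() {
	src := [4]uint16{1, 2, 3, 4}
	dst := make([]uint16, 8)

	// Equivalent to copy(dst[:4], src[:]); panics at runtime if
	// len(dst) < 4, otherwise one bounds check and a block move.
	*(*[4]uint16)(dst) = src

	fmt.Println(dst) // [1 2 3 4 0 0 0 0]
}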
if t.n == 0 {
return
@@ -733,7 +821,7 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num
return
}
-func (w *huffmanBitWriter) generate(t *tokens) {
+func (w *huffmanBitWriter) generate() {
w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
}
@@ -765,11 +853,11 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
for _, t := range tokens {
- if t < matchType {
+ if t < 256 {
//w.writeCode(lits[t.literal()])
- c := lits[t.literal()]
- bits |= uint64(c.code) << nbits
- nbits += c.len
+ c := lits[t]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -790,14 +878,14 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
// Write the length
length := t.length()
- lengthCode := lengthCode(length)
+ lengthCode := lengthCode(length) & 31
if false {
- w.writeCode(lengths[lengthCode&31])
+ w.writeCode(lengths[lengthCode])
} else {
// inlined
- c := lengths[lengthCode&31]
- bits |= uint64(c.code) << nbits
- nbits += c.len
+ c := lengths[lengthCode]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -815,11 +903,11 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
}
}
- extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
- if extraLengthBits > 0 {
+ if lengthCode >= lengthExtraBitsMinCode {
+ extraLengthBits := lengthExtraBits[lengthCode]
//w.writeBits(extraLength, extraLengthBits)
- extraLength := int32(length - lengthBase[lengthCode&31])
- bits |= uint64(extraLength) << nbits
+ extraLength := int32(length - lengthBase[lengthCode])
+ bits |= uint64(extraLength) << (nbits & 63)
nbits += extraLengthBits
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -839,15 +927,14 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
}
// Write the offset
offset := t.offset()
- offsetCode := offset >> 16
- offset &= matchOffsetOnlyMask
+ offsetCode := (offset >> 16) & 31
if false {
- w.writeCode(offs[offsetCode&31])
+ w.writeCode(offs[offsetCode])
} else {
// inlined
c := offs[offsetCode]
- bits |= uint64(c.code) << nbits
- nbits += c.len
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -864,11 +951,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
}
}
}
- offsetComb := offsetCombined[offsetCode]
- if offsetComb > 1<<16 {
+
+ if offsetCode >= offsetExtraBitsMinCode {
+ offsetComb := offsetCombined[offsetCode]
//w.writeBits(extraOffset, extraOffsetBits)
- bits |= uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits
- nbits += uint16(offsetComb >> 16)
+ bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
+ nbits += uint8(offsetComb)
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -923,8 +1011,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
}
}
- // Fill is rarely better...
- const fill = false
const numLiterals = endBlockMarker + 1
const numOffsets = 1
@@ -933,26 +1019,46 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// Assume header is around 70 bytes:
// https://stackoverflow.com/a/25454430
const guessHeaderSizeBits = 70 * 8
- histogram(input, w.literalFreq[:numLiterals], fill)
- w.literalFreq[endBlockMarker] = 1
- w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
- if fill {
- // Clear fill...
- for i := range w.literalFreq[:numLiterals] {
- w.literalFreq[i] = 0
+ histogram(input, w.literalFreq[:numLiterals])
+ ssize, storable := w.storedSize(input)
+ if storable && len(input) > 1024 {
+ // Quick check for incompressible content.
+ abs := float64(0)
+ avg := float64(len(input)) / 256
+ max := float64(len(input) * 2)
+ for _, v := range w.literalFreq[:256] {
+ diff := float64(v) - avg
+ abs += diff * diff
+ if abs > max {
+ break
+ }
+ }
+ if abs < max {
+ if debugDeflate {
+ fmt.Println("stored", abs, "<", max)
+ }
+ // No chance we can compress this...
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
}
- histogram(input, w.literalFreq[:numLiterals], false)
}
+ w.literalFreq[endBlockMarker] = 1
+ w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
- estBits += w.lastHeader
- if w.lastHeader == 0 {
- estBits += guessHeaderSizeBits
+ if estBits < math.MaxInt32 {
+ estBits += w.lastHeader
+ if w.lastHeader == 0 {
+ estBits += guessHeaderSizeBits
+ }
+ estBits += estBits >> w.logNewTablePenalty
}
- estBits += estBits >> w.logNewTablePenalty
// Store bytes, if we don't get a reasonable improvement.
- ssize, storable := w.storedSize(input)
if storable && ssize <= estBits {
+ if debugDeflate {
+ fmt.Println("stored,", ssize, "<=", estBits)
+ }
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
return
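
Editorial note: the quick check added earlier in this hunk is a cheap variance test against a uniform byte distribution: it sums squared deviations of the byte histogram from len(input)/256 and stores the block raw when the sum stays under 2*len(input), on the assumption that near-uniform data will not Huffman-compress. A hedged standalone sketch:

// looksIncompressible mirrors the screen above: near-uniform byte
// frequencies (low sum of squared deviations) suggest Huffman coding
// will not pay for its header, so the caller should store raw.
func looksIncompressible(hist []uint16, inputLen int) bool {
	avg := float64(inputLen) / 256
	limit := float64(inputLen * 2)
	sum := float64(0)
	for _, v := range hist[:256] {
		d := float64(v) - avg
		sum += d * d
		if sum > limit {
			return false // clearly skewed: worth compressing
		}
	}
	return sum < limit
}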
@@ -963,7 +1069,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
if estBits < reuseSize {
if debugDeflate {
- //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+ fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
}
// We owe an EOB
w.writeCode(w.literalEncoding.codes[endBlockMarker])
@@ -996,14 +1102,44 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
encoding := w.literalEncoding.codes[:256]
// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
- for _, t := range input {
- // Bitwriting inlined, ~30% speedup
- c := encoding[t]
- bits |= uint64(c.code) << nbits
- nbits += c.len
- if debugDeflate {
- count += int(c.len)
+
+ if debugDeflate {
+ count -= int(nbytes)*8 + int(nbits)
+ }
+ // Unroll, write 3 codes/loop.
+ // Fastest number of unrolls.
+ for len(input) > 3 {
+ // We must have at least 48 bits free.
+ if nbits >= 8 {
+ n := nbits >> 3
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ bits >>= (n * 8) & 63
+ nbits -= n * 8
+ nbytes += n
+ }
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
}
+ a, b := encoding[input[0]], encoding[input[1]]
+ bits |= a.code64() << (nbits & 63)
+ bits |= b.code64() << ((nbits + a.len()) & 63)
+ c := encoding[input[2]]
+ nbits += b.len() + a.len()
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ input = input[3:]
+ }
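
Editorial note: the unroll is safe because the byte-flush at the top of the loop leaves at most 7 pending bits and these literal codes are generated with a 15-bit limit, so 7 + 3*15 = 52 <= 64 and three codes can be ORed in before the next flush. The bound as a compile-time assertion (illustrative only):

const (
	maxPendingAfterFlush = 7  // nbits after flushing whole bytes
	maxLitCodeLen        = 15 // generate(..., 15) caps code lengths
	codesPerIteration    = 3
)

// Negative array length = compile error if the bound were violated.
var _ = [64 - (maxPendingAfterFlush + codesPerIteration*maxLitCodeLen)]struct{}{}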
+
+ // Remaining...
+ for _, t := range input {
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -1015,17 +1151,34 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
nbytes = 0
return
}
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
_, w.err = w.writer.Write(w.bytes[:nbytes])
nbytes = 0
}
}
+ // Bitwriting inlined, ~30% speedup
+ c := encoding[t]
+ bits |= c.code64() << (nbits & 63)
+
+ nbits += c.len()
+ if debugDeflate {
+ count += int(c.len())
+ }
}
// Restore...
w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
if debugDeflate {
- fmt.Println("wrote", count/8, "bytes")
+ nb := count + int(nbytes)*8 + int(nbits)
+ fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
}
+ // Flush if needed to have space.
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+
if eof || sync {
w.writeCode(w.literalEncoding.codes[endBlockMarker])
w.lastHeader = 0
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
index 67b2b387284..be7b58b473f 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -16,8 +16,18 @@ const (
)
// hcode is a huffman code with a bit code and bit length.
-type hcode struct {
- code, len uint16
+type hcode uint32
+
+func (h hcode) len() uint8 {
+ return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+ return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+ return h == 0
}
type huffmanEncoder struct {
@@ -56,9 +66,12 @@ type levelInfo struct {
}
// set sets the code and length of an hcode.
-func (h *hcode) set(code uint16, length uint16) {
- h.len = length
- h.code = code
+func (h *hcode) set(code uint16, length uint8) {
+ *h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+ return hcode(length) | (hcode(code) << 8)
}
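
Editorial note: collapsing hcode from a two-field struct into a packed uint32 (length in the low 8 bits, code above) keeps it in a single register, lets the zero value mean "no code", and turns code64 into a shift instead of a field load plus widening. Round-trip sketch of the packing:

package main

import "fmt"

type hcode uint32 // low 8 bits: bit length, upper 24 bits: code

func newhcode(code uint16, length uint8) hcode { return hcode(length) | hcode(code)<<8 }
func (h hcode) len() uint8                     { return uint8(h) }
func (h hcode) code64() uint64                 { return uint64(h >> 8) }

func main() {
	h := newhcode(0x1abc, 13)
	fmt.Println(h.code64() == 0x1abc, h.len() == 13, hcode(0) == 0) // true true true
}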
func reverseBits(number uint16, bitLength byte) uint16 {
@@ -80,7 +93,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
var ch uint16
for ch = 0; ch < literalCount; ch++ {
var bits uint16
- var size uint16
+ var size uint8
switch {
case ch < 144:
// size 8, 000110000 .. 10111111
@@ -99,7 +112,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
bits = ch + 192 - 280
size = 8
}
- codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
+ codes[ch] = newhcode(reverseBits(bits, size), size)
}
return h
}
@@ -108,7 +121,7 @@ func generateFixedOffsetEncoding() *huffmanEncoder {
h := newHuffmanEncoder(30)
codes := h.codes
for ch := range codes {
- codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
+ codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
}
return h
}
@@ -120,7 +133,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int {
var total int
for i, f := range freq {
if f != 0 {
- total += int(f) * int(h.codes[i].len)
+ total += int(f) * int(h.codes[i].len())
}
}
return total
@@ -129,9 +142,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int {
func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
var total int
for _, f := range b {
- if f != 0 {
- total += int(h.codes[f].len)
- }
+ total += int(h.codes[f].len())
}
return total
}
@@ -142,10 +153,10 @@ func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
for i, f := range freq {
if f != 0 {
code := h.codes[i]
- if code.len == 0 {
+ if code.zero() {
return math.MaxInt32
}
- total += int(f) * int(code.len)
+ total += int(f) * int(code.len())
}
}
return total
@@ -157,13 +168,18 @@ func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list An array of the literals with non-zero frequencies
-// and their associated frequencies. The array is in order of increasing
-// frequency, and has as its last element a special element with frequency
-// MaxInt32
+//
+// and their associated frequencies. The array is in order of increasing
+// frequency, and has as its last element a special element with frequency
+// MaxInt32
+//
// maxBits The maximum number of bits that should be used to encode any literal.
-// Must be less than 16.
+//
+// Must be less than 16.
+//
// return An integer array in which array[i] indicates the number of literals
-// that should be encoded in i bits.
+//
+// that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
if maxBits >= maxBitsLimit {
panic("flate: maxBits too large")
@@ -189,14 +205,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// of the level j ancestor.
var leafCounts [maxBitsLimit][maxBitsLimit]int32
+ // Descending to only have 1 bounds check.
+ l2f := int32(list[2].freq)
+ l1f := int32(list[1].freq)
+ l0f := int32(list[0].freq) + int32(list[1].freq)
+
for level := int32(1); level <= maxBits; level++ {
// For every level, the first two items are the first two characters.
// We initialize the levels as if we had already figured this out.
levels[level] = levelInfo{
level: level,
- lastFreq: int32(list[1].freq),
- nextCharFreq: int32(list[2].freq),
- nextPairFreq: int32(list[0].freq) + int32(list[1].freq),
+ lastFreq: l1f,
+ nextCharFreq: l2f,
+ nextPairFreq: l0f,
}
leafCounts[level][level] = 2
if level == 1 {
@@ -207,8 +228,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// We need a total of 2*n - 2 items at top level and have already generated 2.
levels[maxBits].needed = 2*n - 4
- level := maxBits
- for {
+ level := uint32(maxBits)
+ for level < 16 {
l := &levels[level]
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
// We've run out of both leafs and pairs.
@@ -240,7 +261,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// more values in the level below
l.lastFreq = l.nextPairFreq
// Take leaf counts from the lower level, except counts[level] remains the same.
- copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ if true {
+ save := leafCounts[level][level]
+ leafCounts[level] = leafCounts[level-1]
+ leafCounts[level][level] = save
+ } else {
+ copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ }
levels[l.level-1].needed = 2
}
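
Editorial note: `leafCounts[level] = leafCounts[level-1]` copies the whole fixed-size array by value, one block move with a single bounds check, where the old sliced copy handled a variable-length prefix; saving and restoring the diagonal element preserves the old semantics. The pattern in isolation (hypothetical helper):

// copyRowKeepDiagonal mirrors the leafCounts update: replace row dst
// with row src wholesale, but keep dst's own diagonal entry.
func copyRowKeepDiagonal(m *[16][16]int32, dst, src int) {
	save := m[dst][dst]
	m[dst] = m[src] // whole-array assignment: single block copy
	m[dst][dst] = save
}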
@@ -298,7 +325,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
sortByLiteral(chunk)
for _, node := range chunk {
- h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
+ h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
code++
}
list = list[0 : len(list)-int(bits)]
@@ -311,6 +338,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
// maxBits The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
list := h.freqcache[:len(freq)+1]
+ codes := h.codes[:len(freq)]
// Number of non-zero literals
count := 0
// Set list to be the set of all non-zero literals and their frequencies
@@ -319,11 +347,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
list[count] = literalNode{uint16(i), f}
count++
} else {
- list[count] = literalNode{}
- h.codes[i].len = 0
+ codes[i] = 0
}
}
- list[len(freq)] = literalNode{}
+ list[count] = literalNode{}
list = list[:count]
if count <= 2 {
@@ -354,21 +381,37 @@ func atLeastOne(v float32) float32 {
return v
}
-// Unassigned values are assigned '1' in the histogram.
-func fillHist(b []uint16) {
- for i, v := range b {
- if v == 0 {
- b[i] = 1
+func histogram(b []byte, h []uint16) {
+ if true && len(b) >= 8<<10 {
+ // Split for bigger inputs
+ histogramSplit(b, h)
+ } else {
+ h = h[:256]
+ for _, t := range b {
+ h[t]++
}
}
}
-func histogram(b []byte, h []uint16, fill bool) {
+func histogramSplit(b []byte, h []uint16) {
+ // Tested, and slightly faster than 2-way.
+ // Writing to separate arrays and combining is also slightly slower.
h = h[:256]
- for _, t := range b {
- h[t]++
+ for len(b)&3 != 0 {
+ h[b[0]]++
+ b = b[1:]
}
- if fill {
- fillHist(h)
+ n := len(b) / 4
+ x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
+ y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
+ for i, t := range x {
+ v0 := &h[t]
+ v1 := &h[y[i]]
+ v3 := &h[w[i]]
+ v2 := &h[z[i]]
+ *v0++
+ *v1++
+ *v2++
+ *v3++
}
}
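
Editorial note: the split histogram walks four quarters of the input in lockstep so the four counter increments form independent dependency chains; a single loop over runs of equal bytes keeps reloading the counter it just stored. A hedged standalone sketch of the same shape:

package main

import "fmt"

// count4 tallies b into h using four interleaved streams, mirroring
// histogramSplit above (tail handling simplified for clarity).
func count4(b []byte, h *[256]uint32) {
	n := len(b) / 4
	x, y, z, w := b[:n], b[n:2*n], b[2*n:3*n], b[3*n:4*n]
	for i := range x {
		h[x[i]]++
		h[y[i]]++
		h[z[i]]++
		h[w[i]]++
	}
	for _, c := range b[4*n:] { // leftover bytes
		h[c]++
	}
}

func main() {
	var h [256]uint32
	count4([]byte("abracadabra"), &h)
	fmt.Println(h['a'], h['b'], h['r']) // 5 2 2
}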
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index d1edb356c4b..414c0bea9fa 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -36,6 +36,13 @@ type lengthExtra struct {
var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
+var bitMask32 = [32]uint32{
+ 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+ 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+ 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+ 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
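
Editorial note: bitMask32 trades a variable shift for a table load, so `fb & bitMask32[n]` extracts the low n bits exactly like `fb & (1<<n - 1)` while keeping the hot decode loop free of shift-by-register sequences. Equivalence check as a sketch:

package main

import "fmt"

var mask = func() (m [32]uint32) {
	for i := range m {
		m[i] = 1<<uint(i) - 1
	}
	return
}()

func main() {
	for n := uint(0); n < 32; n++ {
		if mask[n] != 1<<n-1 {
			panic("mask mismatch")
		}
	}
	fmt.Println("mask[n] == 1<<n - 1 for all n in [0,32)")
}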
+
// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder
@@ -328,11 +335,17 @@ func (f *decompressor) nextBlock() {
switch typ {
case 0:
f.dataBlock()
+ if debugDecode {
+ fmt.Println("stored block")
+ }
case 1:
// compressed, fixed Huffman tables
f.hl = &fixedHuffmanDecoder
f.hd = nil
f.huffmanBlockDecoder()()
+ if debugDecode {
+ fmt.Println("predefinied huffman block")
+ }
case 2:
// compressed, dynamic Huffman tables
if f.err = f.readHuffman(); f.err != nil {
@@ -341,6 +354,9 @@ func (f *decompressor) nextBlock() {
f.hl = &f.h1
f.hd = &f.h2
f.huffmanBlockDecoder()()
+ if debugDecode {
+ fmt.Println("dynamic huffman block")
+ }
default:
// 3 is reserved.
if debugDecode {
@@ -550,221 +566,6 @@ func (f *decompressor) readHuffman() error {
return nil
}
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBlockGeneric() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := f.r.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var n uint // number of bits extra
- var length int
- var err error
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBlockGeneric
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
- case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
- for f.nb < n {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- }
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
- }
-
- var dist uint32
- if f.hd == nil {
- for f.nb < 5 {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- }
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- sym, err := f.huffSym(f.hd)
- if err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
- }
- f.err = err
- return
- }
- dist = uint32(sym)
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
- f.nb -= nb
- dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
// Copy a single uncompressed data block from input to output.
func (f *decompressor) dataBlock() {
// Uncompressed.
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
index cc6db27925c..61342b6b88f 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -21,6 +21,11 @@ func (f *decompressor) huffmanBytesBuffer() {
)
fr := f.r.(*bytes.Buffer)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -39,41 +44,35 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -83,15 +82,17 @@ readLiteral:
var length int
switch {
case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesBuffer
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -101,9 +102,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -111,25 +113,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -137,12 +141,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -152,38 +156,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -197,9 +198,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -223,9 +227,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@@ -238,20 +243,22 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
// Decode a single Huffman block from f.
@@ -265,6 +272,11 @@ func (f *decompressor) huffmanBytesReader() {
)
fr := f.r.(*bytes.Reader)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -283,41 +295,35 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -327,15 +333,17 @@ readLiteral:
var length int
switch {
case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesReader
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -345,9 +353,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -355,25 +364,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -381,12 +392,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -396,38 +407,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -441,9 +449,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -467,9 +478,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@@ -482,20 +494,22 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesReader // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
// Decode a single Huffman block from f.
@@ -509,6 +523,11 @@ func (f *decompressor) huffmanBufioReader() {
)
fr := f.r.(*bufio.Reader)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -527,41 +546,35 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -571,15 +584,17 @@ readLiteral:
var length int
switch {
case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBufioReader
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -589,9 +604,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -599,25 +615,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -625,12 +643,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -640,38 +658,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -685,9 +700,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -711,9 +729,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@@ -726,20 +745,22 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBufioReader // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
// Decode a single Huffman block from f.
@@ -753,6 +774,11 @@ func (f *decompressor) huffmanStringsReader() {
)
fr := f.r.(*strings.Reader)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -771,41 +797,286 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -815,15 +1086,17 @@ readLiteral:
var length int
switch {
case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanStringsReader
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanGenericReader
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -833,9 +1106,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -843,25 +1117,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -869,12 +1145,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -884,38 +1160,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -929,9 +1202,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
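// Aside (not part of the diff): worked example of the distance computation in
// the hunk above, per RFC 1951 section 3.2.5. Codes 0-3 map directly to
// distances 1-4; a higher code carries nb = (code-2)>>1 extra bits, with base
// 1<<(nb+1) + 1. E.g. code 6: nb = 2, distances 9-12 depending on the two
// extra bits read from the stream.
func distForCode(code, extraBits uint32) uint32 {
	if code < 4 {
		return code + 1
	}
	nb := (code - 2) >> 1     // number of extra bits to read
	extra := (code & 1) << nb // low bit of the code becomes the top extra bit
	extra |= extraBits        // extraBits is assumed < 1<<nb
	return 1<<(nb+1) + 1 + extra
}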
@@ -955,9 +1231,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@@ -970,20 +1247,22 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanGenericReader // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
func (f *decompressor) huffmanBlockDecoder() func() {
@@ -996,7 +1275,9 @@ func (f *decompressor) huffmanBlockDecoder() func() {
return f.huffmanBufioReader
case *strings.Reader:
return f.huffmanStringsReader
+ case Reader:
+ return f.huffmanGenericReader
default:
- return f.huffmanBlockGeneric
+ return f.huffmanGenericReader
}
}
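// Aside (not part of the diff): the rewrite above hoists f.b/f.nb into the
// locals fb/fnb because the Go compiler keeps plain locals in registers but
// not struct fields reached through a pointer. A minimal sketch of the
// pattern, with a hypothetical bitSource type standing in for decompressor:
type bitSource struct {
	b  uint32
	nb uint
}

func countSetBits(f *bitSource) (n int) {
	fb, fnb := f.b, f.nb // hoist fields into registers once
	for fnb > 0 {
		n += int(fb & 1)
		fb >>= 1
		fnb--
	}
	f.b, f.nb = fb, fnb // write back on every exit path, as the diff does
	return n
}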
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
index 1e5eea3968a..703b9a89aa3 100644
--- a/vendor/github.com/klauspost/compress/flate/level1.go
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -1,6 +1,10 @@
package flate
-import "fmt"
+import (
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+)
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
@@ -15,6 +19,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashBytes = 5
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
@@ -64,7 +69,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
- cv := load3232(src, s)
+ cv := load6432(src, s)
for {
const skipLog = 5
@@ -73,7 +78,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
nextS := s
var candidate tableEntry
for {
- nextHash := hash(cv)
+ nextHash := hashLen(cv, tableBits, hashBytes)
candidate = e.table[nextHash]
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
@@ -82,16 +87,16 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hash(uint32(now))
+ nextHash = hashLen(now, tableBits, hashBytes)
offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
// Do one right away...
- cv = uint32(now)
+ cv = now
s = nextS
nextS++
candidate = e.table[nextHash]
@@ -99,11 +104,11 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
- cv = uint32(now)
+ cv = now
s = nextS
}
@@ -116,7 +121,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
// Extend the 4-byte match as long as possible.
t := candidate.offset - e.cur
- l := e.matchlenLong(s+4, t+4, src) + 4
+ var l = int32(4)
+ if false {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else {
+ // inlined:
+ a := src[s+4:]
+ b := src[t+4:]
+ for len(a) >= 8 {
+ if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+ l += int32(bits.TrailingZeros64(diff) >> 3)
+ break
+ }
+ l += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ if len(a) < 8 {
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ l++
+ }
+ }
+ }
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
@@ -125,11 +155,43 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
// Save the match found
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ if false {
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ } else {
+ // Inlined...
+ xoffset := uint32(s - t - baseMatchOffset)
+ xlength := l
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ dst.extraHist[lengthCodes1[uint8(xl)]]++
+ dst.offHist[oc]++
+ dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ dst.n++
+ }
+ }
s += l
nextEmit = s
if nextS >= s {
@@ -137,9 +199,9 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
}
if s >= sLimit {
// Index first pair after match end.
- if int(s+l+4) < len(src) {
- cv := load3232(src, s)
- e.table[hash(cv)] = tableEntry{offset: s + e.cur}
+ if int(s+l+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
}
@@ -152,16 +214,16 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
// three load32 calls.
x := load6432(src, s-2)
o := e.cur + s - 2
- prevHash := hash(uint32(x))
+ prevHash := hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntry{offset: o}
x >>= 16
- currHash := hash(uint32(x))
+ currHash := hashLen(x, tableBits, hashBytes)
candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
- cv = uint32(x >> 8)
+ cv = x >> 8
s++
break
}
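// Aside (not part of the diff): a minimal sketch of the match-length trick
// inlined above. Equal 8-byte little-endian words XOR to zero, and the index
// of the first differing byte is the trailing-zero count of the XOR divided
// by 8 (>>3). Requires: import ("encoding/binary"; "math/bits")
func matchLenSketch(a, b []byte) (n int) {
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	// Tail: compare the remaining bytes one by one.
	for i := 0; i < len(a) && i < len(b); i++ {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}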
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
index 234c4389ab3..876dfbe3054 100644
--- a/vendor/github.com/klauspost/compress/flate/level2.go
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -16,6 +16,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashBytes = 5
)
if debugDeflate && e.cur < 0 {
@@ -66,7 +67,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
- cv := load3232(src, s)
+ cv := load6432(src, s)
for {
// When should we start skipping if we haven't found matches in a long while.
const skipLog = 5
@@ -75,7 +76,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
nextS := s
var candidate tableEntry
for {
- nextHash := hash4u(cv, bTableBits)
+ nextHash := hashLen(cv, bTableBits, hashBytes)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
@@ -84,16 +85,16 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
candidate = e.table[nextHash]
now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hash4u(uint32(now), bTableBits)
+ nextHash = hashLen(now, bTableBits, hashBytes)
offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
// Do one right away...
- cv = uint32(now)
+ cv = now
s = nextS
nextS++
candidate = e.table[nextHash]
@@ -101,10 +102,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
break
}
- cv = uint32(now)
+ cv = now
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
@@ -134,7 +135,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
@@ -146,9 +155,9 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
if s >= sLimit {
// Index first pair after match end.
- if int(s+l+4) < len(src) {
- cv := load3232(src, s)
- e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur}
+ if int(s+l+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
}
@@ -156,15 +165,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
// Store every second hash in-between, but offset by 1.
for i := s - l + 2; i < s-5; i += 7 {
x := load6432(src, i)
- nextHash := hash4u(uint32(x), bTableBits)
+ nextHash := hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i}
// Skip one
x >>= 16
- nextHash = hash4u(uint32(x), bTableBits)
+ nextHash = hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
// Skip one
x >>= 16
- nextHash = hash4u(uint32(x), bTableBits)
+ nextHash = hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
}
@@ -176,17 +185,17 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
// three load32 calls.
x := load6432(src, s-2)
o := e.cur + s - 2
- prevHash := hash4u(uint32(x), bTableBits)
- prevHash2 := hash4u(uint32(x>>8), bTableBits)
+ prevHash := hashLen(x, bTableBits, hashBytes)
+ prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
e.table[prevHash] = tableEntry{offset: o}
e.table[prevHash2] = tableEntry{offset: o + 1}
- currHash := hash4u(uint32(x>>16), bTableBits)
+ currHash := hashLen(x>>16, bTableBits, hashBytes)
candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
- cv = uint32(x >> 24)
+ cv = x >> 24
s++
break
}
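// Aside (not part of the diff): the "store every second hash in-between" loop
// above indexes positions inside a just-emitted match so later input can
// still anchor within it, without paying to hash every byte. Generic sketch;
// the table is assumed to be sized to the hash output range, and src is
// assumed long enough past i for the hash's load.
func indexInBetween(table []int32, src []byte, cur, s, l int32, hash func([]byte) uint32) {
	for i := s - l + 2; i < s-5; i += 7 {
		// The vendored code stores offsets i, i+2 and i+4 from one 64-bit
		// load; storing just i keeps the sketch short.
		table[hash(src[i:])] = cur + i
	}
}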
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
index c22b4244a5c..7aa2b72a129 100644
--- a/vendor/github.com/klauspost/compress/flate/level3.go
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -5,14 +5,17 @@ import "fmt"
// fastEncL3
type fastEncL3 struct {
fastGen
- table [tableSize]tableEntryPrev
+ table [1 << 16]tableEntryPrev
}
// Encode uses a similar algorithm to level 2, will check up to two candidates.
func (e *fastEncL3) Encode(dst *tokens, src []byte) {
const (
- inputMargin = 8 - 1
+ inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ tableBits = 16
+ tableSize = 1 << tableBits
+ hashBytes = 5
)
if debugDeflate && e.cur < 0 {
@@ -67,20 +70,20 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
- cv := load3232(src, s)
+ cv := load6432(src, s)
for {
- const skipLog = 6
+ const skipLog = 7
nextS := s
var candidate tableEntry
for {
- nextHash := hash(cv)
+ nextHash := hashLen(cv, tableBits, hashBytes)
s = nextS
nextS = s + 1 + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
candidates := e.table[nextHash]
- now := load3232(src, nextS)
+ now := load6432(src, nextS)
// Safe offset distance until s + 4...
minOffset := e.cur + s - (maxMatchOffset - 4)
@@ -94,8 +97,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
continue
}
- if cv == load3232(src, candidate.offset-e.cur) {
- if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) {
+ if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
break
}
// Both match and are valid, pick longest.
@@ -110,7 +113,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
// We only check if value mismatches.
// Offset will always be invalid in other cases.
candidate = candidates.Prev
- if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
break
}
}
@@ -141,7 +144,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
@@ -154,9 +165,9 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
if s >= sLimit {
t += l
// Index first pair after match end.
- if int(t+4) < len(src) && t > 0 {
- cv := load3232(src, t)
- nextHash := hash(cv)
+ if int(t+8) < len(src) && t > 0 {
+ cv = load6432(src, t)
+ nextHash := hashLen(cv, tableBits, hashBytes)
e.table[nextHash] = tableEntryPrev{
Prev: e.table[nextHash].Cur,
Cur: tableEntry{offset: e.cur + t},
@@ -165,32 +176,33 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
goto emitRemainder
}
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-3 to s.
- x := load6432(src, s-3)
- prevHash := hash(uint32(x))
- e.table[prevHash] = tableEntryPrev{
- Prev: e.table[prevHash].Cur,
- Cur: tableEntry{offset: e.cur + s - 3},
+ // Store every 5th hash in-between.
+ for i := s - l + 2; i < s-5; i += 6 {
+ nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
+ e.table[nextHash] = tableEntryPrev{
+ Prev: e.table[nextHash].Cur,
+ Cur: tableEntry{offset: e.cur + i}}
}
- x >>= 8
- prevHash = hash(uint32(x))
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 to s.
+ x := load6432(src, s-2)
+ prevHash := hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 2},
}
x >>= 8
- prevHash = hash(uint32(x))
+ prevHash = hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 1},
}
x >>= 8
- currHash := hash(uint32(x))
+ currHash := hashLen(x, tableBits, hashBytes)
candidates := e.table[currHash]
- cv = uint32(x)
+ cv = x
e.table[currHash] = tableEntryPrev{
Prev: candidates.Cur,
Cur: tableEntry{offset: s + e.cur},
@@ -200,18 +212,18 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
candidate = candidates.Cur
minOffset := e.cur + s - (maxMatchOffset - 4)
- if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) {
- // We only check if value mismatches.
- // Offset will always be invalid in other cases.
+ if candidate.offset > minOffset {
+ if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ // Found a match...
+ continue
+ }
candidate = candidates.Prev
- if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
- offset := s - (candidate.offset - e.cur)
- if offset <= maxMatchOffset {
- continue
- }
+ if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ // Match at prev...
+ continue
}
}
- cv = uint32(x >> 8)
+ cv = x >> 8
s++
break
}
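// Aside (not part of the diff): sketch of the hashLen-style multiply/shift
// hash the encoders switch to in these files. The low hashBytes bytes of a
// 64-bit little-endian load are shifted to the top of the word, multiplied
// by a large odd constant, and the top tableBits bits of the product become
// the table index. The constant below is illustrative, not the vendored one.
func hash5Sketch(u uint64, tableBits uint8) uint32 {
	const prime5 = 889523592379 // assumed odd 40-bit multiplier
	return uint32(((u << (64 - 40)) * prime5) >> (64 - tableBits))
}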
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
index e62f0c02b1e..23c08b325cf 100644
--- a/vendor/github.com/klauspost/compress/flate/level4.go
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -12,6 +12,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
@@ -80,7 +81,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
nextS := s
var t int32
for {
- nextHashS := hash4x64(cv, tableBits)
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
@@ -135,7 +136,15 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
if debugDeflate {
if t >= s {
@@ -160,7 +169,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
// Index first pair after match end.
if int(s+8) < len(src) {
cv := load6432(src, s)
- e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
@@ -175,7 +184,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1}
e.bTable[hash7(cv, tableBits)] = t
e.bTable[hash7(cv>>8, tableBits)] = t2
- e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
i += 3
for ; i < s-1; i += 3 {
@@ -184,7 +193,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1}
e.bTable[hash7(cv, tableBits)] = t
e.bTable[hash7(cv>>8, tableBits)] = t2
- e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
@@ -193,7 +202,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
- prevHashS := hash4x64(x, tableBits)
+ prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
e.bTable[prevHashL] = tableEntry{offset: o}
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
index 293a3a320b7..83ef50ba45f 100644
--- a/vendor/github.com/klauspost/compress/flate/level5.go
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -12,6 +12,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
@@ -88,7 +89,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
var l int32
var t int32
for {
- nextHashS := hash4x64(cv, tableBits)
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
@@ -105,7 +106,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur
- nextHashS = hash4x64(next, tableBits)
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
@@ -191,14 +192,21 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
// Try to locate a better match by checking the end of best match...
if sAt := s + l; l < 30 && sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is 2/3 bytes depending on input.
+ // 3 is only a little better when it is but sometimes a lot worse.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 2
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
- // Test current
- t2 := eLong - e.cur - l
- off := s - t2
+ t2 := eLong - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
if t2 >= 0 && off < maxMatchOffset && off > 0 {
- if l2 := e.matchlenLong(s, t2, src); l2 > l {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
+ s = s2
}
}
}
@@ -210,7 +218,15 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
if debugDeflate {
if t >= s {
@@ -242,7 +258,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
if i < s-1 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
- e.table[hash4x64(cv, tableBits)] = t
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
@@ -255,7 +271,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
// We only have enough bits for a short entry at i+2
cv >>= 8
t = tableEntry{offset: t.offset + 1}
- e.table[hash4x64(cv, tableBits)] = t
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
// Skip one - otherwise we risk hitting 's'
i += 4
@@ -265,7 +281,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
- e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
@@ -274,7 +290,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
- prevHashS := hash4x64(x, tableBits)
+ prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
eLong := &e.bTable[prevHashL]
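// Aside (not part of the diff): why skipBeginning is safe in the end-of-match
// probe above. The comparison starts skipBeginning bytes into the candidate;
// if that candidate wins, the regular backward-extension loop re-tests
// exactly the skipped bytes and folds them back into the match when they
// agree, so no match bytes are lost. Sketch of that extension step:
func extendBackwards(src []byte, t, s, nextEmit, l int32) (int32, int32, int32) {
	for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
		s--
		t--
		l++
	}
	return t, s, l
}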
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
index a709977ec49..f1e9d98fa50 100644
--- a/vendor/github.com/klauspost/compress/flate/level6.go
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -12,6 +12,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
@@ -90,7 +91,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
var l int32
var t int32
for {
- nextHashS := hash4x64(cv, tableBits)
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
@@ -107,7 +108,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
eLong.Cur, eLong.Prev = entry, eLong.Cur
// Calculate hashes of 'next'
- nextHashS = hash4x64(next, tableBits)
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
@@ -213,24 +214,33 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
// Try to locate a better match by checking the end-of-match...
if sAt := s + l; sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is 2/3 bytes depending on input.
+ // 3 is only a little better when it is but sometimes a lot worse.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 2
eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
// Test current
- t2 := eLong.Cur.offset - e.cur - l
- off := s - t2
+ t2 := eLong.Cur.offset - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
if off < maxMatchOffset {
if off > 0 && t2 >= 0 {
- if l2 := e.matchlenLong(s, t2, src); l2 > l {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
+ s = s2
}
}
// Test next:
- t2 = eLong.Prev.offset - e.cur - l
- off := s - t2
+ t2 = eLong.Prev.offset - e.cur - l + skipBeginning
+ off := s2 - t2
if off > 0 && off < maxMatchOffset && t2 >= 0 {
- if l2 := e.matchlenLong(s, t2, src); l2 > l {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
+ s = s2
}
}
}
@@ -243,7 +253,15 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
if false {
if t >= s {
@@ -269,7 +287,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
// Index after match end.
for i := nextS + 1; i < int32(len(src))-8; i += 2 {
cv := load6432(src, i)
- e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
}
@@ -284,7 +302,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
- e.table[hash4x64(cv, tableBits)] = t
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong.Cur, eLong.Prev = t, eLong.Cur
eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
}
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
index 53e89912463..93a1d150312 100644
--- a/vendor/github.com/klauspost/compress/flate/stateless.go
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -59,9 +59,9 @@ var bitWriterPool = sync.Pool{
},
}
-// StatelessDeflate allows to compress directly to a Writer without retaining state.
+// StatelessDeflate allows compressing directly to a Writer without retaining state.
// When returning everything will be flushed.
-// Up to 8KB of an optional dictionary can be given which is presumed to presumed to precede the block.
+// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
// Longer dictionaries will be truncated and will still produce valid output.
// Sending nil dictionary is perfectly fine.
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
@@ -249,7 +249,15 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
// Save the match found
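// Aside (not part of the diff): possible use of StatelessDeflate per the doc
// comment above. Every call flushes and retains no encoder state, which suits
// many small, independent payloads at some cost in ratio versus a Writer.
// Requires: import ("bytes"; "github.com/klauspost/compress/flate")
func deflateOnce(payload, dict []byte) ([]byte, error) {
	var buf bytes.Buffer
	// eof=true terminates the stream; dict (up to 8KB is used) may be nil.
	if err := flate.StatelessDeflate(&buf, payload, true, dict); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}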
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
index eb862d7a920..d818790c132 100644
--- a/vendor/github.com/klauspost/compress/flate/token.go
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -13,11 +13,10 @@ import (
)
const (
- // From top
- // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
- // 8 bits: xlength = length - MIN_MATCH_LENGTH
- // 5 bits offsetcode
- // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
+ // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+ // bits 16-22 offsetcode - 5 bits
+ // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
+ // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
lengthShift = 22
offsetMask = 1<<lengthShift - 1
typeMask = 3 << 30
@@ -129,11 +128,11 @@
type token uint32
type tokens struct {
- nLits int
extraHist [32]uint16 // codes 256->maxnumlit
offHist [32]uint16 // offset codes
litHist [256]uint16 // codes 0->255
- n uint16 // Must be able to contain maxStoreBlockSize
+ nFilled int
+ n uint16 // Must be able to contain maxStoreBlockSize
tokens [maxStoreBlockSize + 1]token
}
@@ -142,7 +141,7 @@ func (t *tokens) Reset() {
return
}
t.n = 0
- t.nLits = 0
+ t.nFilled = 0
for i := range t.litHist[:] {
t.litHist[i] = 0
}
@@ -161,12 +160,12 @@ func (t *tokens) Fill() {
for i, v := range t.litHist[:] {
if v == 0 {
t.litHist[i] = 1
- t.nLits++
+ t.nFilled++
}
}
for i, v := range t.extraHist[:literalCount-256] {
if v == 0 {
- t.nLits++
+ t.nFilled++
t.extraHist[i] = 1
}
}
@@ -196,20 +195,17 @@ func (t *tokens) indexTokens(in []token) {
// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst *tokens, lit []byte) {
- ol := int(dst.n)
- for i, v := range lit {
- dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
+ for _, v := range lit {
+ dst.tokens[dst.n] = token(v)
dst.litHist[v]++
+ dst.n++
}
- dst.n += uint16(len(lit))
- dst.nLits += len(lit)
}
func (t *tokens) AddLiteral(lit byte) {
t.tokens[t.n] = token(lit)
t.litHist[lit]++
t.n++
- t.nLits++
}
// from https://stackoverflow.com/a/28730362
@@ -230,8 +226,9 @@ func (t *tokens) EstimatedBits() int {
shannon := float32(0)
bits := int(0)
nMatches := 0
- if t.nLits > 0 {
- invTotal := 1.0 / float32(t.nLits)
+ total := int(t.n) + t.nFilled
+ if total > 0 {
+ invTotal := 1.0 / float32(total)
for _, v := range t.litHist[:] {
if v > 0 {
n := float32(v)
@@ -275,10 +272,9 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
}
oCode := offsetCode(xoffset)
xoffset |= oCode << 16
- t.nLits++
t.extraHist[lengthCodes1[uint8(xlength)]]++
- t.offHist[oCode]++
+ t.offHist[oCode&31]++
t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
t.n++
}
@@ -300,13 +296,16 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
xl := xlength
if xl > 258 {
// We need to have at least baseMatchLength left over for next loop.
- xl = 258 - baseMatchLength
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
}
xlength -= xl
xl -= baseMatchLength
- t.nLits++
t.extraHist[lengthCodes1[uint8(xl)]]++
- t.offHist[oc]++
+ t.offHist[oc&31]++
t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
t.n++
}
@@ -333,8 +332,8 @@
func (t token) length() uint8 { return uint8(t >> lengthShift) }
-// The code is never more than 8 bits, but is returned as uint32 for convenience.
-func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }
// Returns the offset code corresponding to a specific offset
func offsetCode(off uint32) uint32 {
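// Aside (not part of the diff): decoding a token per the bit layout
// documented at the top of this file; the constants mirror lengthShift and
// typeMask from the vendored source.
func unpackToken(t uint32) (typ uint32, xlength uint8, xoffset uint16) {
	const lengthShift = 22
	const typeMask = 3 << 30
	typ = t & typeMask                // literal/EOF/match marker (top 2 bits)
	xlength = uint8(t >> lengthShift) // length - MIN_MATCH_LENGTH (8 bits)
	xoffset = uint16(t)               // offset - MIN_OFFSET_SIZE, or the literal byte
	return
}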
diff --git a/vendor/github.com/klauspost/compress/go.mod b/vendor/github.com/klauspost/compress/go.mod
index 5aa64a436af..cd8e6b29f51 100644
--- a/vendor/github.com/klauspost/compress/go.mod
+++ b/vendor/github.com/klauspost/compress/go.mod
@@ -1,3 +1,3 @@
module github.com/klauspost/compress
-go 1.15
+go 1.17
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index a4979e8868a..504a7be9dae 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -8,115 +8,10 @@ package huff0
import (
"encoding/binary"
"errors"
+ "fmt"
"io"
)
-// bitReader reads a bitstream in reverse.
-// The last set bit indicates the start of the stream and is used
-// for aligning the input.
-type bitReader struct {
- in []byte
- off uint // next byte to read is at in[off - 1]
- value uint64
- bitsRead uint8
-}
-
-// init initializes and resets the bit reader.
-func (b *bitReader) init(in []byte) error {
- if len(in) < 1 {
- return errors.New("corrupt stream: too short")
- }
- b.in = in
- b.off = uint(len(in))
- // The highest bit of the last byte indicates where to start
- v := in[len(in)-1]
- if v == 0 {
- return errors.New("corrupt stream, did not find end of stream")
- }
- b.bitsRead = 64
- b.value = 0
- if len(in) >= 8 {
- b.fillFastStart()
- } else {
- b.fill()
- b.fill()
- }
- b.bitsRead += 8 - uint8(highBit32(uint32(v)))
- return nil
-}
-
-// peekBitsFast requires that at least one bit is requested every time.
-// There are no checks if the buffer is filled.
-func (b *bitReader) peekBitsFast(n uint8) uint16 {
- const regMask = 64 - 1
- v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
- return v
-}
-
-// fillFast() will make sure at least 32 bits are available.
-// There must be at least 4 bytes available.
-func (b *bitReader) fillFast() {
- if b.bitsRead < 32 {
- return
- }
-
- // 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
- b.bitsRead -= 32
- b.off -= 4
-}
-
-func (b *bitReader) advance(n uint8) {
- b.bitsRead += n
-}
-
-// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
-func (b *bitReader) fillFastStart() {
- // Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
- b.bitsRead = 0
- b.off -= 8
-}
-
-// fill() will make sure at least 32 bits are available.
-func (b *bitReader) fill() {
- if b.bitsRead < 32 {
- return
- }
- if b.off > 4 {
- v := b.in[b.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
- b.bitsRead -= 32
- b.off -= 4
- return
- }
- for b.off > 0 {
- b.value = (b.value << 8) | uint64(b.in[b.off-1])
- b.bitsRead -= 8
- b.off--
- }
-}
-
-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReader) finished() bool {
- return b.off == 0 && b.bitsRead >= 64
-}
-
-// close the bitstream and returns an error if out-of-buffer reads occurred.
-func (b *bitReader) close() error {
- // Release reference.
- b.in = nil
- if b.bitsRead > 64 {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-
// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
@@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}
+func (b *bitReaderBytes) remaining() uint {
+ return b.off*8 + uint(64-b.bitsRead)
+}
+
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderBytes) close() error {
// Release reference.
b.in = nil
+ if b.remaining() > 0 {
+ return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+ }
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
@@ -313,15 +215,17 @@ func (b *bitReaderShifted) fill() {
}
}
-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReaderShifted) finished() bool {
- return b.off == 0 && b.bitsRead >= 64
+func (b *bitReaderShifted) remaining() uint {
+ return b.off*8 + uint(64-b.bitsRead)
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderShifted) close() error {
// Release reference.
b.in = nil
+ if b.remaining() > 0 {
+ return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+ }
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
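// Aside (not part of the diff): the new remaining() helpers make close()
// strict about trailing data, turning unconsumed bits into a corrupt-input
// error. The accounting: every unread input byte contributes 8 bits, plus
// whatever is left of the 64-bit bit container.
func remainingBits(off uint, bitsRead uint8) uint {
	return off*8 + uint(64-bitsRead)
}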
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index 6bce4e87d4f..ec71f7a349a 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -5,8 +5,6 @@
package huff0
-import "fmt"
-
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
0xFFFF, 0xFFFF} /* up to 16 bits */
-// addBits16NC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
- b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
- b.nBits += bits
-}
-
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits
}
-// addBits16ZeroNC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-// This is fastest if bits can be zero.
-func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
- if bits == 0 {
- return
- }
- value <<= (16 - bits) & 15
- value >>= (16 - bits) & 15
- b.bitContainer |= uint64(value) << (b.nBits & 63)
- b.nBits += bits
-}
-
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
- v := b.nBits >> 3
- switch v {
- case 0:
- return
- case 1:
- b.out = append(b.out,
- byte(b.bitContainer),
- )
- b.bitContainer >>= 1 << 3
- case 2:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- )
- b.bitContainer >>= 2 << 3
- case 3:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- )
- b.bitContainer >>= 3 << 3
- case 4:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- )
- b.bitContainer >>= 4 << 3
- case 5:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- )
- b.bitContainer >>= 5 << 3
- case 6:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- )
- b.bitContainer >>= 6 << 3
- case 7:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- )
- b.bitContainer >>= 7 << 3
- case 8:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- byte(b.bitContainer>>56),
- )
- b.bitContainer = 0
- b.nBits = 0
- return
- default:
- panic(fmt.Errorf("bits (%d) > 64", b.nBits))
- }
- b.nBits &= 7
-}
-
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {
@@ -201,10 +93,3 @@ func (b *bitWriter) close() error {
b.flushAlign()
return nil
}
-
-// reset and continue writing by appending to out.
-func (b *bitWriter) reset(out []byte) {
- b.bitContainer = 0
- b.nBits = 0
- b.out = out
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
index 50bcdf6ea99..4dcab8d2327 100644
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
b.off = 0
}
-// advance the stream b n bytes.
-func (b *byteReader) advance(n uint) {
- b.off += int(n)
-}
-
// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
v3 := int32(b.b[b.off+3])
@@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}
-// unread returns the unread portion of the input.
-func (b byteReader) unread() []byte {
- return b.b[b.off:]
-}
-
// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
return len(b.b) - b.off
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 8323dc05389..4d14542facf 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -2,6 +2,7 @@ package huff0
import (
"fmt"
+ "math"
"runtime"
"sync"
)
@@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
if err != nil {
return nil, err
}
+ if len(s.Out)-idx > math.MaxUint16 {
+ // We cannot store the size in the jump table
+ return nil, ErrIncompressible
+ }
// Write compressed length as little endian before block.
if i < 3 {
// Last length is not written.
@@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
return nil, errs[i]
}
o := s.tmpOut[i]
+ if len(o) > math.MaxUint16 {
+ // We cannot store the size in the jump table
+ return nil, ErrIncompressible
+ }
// Write compressed length as little endian before block.
if i < 3 {
// Last length is not written.
@@ -395,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
return true
}
+//lint:ignore U1000 used for debugging
func (s *Scratch) validateTable(c cTable) bool {
if len(c) < int(s.symbolLen) {
return false
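// Aside (not part of the diff): the math.MaxUint16 guards added above exist
// because the 4X container stores the first three stream sizes as 16-bit
// little-endian values; a larger stream cannot be represented, so the
// encoder refuses (upstream returns ErrIncompressible). Sketch of writing
// one entry under that constraint. Requires: import ("errors"; "math")
func putJumpEntry(dst []byte, streamLen int) ([]byte, error) {
	if streamLen > math.MaxUint16 {
		return nil, errors.New("stream too large for 16-bit jump table")
	}
	return append(dst, byte(streamLen), byte(streamLen>>8)), nil
}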
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 41703bba4d6..42a237eac4a 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -4,13 +4,13 @@ import (
"errors"
"fmt"
"io"
+ "sync"
"github.com/klauspost/compress/fse"
)
type dTable struct {
single []dEntrySingle
- double []dEntryDouble
}
// single-symbols decoding
@@ -18,13 +18,6 @@ type dEntrySingle struct {
entry uint16
}
-// double-symbols decoding
-type dEntryDouble struct {
- seq uint16
- nBits uint8
- len uint8
-}
-
// Uses special code for all tables that are < 8 bits.
const use8BitTables = true
@@ -34,7 +27,7 @@ const use8BitTables = true
// If no Scratch is provided a new one is allocated.
// The returned Scratch can be used for encoding or decoding input using this table.
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
- s, err = s.prepare(in)
+ s, err = s.prepare(nil)
if err != nil {
return s, nil, err
}
@@ -216,6 +209,7 @@ func (s *Scratch) Decoder() *Decoder {
return &Decoder{
dt: s.dt,
actualTableLog: s.actualTableLog,
+ bufs: &s.decPool,
}
}
@@ -223,103 +217,15 @@ func (s *Scratch) Decoder() *Decoder {
type Decoder struct {
dt dTable
actualTableLog uint8
+ bufs *sync.Pool
}
-// Decompress1X will decompress a 1X encoded stream.
-// The cap of the output buffer will be the maximum decompressed size.
-// The length of the supplied input must match the end of a block exactly.
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
- if len(d.dt.single) == 0 {
- return nil, errors.New("no table loaded")
- }
- if use8BitTables && d.actualTableLog <= 8 {
- return d.decompress1X8Bit(dst, src)
- }
- var br bitReaderShifted
- err := br.init(src)
- if err != nil {
- return dst, err
- }
- maxDecodedSize := cap(dst)
- dst = dst[:0]
-
- // Avoid bounds check by always having full sized table.
- const tlSize = 1 << tableLogMax
- const tlMask = tlSize - 1
- dt := d.dt.single[:tlSize]
-
- // Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
- var off uint8
-
- for br.off >= 8 {
- br.fillFast()
- v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+0] = uint8(v.entry >> 8)
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+1] = uint8(v.entry >> 8)
-
- // Refill
- br.fillFast()
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+2] = uint8(v.entry >> 8)
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+3] = uint8(v.entry >> 8)
-
- off += 4
- if off == 0 {
- if len(dst)+256 > maxDecodedSize {
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
- }
- dst = append(dst, buf[:]...)
- }
- }
-
- if len(dst)+int(off) > maxDecodedSize {
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
- }
- dst = append(dst, buf[:off]...)
-
- // br < 8, so uint8 is fine
- bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
- for bitsLeft > 0 {
- br.fill()
- if false && br.bitsRead >= 32 {
- if br.off >= 4 {
- v := br.in[br.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- br.value = (br.value << 32) | uint64(low)
- br.bitsRead -= 32
- br.off -= 4
- } else {
- for br.off > 0 {
- br.value = (br.value << 8) | uint64(br.in[br.off-1])
- br.bitsRead -= 8
- br.off--
- }
- }
- }
- if len(dst) >= maxDecodedSize {
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
- }
- v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
- nBits := uint8(v.entry)
- br.advance(nBits)
- bitsLeft -= nBits
- dst = append(dst, uint8(v.entry>>8))
+func (d *Decoder) buffer() *[4][256]byte {
+ buf, ok := d.bufs.Get().(*[4][256]byte)
+ if ok {
+ return buf
}
- return dst, br.close()
+ return &[4][256]byte{}
}
// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
@@ -341,41 +247,258 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ bufs := d.buffer()
+ buf := &bufs[0]
var off uint8
- shift := (8 - d.actualTableLog) & 7
-
- //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
- for br.off >= 4 {
- br.fillFast()
- v := dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+0] = uint8(v.entry >> 8)
-
- v = dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+1] = uint8(v.entry >> 8)
-
- v = dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+2] = uint8(v.entry >> 8)
-
- v = dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+3] = uint8(v.entry >> 8)
-
- off += 4
- if off == 0 {
- if len(dst)+256 > maxDecodedSize {
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
+ switch d.actualTableLog {
+ case 8:
+ const shift = 8 - 8
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ d.bufs.Put(bufs)
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 7:
+ const shift = 8 - 7
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ d.bufs.Put(bufs)
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 6:
+ const shift = 8 - 6
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 5:
+ const shift = 8 - 5
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 4:
+ const shift = 8 - 4
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 3:
+ const shift = 8 - 3
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
}
- dst = append(dst, buf[:]...)
}
+ case 2:
+ const shift = 8 - 2
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 1:
+ const shift = 8 - 1
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ default:
+ d.bufs.Put(bufs)
+ return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
}
if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -383,6 +506,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
// br < 4, so uint8 is fine
bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+ shift := (8 - d.actualTableLog) & 7
+
for bitsLeft > 0 {
if br.bitsRead >= 64-8 {
for br.off > 0 {
@@ -393,6 +518,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
}
if len(dst) >= maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
v := dt[br.peekByteFast()>>shift]
@@ -401,6 +527,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8))
}
+ d.bufs.Put(bufs)
return dst, br.close()
}
@@ -420,33 +547,35 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ bufs := d.buffer()
+ buf := &bufs[0]
var off uint8
- const shift = 0
+ const shift = 56
//fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
for br.off >= 4 {
br.fillFast()
- v := dt[br.peekByteFast()>>shift]
+ v := dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+0] = uint8(v.entry >> 8)
- v = dt[br.peekByteFast()>>shift]
+ v = dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+1] = uint8(v.entry >> 8)
- v = dt[br.peekByteFast()>>shift]
+ v = dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+2] = uint8(v.entry >> 8)
- v = dt[br.peekByteFast()>>shift]
+ v = dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+3] = uint8(v.entry >> 8)
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -455,6 +584,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
}
if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -471,208 +601,20 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
}
}
if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
- v := dt[br.peekByteFast()>>shift]
+ v := dt[br.peekByteFast()]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8))
}
+ d.bufs.Put(bufs)
return dst, br.close()
}
-// Decompress4X will decompress a 4X encoded stream.
-// The length of the supplied input must match the end of a block exactly.
-// The *capacity* of the dst slice must match the destination size of
-// the uncompressed data exactly.
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
- if len(d.dt.single) == 0 {
- return nil, errors.New("no table loaded")
- }
- if len(src) < 6+(4*1) {
- return nil, errors.New("input too small")
- }
- if use8BitTables && d.actualTableLog <= 8 {
- return d.decompress4X8bit(dst, src)
- }
-
- var br [4]bitReaderShifted
- start := 6
- for i := 0; i < 3; i++ {
- length := int(src[i*2]) | (int(src[i*2+1]) << 8)
- if start+length >= len(src) {
- return nil, errors.New("truncated input (or invalid offset)")
- }
- err := br[i].init(src[start : start+length])
- if err != nil {
- return nil, err
- }
- start += length
- }
- err := br[3].init(src[start:])
- if err != nil {
- return nil, err
- }
-
- // destination, offset to match first output
- dstSize := cap(dst)
- dst = dst[:dstSize]
- out := dst
- dstEvery := (dstSize + 3) / 4
-
- const tlSize = 1 << tableLogMax
- const tlMask = tlSize - 1
- single := d.dt.single[:tlSize]
-
- // Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
- var off uint8
- var decoded int
-
- // Decode 2 values from each decoder/loop.
- const bufoff = 256 / 4
- for {
- if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
- break
- }
-
- {
- const stream = 0
- const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- val := br[stream].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
-
- val2 := br[stream2].peekBitsFast(d.actualTableLog)
- v2 := single[val2&tlMask]
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
-
- val = br[stream].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-
- val2 = br[stream2].peekBitsFast(d.actualTableLog)
- v2 = single[val2&tlMask]
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
- }
-
- {
- const stream = 2
- const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- val := br[stream].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
-
- val2 := br[stream2].peekBitsFast(d.actualTableLog)
- v2 := single[val2&tlMask]
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
-
- val = br[stream].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-
- val2 = br[stream2].peekBitsFast(d.actualTableLog)
- v2 = single[val2&tlMask]
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
- }
-
- off += 2
-
- if off == bufoff {
- if bufoff > dstEvery {
- return nil, errors.New("corruption detected: stream overrun 1")
- }
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
- out = out[bufoff:]
- decoded += 256
- // There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
- return nil, errors.New("corruption detected: stream overrun 2")
- }
- }
- }
- if off > 0 {
- ioff := int(off)
- if len(out) < dstEvery*3+ioff {
- return nil, errors.New("corruption detected: stream overrun 3")
- }
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
- decoded += int(off) * 4
- out = out[off:]
- }
-
- // Decode remaining.
- for i := range br {
- offset := dstEvery * i
- br := &br[i]
- bitsLeft := br.off*8 + uint(64-br.bitsRead)
- for bitsLeft > 0 {
- br.fill()
- if false && br.bitsRead >= 32 {
- if br.off >= 4 {
- v := br.in[br.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- br.value = (br.value << 32) | uint64(low)
- br.bitsRead -= 32
- br.off -= 4
- } else {
- for br.off > 0 {
- br.value = (br.value << 8) | uint64(br.in[br.off-1])
- br.bitsRead -= 8
- br.off--
- }
- }
- }
- // end inline...
- if offset >= len(out) {
- return nil, errors.New("corruption detected: stream overrun 4")
- }
-
- // Read value and increment offset.
- val := br.peekBitsFast(d.actualTableLog)
- v := single[val&tlMask].entry
- nBits := uint8(v)
- br.advance(nBits)
- bitsLeft -= uint(nBits)
- out[offset] = uint8(v >> 8)
- offset++
- }
- decoded += offset - dstEvery*i
- err = br.close()
- if err != nil {
- return nil, err
- }
- }
- if dstSize != decoded {
- return nil, errors.New("corruption detected: short output block")
- }
- return dst, nil
-}
-
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
@@ -706,19 +648,18 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
out := dst
dstEvery := (dstSize + 3) / 4
- shift := (8 - d.actualTableLog) & 7
+ shift := (56 + (8 - d.actualTableLog)) & 63
const tlSize = 1 << 8
- const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ buf := d.buffer()
var off uint8
var decoded int
// Decode 4 values from each decoder/loop.
- const bufoff = 256 / 4
+ const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -728,120 +669,144 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
// Interleave 2 decodes.
const stream = 0
const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
{
const stream = 2
const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
off += 4
- if off == bufoff {
+ if off == 0 {
if bufoff > dstEvery {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
- out = out[bufoff:]
- decoded += 256
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
br := &br[i]
- bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+ bitsLeft := br.remaining()
for bitsLeft > 0 {
if br.finished() {
+ d.bufs.Put(buf)
return nil, io.ErrUnexpectedEOF
}
if br.bitsRead >= 56 {
@@ -861,24 +826,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
}
}
// end inline...
- if offset >= len(out) {
+ if offset >= endsAt {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
- v := single[br.peekByteFast()>>shift].entry
+ v := single[uint8(br.value>>shift)].entry
nBits := uint8(v)
br.advance(nBits)
- bitsLeft -= int(nBits)
+ bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
+ d.bufs.Put(buf)
return nil, err
}
}
+ d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
@@ -914,18 +886,17 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
out := dst
dstEvery := (dstSize + 3) / 4
- const shift = 0
+ const shift = 56
const tlSize = 1 << 8
- const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ buf := d.buffer()
var off uint8
var decoded int
// Decode 4 values from each decoder/loop.
- const bufoff = 256 / 4
+ const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -935,98 +906,116 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
// Interleave 2 decodes.
const stream = 0
const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
{
const stream = 2
const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
off += 4
- if off == bufoff {
+ if off == 0 {
if bufoff > dstEvery {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
- out = out[bufoff:]
- decoded += 256
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+	//copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
@@ -1034,21 +1023,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
if len(out) < dstEvery*3+ioff {
return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
br := &br[i]
- bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+ bitsLeft := br.remaining()
for bitsLeft > 0 {
if br.finished() {
+ d.bufs.Put(buf)
return nil, io.ErrUnexpectedEOF
}
if br.bitsRead >= 56 {
@@ -1068,24 +1063,32 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
}
}
// end inline...
- if offset >= len(out) {
+ if offset >= endsAt {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
- v := single[br.peekByteFast()>>shift].entry
+ v := single[br.peekByteFast()].entry
nBits := uint8(v)
br.advance(nBits)
- bitsLeft -= int(nBits)
+ bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
+ d.bufs.Put(buf)
return nil, err
}
}
+ d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 00000000000..ba7e8e6b027
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
+)
+
+// decompress4x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+//
+//go:noescape
+func decompress4x_main_loop_amd64(ctx *decompress4xContext)
+
+// decompress4x_8b_main_loop_amd64 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8 which decodes 4 entries
+// per loop.
+//
+//go:noescape
+func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
+
+// fallback8BitSize is the size below which the pure Go version is faster.
+const fallback8BitSize = 800
+
+type decompress4xContext struct {
+ pbr *[4]bitReaderShifted
+ peekBits uint8
+ out *byte
+ dstEvery int
+ tbl *dEntrySingle
+ decoded int
+ limit *byte
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+
+ use8BitTables := d.actualTableLog <= 8
+ if cap(dst) < fallback8BitSize && use8BitTables {
+ return d.decompress4X8bit(dst, src)
+ }
+
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ var decoded int
+
+ if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
+ ctx := decompress4xContext{
+ pbr: &br,
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+ out: &out[0],
+ dstEvery: dstEvery,
+ tbl: &single[0],
+			limit:    &out[dstEvery-4], // Always stop decoding when the first buffer gets here, to avoid writing OOB on the last.
+ }
+ if use8BitTables {
+ decompress4x_8b_main_loop_amd64(&ctx)
+ } else {
+ decompress4x_main_loop_amd64(&ctx)
+ }
+
+ decoded = ctx.decoded
+ out = out[decoded/4:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// decompress1x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+
+// decompress1x_main_loop_bmi2 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8, using BMI2 instructions.
+//
+//go:noescape
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+
+type decompress1xContext struct {
+ pbr *bitReaderShifted
+ peekBits uint8
+ out *byte
+ outCap int
+ tbl *dEntrySingle
+ decoded int
+}
+
+// Error code reported by the asm implementations.
+const error_max_decoded_size_exeeded = -1
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:maxDecodedSize]
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+
+ if maxDecodedSize >= 4 {
+ ctx := decompress1xContext{
+ pbr: &br,
+ out: &dst[0],
+ outCap: maxDecodedSize,
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+ tbl: &d.dt.single[0],
+ }
+
+ if cpuinfo.HasBMI2() {
+ decompress1x_main_loop_bmi2(&ctx)
+ } else {
+ decompress1x_main_loop_amd64(&ctx)
+ }
+ if ctx.decoded == error_max_decoded_size_exeeded {
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+
+ dst = dst[:ctx.decoded]
+ }
+
+	// br.off < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
+ br.fill()
+ if len(dst) >= maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ return dst, br.close()
+}
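The wrapper above keeps the public huff0 API unchanged, so a caller still round-trips through `Compress4X`, `ReadTable`, and `Scratch.Decoder()`. A minimal sketch of that round trip, assuming a compressible input and a known output size (both requirements are documented on `Decompress4X` itself):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Sample input with a small alphabet, so Compress4X does not report
	// it as incompressible or single-symbol (RLE).
	in := bytes.Repeat([]byte("abcdabcdab"), 100)

	var s huff0.Scratch
	comp, _, err := huff0.Compress4X(in, &s)
	if err != nil {
		log.Fatal(err)
	}

	// Decompression side: parse the Huffman table, then decode the
	// four interleaved streams.
	sc, remain, err := huff0.ReadTable(comp, &huff0.Scratch{})
	if err != nil {
		log.Fatal(err)
	}
	// The *capacity* of dst must match the decompressed size exactly.
	out, err := sc.Decoder().Decompress4X(make([]byte, 0, len(in)), remain)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(in, out)) // true
}
```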
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 00000000000..8d2187a2ce6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,846 @@
+// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
+
+//go:build amd64 && !appengine && !noasm && gc
+
+// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
+TEXT ·decompress4x_main_loop_amd64(SB), $0-8
+ XORQ DX, DX
+
+ // Preload values
+ MOVQ ctx+0(FP), AX
+ MOVBQZX 8(AX), DI
+ MOVQ 16(AX), SI
+ MOVQ 48(AX), BX
+ MOVQ 24(AX), R9
+ MOVQ 32(AX), R10
+ MOVQ (AX), R11
+
+ // Main loop
+main_loop:
+ MOVQ SI, R8
+ CMPQ R8, BX
+ SETGE DL
+
+ // br0.fillFast32()
+ MOVQ 32(R11), R12
+ MOVBQZX 40(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill0
+ MOVQ 24(R11), AX
+ SUBQ $0x20, R13
+ SUBQ $0x04, AX
+ MOVQ (R11), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 24(R11)
+ ORQ R14, R12
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ AX, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill0:
+ // val0 := br0.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
+
+ // v1 := table[val1&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 32(R11)
+ MOVB R13, 40(R11)
+ ADDQ R9, R8
+
+ // br1.fillFast32()
+ MOVQ 80(R11), R12
+ MOVBQZX 88(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill1
+ MOVQ 72(R11), AX
+ SUBQ $0x20, R13
+ SUBQ $0x04, AX
+ MOVQ 48(R11), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 72(R11)
+ ORQ R14, R12
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ AX, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill1:
+ // val0 := br1.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
+
+ // v1 := table[val1&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 80(R11)
+ MOVB R13, 88(R11)
+ ADDQ R9, R8
+
+ // br2.fillFast32()
+ MOVQ 128(R11), R12
+ MOVBQZX 136(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill2
+ MOVQ 120(R11), AX
+ SUBQ $0x20, R13
+ SUBQ $0x04, AX
+ MOVQ 96(R11), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 120(R11)
+ ORQ R14, R12
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ AX, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill2:
+ // val0 := br2.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
+
+ // v1 := table[val1&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 128(R11)
+ MOVB R13, 136(R11)
+ ADDQ R9, R8
+
+ // br3.fillFast32()
+ MOVQ 176(R11), R12
+ MOVBQZX 184(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill3
+ MOVQ 168(R11), AX
+ SUBQ $0x20, R13
+ SUBQ $0x04, AX
+ MOVQ 144(R11), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 168(R11)
+ ORQ R14, R12
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ AX, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill3:
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
+
+ // v1 := table[val1&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 176(R11)
+ MOVB R13, 184(R11)
+ ADDQ $0x02, SI
+ TESTB DL, DL
+ JZ main_loop
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), SI
+ SHLQ $0x02, SI
+ MOVQ SI, 40(AX)
+ RET
+
+// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
+TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
+ XORQ DX, DX
+
+ // Preload values
+ MOVQ ctx+0(FP), CX
+ MOVBQZX 8(CX), DI
+ MOVQ 16(CX), BX
+ MOVQ 48(CX), SI
+ MOVQ 24(CX), R9
+ MOVQ 32(CX), R10
+ MOVQ (CX), R11
+
+ // Main loop
+main_loop:
+ MOVQ BX, R8
+ CMPQ R8, SI
+ SETGE DL
+
+ // br0.fillFast32()
+ MOVQ 32(R11), R12
+ MOVBQZX 40(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill0
+ MOVQ 24(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ (R11), R15
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 24(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ R14, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill0:
+ // val0 := br0.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v1 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // val2 := br0.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v2 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val3 := br0.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v3 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+	// out[id * dstEvery + 2] = uint8(v2.entry >> 8)
+	// out[id * dstEvery + 3] = uint8(v3.entry >> 8)
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 32(R11)
+ MOVB R13, 40(R11)
+ ADDQ R9, R8
+
+ // br1.fillFast32()
+ MOVQ 80(R11), R12
+ MOVBQZX 88(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill1
+ MOVQ 72(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ 48(R11), R15
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 72(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ R14, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill1:
+ // val0 := br1.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v1 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // val2 := br1.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v2 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val3 := br1.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v3 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+	// out[id * dstEvery + 2] = uint8(v2.entry >> 8)
+	// out[id * dstEvery + 3] = uint8(v3.entry >> 8)
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 80(R11)
+ MOVB R13, 88(R11)
+ ADDQ R9, R8
+
+ // br2.fillFast32()
+ MOVQ 128(R11), R12
+ MOVBQZX 136(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill2
+ MOVQ 120(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ 96(R11), R15
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 120(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ R14, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill2:
+ // val0 := br2.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v1 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // val2 := br2.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v2 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val3 := br2.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v3 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+	// out[id * dstEvery + 2] = uint8(v2.entry >> 8)
+	// out[id * dstEvery + 3] = uint8(v3.entry >> 8)
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 128(R11)
+ MOVB R13, 136(R11)
+ ADDQ R9, R8
+
+ // br3.fillFast32()
+ MOVQ 176(R11), R12
+ MOVBQZX 184(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill3
+ MOVQ 168(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ 144(R11), R15
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 168(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ R14, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill3:
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v1 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // val2 := br3.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v2 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val3 := br3.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v3 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+	// out[id * dstEvery + 2] = uint8(v2.entry >> 8)
+	// out[id * dstEvery + 3] = uint8(v3.entry >> 8)
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 176(R11)
+ MOVB R13, 184(R11)
+ ADDQ $0x04, BX
+ TESTB DL, DL
+ JZ main_loop
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), BX
+ SHLQ $0x02, BX
+ MOVQ BX, 40(AX)
+ RET
+
+// func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+TEXT ·decompress1x_main_loop_amd64(SB), $0-8
+ MOVQ ctx+0(FP), CX
+ MOVQ 16(CX), DX
+ MOVQ 24(CX), BX
+ CMPQ BX, $0x04
+ JB error_max_decoded_size_exeeded
+ LEAQ (DX)(BX*1), BX
+ MOVQ (CX), SI
+ MOVQ (SI), R8
+ MOVQ 24(SI), R9
+ MOVQ 32(SI), R10
+ MOVBQZX 40(SI), R11
+ MOVQ 32(CX), SI
+ MOVBQZX 8(CX), DI
+ JMP loop_condition
+
+main_loop:
+ // Check if we have room for 4 bytes in the output buffer
+ LEAQ 4(DX), CX
+ CMPQ CX, BX
+ JGE error_max_decoded_size_exeeded
+
+ // Decode 4 values
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_1_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), R12
+ MOVQ R11, CX
+ SHLQ CL, R12
+ ORQ R12, R10
+
+bitReader_fillFast_1_end:
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ BSWAPL AX
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_2_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), R12
+ MOVQ R11, CX
+ SHLQ CL, R12
+ ORQ R12, R10
+
+bitReader_fillFast_2_end:
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ BSWAPL AX
+
+ // Store the decoded values
+ MOVL AX, (DX)
+ ADDQ $0x04, DX
+
+loop_condition:
+ CMPQ R9, $0x08
+ JGE main_loop
+
+ // Update ctx structure
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), DX
+ MOVQ DX, 40(AX)
+ MOVQ (AX), AX
+ MOVQ R9, 24(AX)
+ MOVQ R10, 32(AX)
+ MOVB R11, 40(AX)
+ RET
+
+ // Report error
+error_max_decoded_size_exeeded:
+ MOVQ ctx+0(FP), AX
+ MOVQ $-1, CX
+ MOVQ CX, 40(AX)
+ RET
+
+// func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+// Requires: BMI2
+TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
+ MOVQ ctx+0(FP), CX
+ MOVQ 16(CX), DX
+ MOVQ 24(CX), BX
+ CMPQ BX, $0x04
+ JB error_max_decoded_size_exeeded
+ LEAQ (DX)(BX*1), BX
+ MOVQ (CX), SI
+ MOVQ (SI), R8
+ MOVQ 24(SI), R9
+ MOVQ 32(SI), R10
+ MOVBQZX 40(SI), R11
+ MOVQ 32(CX), SI
+ MOVBQZX 8(CX), DI
+ JMP loop_condition
+
+main_loop:
+ // Check if we have room for 4 bytes in the output buffer
+ LEAQ 4(DX), CX
+ CMPQ CX, BX
+ JGE error_max_decoded_size_exeeded
+
+ // Decode 4 values
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_1_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), CX
+ SHLXQ R11, CX, CX
+ ORQ CX, R10
+
+bitReader_fillFast_1_end:
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ BSWAPL AX
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_2_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), CX
+ SHLXQ R11, CX, CX
+ ORQ CX, R10
+
+bitReader_fillFast_2_end:
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ BSWAPL AX
+
+ // Store the decoded values
+ MOVL AX, (DX)
+ ADDQ $0x04, DX
+
+loop_condition:
+ CMPQ R9, $0x08
+ JGE main_loop
+
+ // Update ctx structure
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), DX
+ MOVQ DX, 40(AX)
+ MOVQ (AX), AX
+ MOVQ R9, 24(AX)
+ MOVQ R10, 32(AX)
+ MOVB R11, 40(AX)
+ RET
+
+ // Report error
+error_max_decoded_size_exeeded:
+ MOVQ ctx+0(FP), AX
+ MOVQ $-1, CX
+ MOVQ CX, 40(AX)
+ RET
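The commented asm above follows a fixed rhythm per symbol: peek the top `peekBits` bits, look the value up, emit the high byte of the entry, then shift the consumed bits away. A scalar Go sketch of that step for one stream, using a hypothetical 1-bit table (the real tables come from `ReadTable`):

```go
package main

import "fmt"

// decode4 sketches one stream's four-symbol step from the 8-bit main
// loop above. Table entries pack nBits in the low byte and the decoded
// symbol in the next byte; peekBits is (64-tableLog)&63, so the top
// tableLog bits of value index the table without masking.
func decode4(value uint64, bitsRead uint8, table []uint16, peekBits uint8, out []byte) (uint64, uint8) {
	for i := 0; i < 4; i++ {
		v := table[value>>peekBits] // peekTopBits + table lookup
		out[i] = uint8(v >> 8)      // emit the decoded symbol
		value <<= v & 63            // advance: drop the consumed bits
		bitsRead += uint8(v)
	}
	return value, bitsRead
}

func main() {
	// Hypothetical 1-bit table: bit 0 decodes to 'a', bit 1 to 'b'.
	table := []uint16{1 | 'a'<<8, 1 | 'b'<<8}
	out := make([]byte, 4)
	decode4(0b0110<<60, 0, table, 63, out) // peekBits = 64-1
	fmt.Printf("%s\n", out)                // abba
}
```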
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
new file mode 100644
index 00000000000..908c17de63f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -0,0 +1,299 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// This file contains generic implementations of Decoder.Decompress4X and Decoder.Decompress1X.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress4X8bit(dst, src)
+ }
+
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ // Decode 2 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ const stream = 0
+ const stream2 = 1
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ off += 2
+
+ if off == 0 {
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ // There must at least be 3 buffers left.
+ if len(out)-bufoff < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ //copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress1X8Bit(dst, src)
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
+
+ // Avoid bounds check by always having full sized table.
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ dt := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ bufs := d.buffer()
+ buf := &bufs[0]
+ var off uint8
+
+ for br.off >= 8 {
+ br.fillFast()
+ v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ // Refill
+ br.fillFast()
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ d.bufs.Put(bufs)
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+
+ if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:off]...)
+
+	// br.off < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
+ br.fill()
+ if false && br.bitsRead >= 32 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value = (br.value << 32) | uint64(low)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value = (br.value << 8) | uint64(br.in[br.off-1])
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ d.bufs.Put(bufs)
+ return dst, br.close()
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 3ee00ecb470..e8ad17ad08e 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
"math/bits"
+ "sync"
"github.com/klauspost/compress/fse"
)
@@ -116,6 +117,7 @@ type Scratch struct {
nodes []nodeElt
tmpOut [4][]byte
fse *fse.Scratch
+ decPool sync.Pool // *[4][256]byte buffers.
huffWeight [maxSymbolValue + 1]byte
}
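The `decPool` field added here feeds the `d.buffer()` / `d.bufs.Put(...)` pairs in the decode loops above: pooling the four 256-byte per-stream scratch buffers avoids a fresh allocation per block. A standalone sketch of the assumed pattern (the real `buffer` helper is not part of this diff):

```go
package main

import (
	"fmt"
	"sync"
)

// decBufs mirrors the role of Scratch.decPool: it recycles the
// *[4][256]byte scratch buffers between decode calls.
var decBufs = sync.Pool{
	New: func() interface{} { return new([4][256]byte) },
}

func decodeBlock() error {
	buf := decBufs.Get().(*[4][256]byte)
	// Every return path must give the buffer back, which is why the
	// diff adds d.bufs.Put(...) before each error return.
	defer decBufs.Put(buf)

	buf[0][0] = 42 // stand-in for the real per-stream decode loop
	fmt.Println(buf[0][0])
	return nil
}

func main() { _ = decodeBlock() }
```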
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
new file mode 100644
index 00000000000..3954c51219b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
@@ -0,0 +1,34 @@
+// Package cpuinfo gives runtime info about the current CPU.
+//
+// This is a very limited module meant for use internally
+// in this project. For a more versatile solution, check
+// https://github.com/klauspost/cpuid.
+package cpuinfo
+
+// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
+func HasBMI1() bool {
+ return hasBMI1
+}
+
+// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
+func HasBMI2() bool {
+ return hasBMI2
+}
+
+// DisableBMI2 will disable BMI2, for testing purposes.
+// Call returned function to restore previous state.
+func DisableBMI2() func() {
+ old := hasBMI2
+ hasBMI2 = false
+ return func() {
+ hasBMI2 = old
+ }
+}
+
+// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
+func HasBMI() bool {
+ return HasBMI1() && HasBMI2()
+}
+
+var hasBMI1 bool
+var hasBMI2 bool
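`DisableBMI2` exists so tests can force the non-BMI2 code path on machines that have BMI2. A sketch of how such a test might look inside the module (hypothetical test name; `internal/cpuinfo` cannot be imported from other modules):

```go
package huff0

import (
	"testing"

	"github.com/klauspost/compress/internal/cpuinfo"
)

func TestDecompress1XNoBMI2(t *testing.T) {
	if !cpuinfo.HasBMI2() {
		t.Skip("no BMI2; the plain amd64 loop is already selected")
	}
	restore := cpuinfo.DisableBMI2() // force decompress1x_main_loop_amd64
	defer restore()

	// ... exercise Decoder.Decompress1X here ...
}
```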
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
new file mode 100644
index 00000000000..e802579c4f9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
@@ -0,0 +1,11 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package cpuinfo
+
+//go:noescape
+func x86extensions() (bmi1, bmi2 bool)
+
+func init() {
+ hasBMI1, hasBMI2 = x86extensions()
+}
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
new file mode 100644
index 00000000000..4465fbe9e90
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
@@ -0,0 +1,36 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+TEXT ·x86extensions(SB), NOSPLIT, $0
+ // 1. determine max EAX value
+ XORQ AX, AX
+ CPUID
+
+ CMPQ AX, $7
+ JB unsupported
+
+ // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
+ MOVQ $7, AX
+ MOVQ $0, CX
+ CPUID
+
+ BTQ $3, BX // bit 3 = BMI1
+ SETCS AL
+
+ BTQ $8, BX // bit 8 = BMI2
+ SETCS AH
+
+ MOVB AL, bmi1+0(FP)
+ MOVB AH, bmi2+1(FP)
+ RET
+
+unsupported:
+ XORQ AX, AX
+ MOVB AL, bmi1+0(FP)
+ MOVB AL, bmi2+1(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 511bba65db8..298c4f8e97d 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 {
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int {
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
@@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int {
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
+//
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
@@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index c8f0f16fc1e..65b38abed80 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen
Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
## Installation
Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
@@ -78,6 +80,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is
in the future. So if you want to limit concurrency for future updates, specify the concurrency
you would like.
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`
+which will compress input as each block is completed, blocking on writes until each has completed.
+
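A sketch of that synchronous setup, assuming placeholder file names:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	in, err := os.Open("input.bin") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()
	out, err := os.Create("input.bin.zst") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Concurrency 1: blocks are compressed and written inline, with no
	// async goroutines spawned by the encoder.
	enc, err := zstd.NewWriter(out, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(enc, in); err != nil {
		enc.Close()
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}
```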
You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
compression settings can be specified.
@@ -104,7 +109,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
`EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently, but each call will only run on a single goroutine.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
Encoded blocks can be concatenated and the result will be the combined input stream.
Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
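A short round-trip sketch of the `EncodeAll`/`DecodeAll` pairing described above (the concatenation behavior follows the sentence on combined streams):

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil writer is fine when the encoder is only used via EncodeAll.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	// Each call runs on the calling goroutine and appends one frame.
	frames := enc.EncodeAll([]byte("hello "), nil)
	frames = enc.EncodeAll([]byte("world"), frames) // concatenated frames

	dec, err := zstd.NewReader(nil) // nil reader: DecodeAll-only use
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	plain, err := dec.DecodeAll(frames, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", plain) // hello world
}
```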
@@ -149,10 +155,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
This package:
file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73101992 643 313.87
-silesia.tar zskp 2 211947520 67504318 969 208.38
-silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 8825 22.90
+silesia.tar zskp 1 211947520 73821326 634 318.47
+silesia.tar zskp 2 211947520 67655404 1508 133.96
+silesia.tar zskp 3 211947520 64746933 3000 67.37
+silesia.tar zskp 4 211947520 60073508 16926 11.94
cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56
@@ -161,94 +167,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package:
-silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80136201 1152 175.45
+silesia.tar gzstd 1 211947520 80007735 1498 134.87
+silesia.tar gzkp 1 211947520 80088272 1009 200.31
GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z
file out level insize outsize millis mb/s
-gob-stream zskp 1 1911399616 235022249 3088 590.30
-gob-stream zskp 2 1911399616 205669791 3786 481.34
-gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 165609838 50369 36.19
+gob-stream zskp 1 1911399616 233948096 3230 564.34
+gob-stream zskp 2 1911399616 203997694 4997 364.73
+gob-stream zskp 3 1911399616 173526523 13435 135.68
+gob-stream zskp 4 1911399616 162195235 47559 38.33
gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70
-gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 359753026 5438 335.20
+gob-stream gzstd 1 1911399616 357382013 9046 201.49
+gob-stream gzkp 1 1911399616 359136669 4885 373.08
The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
http://mattmahoney.net/dc/textdata.html
file out level insize outsize millis mb/s
-enwik9 zskp 1 1000000000 343848582 3609 264.18
-enwik9 zskp 2 1000000000 317276632 5746 165.97
-enwik9 zskp 3 1000000000 292243069 12162 78.41
-enwik9 zskp 4 1000000000 262183768 82837 11.51
+enwik9 zskp 1 1000000000 343833605 3687 258.64
+enwik9 zskp 2 1000000000 317001237 7672 124.29
+enwik9 zskp 3 1000000000 291915823 15923 59.89
+enwik9 zskp 4 1000000000 261710291 77697 12.27
enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40
-enwik9 gzstd 1 1000000000 382578136 9604 99.30
-enwik9 gzkp 1 1000000000 383825945 6544 145.73
+enwik9 gzstd 1 1000000000 382578136 8608 110.78
+enwik9 gzkp 1 1000000000 382781160 5628 169.45
Highly compressible JSON file.
https://files.klauspost.com/compress/github-june-2days-2019.json.zst
file out level insize outsize millis mb/s
-github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
-github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
-github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
-github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
-github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
-github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z
file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
-rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
-rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
-rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
-rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
-rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
file out level insize outsize millis mb/s
-nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
-nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
-nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
```
## Decompressor
@@ -283,8 +289,13 @@ func Decompress(in io.Reader, out io.Writer) error {
}
```
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
-See "Allocation-less operation" below.
+When running with default settings, it is important to call the "Close" function when you no longer need the Reader,
+so that its running goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is requested.
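+
+A synchronous-decode sketch (the `DecompressSync` helper is hypothetical; `in`/`out` are caller-supplied):
+
+```Go
+// DecompressSync copies the decoded stream to out, decoding only
+// on the calling goroutine.
+func DecompressSync(in io.Reader, out io.Writer) error {
+    dec, err := zstd.NewReader(in, zstd.WithDecoderConcurrency(1))
+    if err != nil {
+        return err
+    }
+    defer dec.Close()
+    _, err = io.Copy(out, dec)
+    return err
+}
+```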
For decoding buffers, it could look something like this:
@@ -293,7 +304,7 @@ import "github.com/klauspost/compress/zstd"
// Create a reader that caches decompressors.
// For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil)
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
// Decompress a buffer. We don't supply a destination buffer,
// so it will be allocated by the decoder.
@@ -303,9 +314,12 @@ func Decompress(src []byte) ([]byte, error) {
```
Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default, 4 decompressors will be created.
+
It will only allow a certain number of concurrent operations to run.
-To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
### Dictionaries
@@ -357,62 +371,48 @@ In this case no unneeded allocations should be made.
The buffer decoder does everything on the same goroutine and does nothing concurrently.
It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
-The stream decoder operates on
+The stream decoder will create goroutines that:
-* One goroutine reads input and splits the input to several block decoders.
-* A number of decoders will decode blocks.
-* A goroutine coordinates these blocks and sends history from one to the next.
+1) Read input and split it into blocks.
+2) Decompress the literals.
+3) Decompress the sequences.
+4) Reconstruct the output stream.
So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+For streams, the concurrency level determines how many blocks ahead the decompression will start.
+
Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
-
-
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
### Benchmarks
-These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
-
The first two are streaming decodes and the last are smaller inputs.
-
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
```
-BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
-BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
-
-BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
-BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
-
-Concurrent performance:
-
-BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
-
-BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
+BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
+BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
```
-This reflects the performance around May 2020, but this may be out of date.
+This reflects the performance around May 2022, but this may be out of date.
## Zstd inside ZIP files
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 85445853715..97299d499cf 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -7,6 +7,7 @@ package zstd
import (
"encoding/binary"
"errors"
+ "fmt"
"io"
"math/bits"
)
@@ -50,16 +51,16 @@ func (b *bitReader) getBits(n uint8) int {
if n == 0 /*|| b.bitsRead >= 64 */ {
return 0
}
- return b.getBitsFast(n)
+ return int(b.get32BitsFast(n))
}
-// getBitsFast requires that at least one bit is requested every time.
+// get32BitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
-func (b *bitReader) getBitsFast(n uint8) int {
+func (b *bitReader) get32BitsFast(n uint8) uint32 {
const regMask = 64 - 1
v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
b.bitsRead += n
- return int(v)
+ return v
}
// fillFast() will make sure at least 32 bits are available.
@@ -125,6 +126,9 @@ func (b *bitReader) remain() uint {
func (b *bitReader) close() error {
// Release reference.
b.in = nil
+ if !b.finished() {
+ return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
+ }
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index 303ae90f944..78b3c61be3e 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -5,8 +5,6 @@
package zstd
-import "fmt"
-
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@@ -38,7 +36,7 @@ func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
b.nBits += bits
}
-// addBits32NC will add up to 32 bits.
+// addBits32NC will add up to 31 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
@@ -46,6 +44,26 @@ func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
b.nBits += bits
}
+// addBits64NC will add up to 64 bits.
+// There must be space for 32 bits.
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) {
+ if bits <= 31 {
+ b.addBits32Clean(uint32(value), bits)
+ return
+ }
+ b.addBits32Clean(uint32(value), 32)
+ b.flush32()
+ b.addBits32Clean(uint32(value>>32), bits-32)
+}
+
+// addBits32Clean will add up to 32 bits.
+// It will not check if there is space for them.
+// The input must not contain more bits than specified.
+func (b *bitWriter) addBits32Clean(value uint32, bits uint8) {
+ b.bitContainer |= uint64(value) << (b.nBits & 63)
+ b.nBits += bits
+}
+
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -53,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
b.nBits += bits
}
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
- v := b.nBits >> 3
- switch v {
- case 0:
- case 1:
- b.out = append(b.out,
- byte(b.bitContainer),
- )
- case 2:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- )
- case 3:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- )
- case 4:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- )
- case 5:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- )
- case 6:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- )
- case 7:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- )
- case 8:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- byte(b.bitContainer>>56),
- )
- default:
- panic(fmt.Errorf("bits (%d) > 64", b.nBits))
- }
- b.bitContainer >>= v << 3
- b.nBits &= 7
-}
-
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 8a98c4562e0..f52d1aed6fe 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -5,9 +5,13 @@
package zstd
import (
+ "bytes"
+ "encoding/binary"
"errors"
"fmt"
"io"
+ "os"
+ "path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@@ -38,14 +42,14 @@ const (
// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
maxCompressedBlockSize = 128 << 10
+ compressedBlockOverAlloc = 16
+ maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
+
// Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
- maxCompressedLiteralSize = 1 << 18
- maxRLELiteralSize = 1 << 20
- maxMatchLen = 131074
- maxSequences = 0x7f00 + 0xffff
+ maxMatchLen = 131074
+ maxSequences = 0x7f00 + 0xffff
// We support slightly less than the reference decoder to be able to
// use ints on 32 bit archs.
@@ -76,20 +80,27 @@ type blockDec struct {
// Window size of the block.
WindowSize uint64
- history chan *history
- input chan struct{}
- result chan decodeOutput
- sequenceBuf []seq
- err error
- decWG sync.WaitGroup
+ err error
+
+ // Check against this crc
+ checkCRC []byte
// Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame.
localFrame *frameDec
+ sequence []seqVals
+
+ async struct {
+ newHist *history
+ literals []byte
+ seqData []byte
+ seqSize int // Size of uncompressed sequences
+ fcs uint64
+ }
+
// Block is RLE, this is the size.
RLESize uint32
- tmp [4]byte
Type blockType
@@ -109,13 +120,8 @@ func (b *blockDec) String() string {
func newBlockDec(lowMem bool) *blockDec {
b := blockDec{
- lowMem: lowMem,
- result: make(chan decodeOutput, 1),
- input: make(chan struct{}, 1),
- history: make(chan *history, 1),
+ lowMem: lowMem,
}
- b.decWG.Add(1)
- go b.startDecoder()
return &b
}
@@ -133,11 +139,17 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.Type = blockType((bh >> 1) & 3)
// find size.
cSize := int(bh >> 3)
- maxSize := maxBlockSize
+ maxSize := maxCompressedBlockSizeAlloc
switch b.Type {
case blockTypeReserved:
return ErrReservedBlockType
case blockTypeRLE:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
b.RLESize = uint32(cSize)
if b.lowMem {
maxSize = cSize
@@ -148,9 +160,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
println("Data size on stream:", cSize)
}
b.RLESize = 0
- maxSize = maxCompressedBlockSize
+ maxSize = maxCompressedBlockSizeAlloc
if windowSize < maxCompressedBlockSize && b.lowMem {
- maxSize = int(windowSize)
+ maxSize = int(windowSize) + compressedBlockOverAlloc
}
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debugDecoder {
@@ -158,7 +170,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
return ErrCompressedSizeTooBig
}
+ // Empty compressed blocks must be at least 2 bytes:
+ // one for Literals_Block_Type and one for Sequences_Section_Header.
+ if cSize < 2 {
+ return ErrBlockTooSmall
+ }
case blockTypeRaw:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
+
b.RLESize = 0
// We do not need a destination for raw blocks.
maxSize = -1
@@ -169,9 +193,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
// Read block data.
if cap(b.dataStorage) < cSize {
if b.lowMem || cSize > maxCompressedBlockSize {
- b.dataStorage = make([]byte, 0, cSize)
+ b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else {
- b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
+ b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
}
}
if cap(b.dst) <= maxSize {
@@ -193,85 +217,14 @@ func (b *blockDec) sendErr(err error) {
b.Last = true
b.Type = blockTypeReserved
b.err = err
- b.input <- struct{}{}
}
// Close will release resources.
// Closed blockDec cannot be reset.
func (b *blockDec) Close() {
- close(b.input)
- close(b.history)
- close(b.result)
- b.decWG.Wait()
}
-// decodeAsync will prepare decoding the block when it receives input.
-// This will separate output and history.
-func (b *blockDec) startDecoder() {
- defer b.decWG.Done()
- for range b.input {
- //println("blockDec: Got block input")
- switch b.Type {
- case blockTypeRLE:
- if cap(b.dst) < int(b.RLESize) {
- if b.lowMem {
- b.dst = make([]byte, b.RLESize)
- } else {
- b.dst = make([]byte, maxBlockSize)
- }
- }
- o := decodeOutput{
- d: b,
- b: b.dst[:b.RLESize],
- err: nil,
- }
- v := b.data[0]
- for i := range o.b {
- o.b[i] = v
- }
- hist := <-b.history
- hist.append(o.b)
- b.result <- o
- case blockTypeRaw:
- o := decodeOutput{
- d: b,
- b: b.data,
- err: nil,
- }
- hist := <-b.history
- hist.append(o.b)
- b.result <- o
- case blockTypeCompressed:
- b.dst = b.dst[:0]
- err := b.decodeCompressed(nil)
- o := decodeOutput{
- d: b,
- b: b.dst,
- err: err,
- }
- if debugDecoder {
- println("Decompressed to", len(b.dst), "bytes, error:", err)
- }
- b.result <- o
- case blockTypeReserved:
- // Used for returning errors.
- <-b.history
- b.result <- decodeOutput{
- d: b,
- b: nil,
- err: b.err,
- }
- default:
- panic("Invalid block type")
- }
- if debugDecoder {
- println("blockDec: Finished block")
- }
- }
-}
-
-// decodeAsync will prepare decoding the block when it receives the history.
-// If history is provided, it will not fetch it from the channel.
+// decodeBuf decodes the block, appending output to the history buffer or keeping it in b.dst.
func (b *blockDec) decodeBuf(hist *history) error {
switch b.Type {
case blockTypeRLE:
@@ -294,14 +247,23 @@ func (b *blockDec) decodeBuf(hist *history) error {
return nil
case blockTypeCompressed:
saved := b.dst
- b.dst = hist.b
- hist.b = nil
+ // Append directly to history
+ if hist.ignoreBuffer == 0 {
+ b.dst = hist.b
+ hist.b = nil
+ } else {
+ b.dst = b.dst[:0]
+ }
err := b.decodeCompressed(hist)
if debugDecoder {
println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
}
- hist.b = b.dst
- b.dst = saved
+ if hist.ignoreBuffer == 0 {
+ hist.b = b.dst
+ b.dst = saved
+ } else {
+ hist.appendKeep(b.dst)
+ }
return err
case blockTypeReserved:
// Used for returning errors.
@@ -311,30 +273,18 @@ func (b *blockDec) decodeBuf(hist *history) error {
}
}
-// decodeCompressed will start decompressing a block.
-// If no history is supplied the decoder will decodeAsync as much as possible
-// before fetching from blockDec.history
-func (b *blockDec) decodeCompressed(hist *history) error {
- in := b.data
- delayedHistory := hist == nil
-
- if delayedHistory {
- // We must always grab history.
- defer func() {
- if hist == nil {
- <-b.history
- }
- }()
- }
+func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
if len(in) < 2 {
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
+
litType := literalsBlockType(in[0] & 3)
var litRegenSize int
var litCompSize int
sizeFormat := (in[0] >> 2) & 3
var fourStreams bool
+ var literals []byte
switch litType {
case literalsBlockRaw, literalsBlockRLE:
switch sizeFormat {
@@ -350,7 +300,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
if len(in) < 3 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
in = in[3:]
@@ -361,7 +311,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
if len(in) < 3 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
litRegenSize = int(n & 1023)
@@ -372,7 +322,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
fourStreams = true
if len(in) < 4 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
litRegenSize = int(n & 16383)
@@ -382,7 +332,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
fourStreams = true
if len(in) < 5 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
litRegenSize = int(n & 262143)
@@ -393,13 +343,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if debugDecoder {
println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
}
- var literals []byte
- var huff *huff0.Scratch
+ if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize {
+ return in, ErrWindowSizeExceeded
+ }
+
switch litType {
case literalsBlockRaw:
if len(in) < litRegenSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
literals = in[:litRegenSize]
in = in[litRegenSize:]
@@ -407,19 +359,13 @@ func (b *blockDec) decodeCompressed(hist *history) error {
case literalsBlockRLE:
if len(in) < 1 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
- b.literalBuf = make([]byte, litRegenSize)
+ b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
} else {
- if litRegenSize > maxCompressedLiteralSize {
- // Exceptional
- b.literalBuf = make([]byte, litRegenSize)
- } else {
- b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
-
- }
+ b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
literals = b.literalBuf[:litRegenSize]
@@ -434,7 +380,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
case literalsBlockTreeless:
if len(in) < litCompSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
// Store compressed literals, so we defer decoding until we get history.
literals = in[:litCompSize]
@@ -442,31 +388,65 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if debugDecoder {
printf("Found %d compressed literals\n", litCompSize)
}
+ huff := hist.huffTree
+ if huff == nil {
+ return in, errors.New("literal block was treeless, but no history was defined")
+ }
+ // Ensure we have space to store it.
+ if cap(b.literalBuf) < litRegenSize {
+ if b.lowMem {
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
+ } else {
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
+ }
+ }
+ var err error
+ // Use our out buffer.
+ huff.MaxDecodedSize = litRegenSize
+ if fourStreams {
+ literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+ } else {
+ literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+ }
+ // Make sure we don't leak our literals buffer
+ if err != nil {
+ println("decompressing literals:", err)
+ return in, err
+ }
+ if len(literals) != litRegenSize {
+ return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+ }
+
case literalsBlockCompressed:
if len(in) < litCompSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
literals = in[:litCompSize]
in = in[litCompSize:]
- huff = huffDecoderPool.Get().(*huff0.Scratch)
- var err error
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
- b.literalBuf = make([]byte, 0, litRegenSize)
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else {
- b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
- if huff == nil {
- huff = &huff0.Scratch{}
+ huff := hist.huffTree
+ if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) {
+ huff = huffDecoderPool.Get().(*huff0.Scratch)
+ if huff == nil {
+ huff = &huff0.Scratch{}
+ }
}
+ var err error
huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil {
println("reading huffman table:", err)
- return err
+ return in, err
}
+ hist.huffTree = huff
+ huff.MaxDecodedSize = litRegenSize
// Use our out buffer.
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@@ -475,27 +455,63 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
if err != nil {
println("decoding compressed literals:", err)
- return err
+ return in, err
}
// Make sure we don't leak our literals buffer
if len(literals) != litRegenSize {
- return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+ return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
+ // Re-cap to get extra size.
+ literals = b.literalBuf[:len(literals)]
if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
}
}
+ hist.decoders.literals = literals
+ return in, nil
+}
+
+// decodeCompressed will start decompressing a block.
+func (b *blockDec) decodeCompressed(hist *history) error {
+ in := b.data
+ in, err := b.decodeLiterals(in, hist)
+ if err != nil {
+ return err
+ }
+ err = b.prepareSequences(in, hist)
+ if err != nil {
+ return err
+ }
+ if hist.decoders.nSeqs == 0 {
+ b.dst = append(b.dst, hist.decoders.literals...)
+ return nil
+ }
+ before := len(hist.decoders.out)
+ err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
+ if err != nil {
+ return err
+ }
+ if hist.decoders.maxSyncLen > 0 {
+ hist.decoders.maxSyncLen += uint64(before)
+ hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
+ }
+ b.dst = hist.decoders.out
+ hist.recentOffsets = hist.decoders.prevOffset
+ return nil
+}
+func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
+ if debugDecoder {
+ printf("prepareSequences: %d byte(s) input\n", len(in))
+ }
// Decode Sequences
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
if len(in) < 1 {
return ErrBlockTooSmall
}
+ var nSeqs int
seqHeader := in[0]
- nSeqs := 0
switch {
- case seqHeader == 0:
- in = in[1:]
case seqHeader < 128:
nSeqs = int(seqHeader)
in = in[1:]
@@ -512,19 +528,16 @@ func (b *blockDec) decodeCompressed(hist *history) error {
nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
in = in[3:]
}
- // Allocate sequences
- if cap(b.sequenceBuf) < nSeqs {
- if b.lowMem {
- b.sequenceBuf = make([]seq, nSeqs)
- } else {
- // Allocate max
- b.sequenceBuf = make([]seq, nSeqs, maxSequences)
+ if nSeqs == 0 && len(in) != 0 {
+ // When there are no sequences, there should not be any more data...
+ if debugDecoder {
+ printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
}
- } else {
- // Reuse buffer
- b.sequenceBuf = b.sequenceBuf[:nSeqs]
+ return ErrUnexpectedBlockSize
}
- var seqs = &sequenceDecs{}
+
+ var seqs = &hist.decoders
+ seqs.nSeqs = nSeqs
if nSeqs > 0 {
if len(in) < 1 {
return ErrBlockTooSmall
@@ -553,6 +566,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
switch mode {
case compModePredefined:
+ if seq.fse != nil && !seq.fse.preDefined {
+ fseDecoderPool.Put(seq.fse)
+ }
seq.fse = &fsePredef[i]
case compModeRLE:
if br.remain() < 1 {
@@ -560,34 +576,36 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
v := br.Uint8()
br.advance(1)
- dec := fseDecoderPool.Get().(*fseDecoder)
+ if seq.fse == nil || seq.fse.preDefined {
+ seq.fse = fseDecoderPool.Get().(*fseDecoder)
+ }
symb, err := decSymbolValue(v, symbolTableX[i])
if err != nil {
printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
return err
}
- dec.setRLE(symb)
- seq.fse = dec
+ seq.fse.setRLE(symb)
if debugDecoder {
printf("RLE set to %+v, code: %v", symb, v)
}
case compModeFSE:
println("Reading table for", tableIndex(i))
- dec := fseDecoderPool.Get().(*fseDecoder)
- err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
+ if seq.fse == nil || seq.fse.preDefined {
+ seq.fse = fseDecoderPool.Get().(*fseDecoder)
+ }
+ err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i]))
if err != nil {
println("Read table error:", err)
return err
}
- err = dec.transform(symbolTableX[i])
+ err = seq.fse.transform(symbolTableX[i])
if err != nil {
println("Transform table error:", err)
return err
}
if debugDecoder {
- println("Read table ok", "symbolLen:", dec.symbolLen)
+ println("Read table ok", "symbolLen:", seq.fse.symbolLen)
}
- seq.fse = dec
case compModeRepeat:
seq.repeat = true
}
@@ -597,140 +615,106 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
in = br.unread()
}
-
- // Wait for history.
- // All time spent after this is critical since it is strictly sequential.
- if hist == nil {
- hist = <-b.history
- if hist.error {
- return ErrDecoderClosed
- }
- }
-
- // Decode treeless literal block.
- if litType == literalsBlockTreeless {
- // TODO: We could send the history early WITHOUT the stream history.
- // This would allow decoding treeless literals before the byte history is available.
- // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
- // So not much obvious gain here.
-
- if hist.huffTree == nil {
- return errors.New("literal block was treeless, but no history was defined")
- }
- // Ensure we have space to store it.
- if cap(b.literalBuf) < litRegenSize {
- if b.lowMem {
- b.literalBuf = make([]byte, 0, litRegenSize)
- } else {
- b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
- }
- }
- var err error
- // Use our out buffer.
- huff = hist.huffTree
- if fourStreams {
- literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
- } else {
- literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
- }
- // Make sure we don't leak our literals buffer
- if err != nil {
- println("decompressing literals:", err)
- return err
- }
- if len(literals) != litRegenSize {
- return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
- }
- } else {
- if hist.huffTree != nil && huff != nil {
- if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
- huffDecoderPool.Put(hist.huffTree)
- }
- hist.huffTree = nil
- }
- }
- if huff != nil {
- hist.huffTree = huff
- }
if debugDecoder {
- println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
+ println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.")
}
if nSeqs == 0 {
- // Decompressed content is defined entirely as Literals Section content.
- b.dst = append(b.dst, literals...)
- if delayedHistory {
- hist.append(literals)
+ if len(b.sequence) > 0 {
+ b.sequence = b.sequence[:0]
}
return nil
}
+ br := seqs.br
+ if br == nil {
+ br = &bitReader{}
+ }
+ if err := br.init(in); err != nil {
+ return err
+ }
- seqs, err := seqs.mergeHistory(&hist.decoders)
- if err != nil {
+ if err := seqs.initialize(br, hist, b.dst); err != nil {
+ println("initializing sequences:", err)
return err
}
- if debugDecoder {
- println("History merged ok")
+ // Extract blocks for test data generation; disabled by the false guard...
+ if false && hist.dict == nil {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
+ var buf bytes.Buffer
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
+ buf.Write(in)
+ os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}
- br := &bitReader{}
- if err := br.init(in); err != nil {
- return err
+
+ return nil
+}
+
+func (b *blockDec) decodeSequences(hist *history) error {
+ if cap(b.sequence) < hist.decoders.nSeqs {
+ if b.lowMem {
+ b.sequence = make([]seqVals, 0, hist.decoders.nSeqs)
+ } else {
+ b.sequence = make([]seqVals, 0, 0x7F00+0xffff)
+ }
}
+ b.sequence = b.sequence[:hist.decoders.nSeqs]
+ if hist.decoders.nSeqs == 0 {
+ hist.decoders.seqSize = len(hist.decoders.literals)
+ return nil
+ }
+ hist.decoders.windowSize = hist.windowSize
+ hist.decoders.prevOffset = hist.recentOffsets
- // TODO: Investigate if sending history without decoders are faster.
- // This would allow the sequences to be decoded async and only have to construct stream history.
- // If only recent offsets were not transferred, this would be an obvious win.
- // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+ err := hist.decoders.decode(b.sequence)
+ hist.recentOffsets = hist.decoders.prevOffset
+ return err
+}
+func (b *blockDec) executeSequences(hist *history) error {
hbytes := hist.b
if len(hbytes) > hist.windowSize {
hbytes = hbytes[len(hbytes)-hist.windowSize:]
- // We do not need history any more.
+ // We do not need history anymore.
if hist.dict != nil {
hist.dict.content = nil
}
}
-
- if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
- println("initializing sequences:", err)
- return err
- }
-
- err = seqs.decode(nSeqs, br, hbytes)
+ hist.decoders.windowSize = hist.windowSize
+ hist.decoders.out = b.dst[:0]
+ err := hist.decoders.execute(b.sequence, hbytes)
if err != nil {
return err
}
- if !br.finished() {
- return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
- }
+ return b.updateHistory(hist)
+}
- err = br.close()
- if err != nil {
- printf("Closing sequences: %v, %+v\n", err, *br)
- }
+func (b *blockDec) updateHistory(hist *history) error {
if len(b.data) > maxCompressedBlockSize {
return fmt.Errorf("compressed block size too large (%d)", len(b.data))
}
// Set output and release references.
- b.dst = seqs.out
- seqs.out, seqs.literals, seqs.hist = nil, nil, nil
+ b.dst = hist.decoders.out
+ hist.recentOffsets = hist.decoders.prevOffset
- if !delayedHistory {
- // If we don't have delayed history, no need to update.
- hist.recentOffsets = seqs.prevOffset
- return nil
- }
if b.Last {
// if last block we don't care about history.
println("Last block, no history returned")
hist.b = hist.b[:0]
return nil
+ } else {
+ hist.append(b.dst)
+ if debugDecoder {
+ println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b))
+ }
}
- hist.append(b.dst)
- hist.recentOffsets = seqs.prevOffset
- if debugDecoder {
- println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
- }
+ hist.decoders.out, hist.decoders.literals = nil, nil
return nil
}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 3df185ee465..12e8f6f0b61 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -51,7 +51,7 @@ func (b *blockEnc) init() {
if cap(b.literals) < maxCompressedBlockSize {
b.literals = make([]byte, 0, maxCompressedBlockSize)
}
- const defSeqs = 200
+ const defSeqs = 2000
if cap(b.sequences) < defSeqs {
b.sequences = make([]seq, 0, defSeqs)
}
@@ -426,7 +426,7 @@ func fuzzFseEncoder(data []byte) int {
return 0
}
enc := fseEncoder{}
- hist := enc.Histogram()[:256]
+ hist := enc.Histogram()
maxSym := uint8(0)
for i, v := range data {
v = v & 63
@@ -722,52 +722,53 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
}
seq--
- if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 {
- // No need to flush (common)
- for seq >= 0 {
- s = b.sequences[seq]
- wr.flush32()
- llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
- // tabelog max is 8 for all.
- of.encode(ofB)
- ml.encode(mlB)
- ll.encode(llB)
- wr.flush32()
-
- // We checked that all can stay within 32 bits
- wr.addBits32NC(s.litLen, llB.outBits)
- wr.addBits32NC(s.matchLen, mlB.outBits)
- wr.addBits32NC(s.offset, ofB.outBits)
-
- if debugSequences {
- println("Encoded seq", seq, s)
- }
-
- seq--
- }
- } else {
- for seq >= 0 {
- s = b.sequences[seq]
- wr.flush32()
- llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
- // tabelog max is below 8 for each.
- of.encode(ofB)
- ml.encode(mlB)
- ll.encode(llB)
- wr.flush32()
-
- // ml+ll = max 32 bits total
- wr.addBits32NC(s.litLen, llB.outBits)
- wr.addBits32NC(s.matchLen, mlB.outBits)
- wr.flush32()
- wr.addBits32NC(s.offset, ofB.outBits)
-
- if debugSequences {
- println("Encoded seq", seq, s)
- }
-
- seq--
- }
+ // Store sequences in reverse...
+ for seq >= 0 {
+ s = b.sequences[seq]
+
+ ofB := ofTT[s.ofCode]
+ wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+ //of.encode(ofB)
+ nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+ dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+ wr.addBits16NC(of.state, uint8(nbBitsOut))
+ of.state = of.stateTable[dstState]
+
+ // Accumulate extra bits.
+ outBits := ofB.outBits & 31
+ extraBits := uint64(s.offset & bitMask32[outBits])
+ extraBitsN := outBits
+
+ mlB := mlTT[s.mlCode]
+ //ml.encode(mlB)
+ nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+ dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+ wr.addBits16NC(ml.state, uint8(nbBitsOut))
+ ml.state = ml.stateTable[dstState]
+
+ outBits = mlB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ llB := llTT[s.llCode]
+ //ll.encode(llB)
+ nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+ dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+ wr.addBits16NC(ll.state, uint8(nbBitsOut))
+ ll.state = ll.stateTable[dstState]
+
+ outBits = llB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ wr.flush32()
+ wr.addBits64NC(extraBits, extraBitsN)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ seq--
+ }
@@ -801,9 +802,9 @@ func (b *blockEnc) genCodes() {
 if len(b.sequences) > math.MaxUint16 {
panic("can only encode up to 64K sequences")
}
// No bounds checks after here:
- llH := b.coders.llEnc.Histogram()[:256]
- ofH := b.coders.ofEnc.Histogram()[:256]
- mlH := b.coders.mlEnc.Histogram()[:256]
+ llH := b.coders.llEnc.Histogram()
+ ofH := b.coders.ofEnc.Histogram()
+ mlH := b.coders.mlEnc.Histogram()
for i := range llH {
llH[i] = 0
}
@@ -820,7 +820,8 @@ func (b *blockEnc) genCodes() {
}
var llMax, ofMax, mlMax uint8
- for i, seq := range b.sequences {
+ for i := range b.sequences {
+ seq := &b.sequences[i]
v := llCode(seq.litLen)
seq.llCode = v
llH[v]++
@@ -844,7 +845,6 @@ func (b *blockEnc) genCodes() {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
}
}
- b.sequences[i] = seq
}
maxCount := func(a []uint32) int {
var max uint32
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index aab71c6cf85..176788f2597 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -7,7 +7,6 @@ package zstd
import (
"fmt"
"io"
- "io/ioutil"
)
type byteBuffer interface {
@@ -23,7 +22,7 @@ type byteBuffer interface {
readByte() (byte, error)
// Skip n bytes.
- skipN(n int) error
+ skipN(n int64) error
}
// in-memory buffer
@@ -52,10 +51,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
return r, nil
}
-func (b *byteBuf) remain() []byte {
- return *b
-}
-
func (b *byteBuf) readByte() (byte, error) {
bb := *b
if len(bb) < 1 {
@@ -66,9 +61,12 @@ func (b *byteBuf) readByte() (byte, error) {
return r, nil
}
-func (b *byteBuf) skipN(n int) error {
+func (b *byteBuf) skipN(n int64) error {
bb := *b
- if len(bb) < n {
+ if n < 0 {
+ return fmt.Errorf("negative skip (%d) requested", n)
+ }
+ if int64(len(bb)) < n {
return io.ErrUnexpectedEOF
}
*b = bb[n:]
@@ -113,6 +111,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
func (r *readerWrapper) readByte() (byte, error) {
n2, err := r.r.Read(r.tmp[:1])
if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
return 0, err
}
if n2 != 1 {
@@ -121,9 +122,9 @@ func (r *readerWrapper) readByte() (byte, error) {
return r.tmp[0], nil
}
-func (r *readerWrapper) skipN(n int) error {
- n2, err := io.CopyN(ioutil.Discard, r.r, int64(n))
- if n2 != int64(n) {
+func (r *readerWrapper) skipN(n int64) error {
+ n2, err := io.CopyN(io.Discard, r.r, n)
+ if n2 != n {
err = io.ErrUnexpectedEOF
}
return err
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
index 2c4fca17fa1..0e59a242d8d 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytereader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -13,12 +13,6 @@ type byteReader struct {
off int
}
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
- b.b = in
- b.off = 0
-}
-
// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
b.off += int(n)
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
index 69736e8d4bb..5022e71c836 100644
--- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -5,6 +5,7 @@ package zstd
import (
"bytes"
+ "encoding/binary"
"errors"
"io"
)
@@ -15,18 +16,50 @@ const HeaderMaxSize = 14 + 3
// Header contains information about the first frame and block within that.
type Header struct {
- // Window Size the window of data to keep while decoding.
- // Will only be set if HasFCS is false.
- WindowSize uint64
+ // SingleSegment specifies whether the data is to be decompressed into a
+ // single contiguous memory segment.
+ // It implies that WindowSize is invalid and that FrameContentSize is valid.
+ SingleSegment bool
- // Frame content size.
- // Expected size of the entire frame.
- FrameContentSize uint64
+ // WindowSize is the window of data to keep while decoding.
+ // Will only be set if SingleSegment is false.
+ WindowSize uint64
// Dictionary ID.
// If 0, no dictionary.
DictionaryID uint32
+ // HasFCS specifies whether FrameContentSize has a valid value.
+ HasFCS bool
+
+ // FrameContentSize is the expected uncompressed size of the entire frame.
+ FrameContentSize uint64
+
+ // Skippable will be true if the frame is meant to be skipped.
+ // This implies that FirstBlock.OK is false.
+ Skippable bool
+
+ // SkippableID is the user-specific ID for the skippable frame.
+ // Valid values are between 0 and 15, inclusive.
+ SkippableID int
+
+ // SkippableSize is the length of the user data to skip following
+ // the header.
+ SkippableSize uint32
+
+ // HeaderSize is the raw size of the frame header.
+ //
+ // For normal frames, it includes the size of the magic number and
+ // the size of the header (per section 3.1.1.1).
+ // It does not include the size for any data blocks (section 3.1.1.2) nor
+ // the size for the trailing content checksum.
+ //
+ // For skippable frames, this counts the size of the magic number
+ // along with the size of the size field of the payload.
+ // It does not include the size of the skippable payload itself.
+ // The total frame size is the HeaderSize plus the SkippableSize.
+ HeaderSize int
+
// First block information.
FirstBlock struct {
// OK will be set if first block could be decoded.
@@ -51,17 +84,9 @@ type Header struct {
CompressedSize int
}
- // Skippable will be true if the frame is meant to be skipped.
- // No other information will be populated.
- Skippable bool
-
// If set there is a checksum present for the block content.
+ // The checksum field at the end is always 4 bytes long.
HasCheckSum bool
-
- // If this is true FrameContentSize will have a valid value
- HasFCS bool
-
- SingleSegment bool
}
// Decode the header from the beginning of the stream.
@@ -71,39 +96,46 @@ type Header struct {
// If there isn't enough input, io.ErrUnexpectedEOF is returned.
// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
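+//
+// A usage sketch (`buf` is a hypothetical slice holding at least the frame header):
+//
+//	var h Header
+//	if err := h.Decode(buf); err != nil {
+//		// header is malformed or truncated
+//	}
+//	if h.HasFCS {
+//		// h.FrameContentSize is valid
+//	}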
func (h *Header) Decode(in []byte) error {
+ *h = Header{}
if len(in) < 4 {
return io.ErrUnexpectedEOF
}
+ h.HeaderSize += 4
b, in := in[:4], in[4:]
if !bytes.Equal(b, frameMagic) {
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
return ErrMagicMismatch
}
- *h = Header{Skippable: true}
+ if len(in) < 4 {
+ return io.ErrUnexpectedEOF
+ }
+ h.HeaderSize += 4
+ h.Skippable = true
+ h.SkippableID = int(b[0] & 0xf)
+ h.SkippableSize = binary.LittleEndian.Uint32(in)
return nil
}
+
+ // Read Window_Descriptor
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
if len(in) < 1 {
return io.ErrUnexpectedEOF
}
-
- // Clear output
- *h = Header{}
fhd, in := in[0], in[1:]
+ h.HeaderSize++
h.SingleSegment = fhd&(1<<5) != 0
h.HasCheckSum = fhd&(1<<2) != 0
-
if fhd&(1<<3) != 0 {
return errors.New("reserved bit set on frame header")
}
- // Read Window_Descriptor
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
if !h.SingleSegment {
if len(in) < 1 {
return io.ErrUnexpectedEOF
}
var wd byte
wd, in = in[0], in[1:]
+ h.HeaderSize++
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)
@@ -120,9 +152,7 @@ func (h *Header) Decode(in []byte) error {
return io.ErrUnexpectedEOF
}
b, in = in[:size], in[size:]
- if b == nil {
- return io.ErrUnexpectedEOF
- }
+ h.HeaderSize += int(size)
switch size {
case 1:
h.DictionaryID = uint32(b[0])
@@ -152,9 +182,7 @@ func (h *Header) Decode(in []byte) error {
return io.ErrUnexpectedEOF
}
b, in = in[:fcsSize], in[fcsSize:]
- if b == nil {
- return io.ErrUnexpectedEOF
- }
+ h.HeaderSize += int(fcsSize)
switch fcsSize {
case 1:
h.FrameContentSize = uint64(b[0])
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index f430f58b572..78c10755f88 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -5,9 +5,13 @@
package zstd
import (
- "errors"
+ "bytes"
+ "context"
+ "encoding/binary"
"io"
"sync"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
)
// Decoder provides decoding of zstandard streams.
@@ -22,12 +26,20 @@ type Decoder struct {
// Unreferenced decoders, ready for use.
decoders chan *blockDec
- // Streams ready to be decoded.
- stream chan decodeStream
-
// Current read position used for Reader functionality.
current decoderState
+ // sync stream decoding
+ syncStream struct {
+ decodedFrame uint64
+ br readerWrapper
+ enabled bool
+ inFrame bool
+ dstBuf []byte
+ }
+
+ frame *frameDec
+
// Custom dictionaries.
// Always uses copies.
dicts map[uint32]dict
@@ -46,7 +58,10 @@ type decoderState struct {
output chan decodeOutput
// cancel remaining output.
- cancel chan struct{}
+ cancel context.CancelFunc
+
+ // crc of current frame
+ crc *xxhash.Digest
flushed bool
}
@@ -81,7 +96,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
return nil, err
}
}
- d.current.output = make(chan decodeOutput, d.o.concurrent)
+ d.current.crc = xxhash.New()
d.current.flushed = true
if r == nil {
@@ -130,7 +145,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
break
}
if !d.nextBlock(n == 0) {
- return n, nil
+ return n, d.current.err
}
}
}
@@ -162,6 +177,7 @@ func (d *Decoder) Reset(r io.Reader) error {
d.drainOutput()
+ d.syncStream.br.r = nil
if r == nil {
d.current.err = ErrDecoderNilInput
if len(d.current.b) > 0 {
@@ -172,21 +188,23 @@ func (d *Decoder) Reset(r io.Reader) error {
}
// If bytes buffer and < 5MB, do sync decoding anyway.
- if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+ if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
bb2 := bb
if debugDecoder {
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
}
b := bb2.Bytes()
var dst []byte
- if cap(d.current.b) > 0 {
- dst = d.current.b
+ if cap(d.syncStream.dstBuf) > 0 {
+ dst = d.syncStream.dstBuf[:0]
}
- dst, err := d.DecodeAll(b, dst[:0])
+ dst, err := d.DecodeAll(b, dst)
if err == nil {
err = io.EOF
}
+ // Save output buffer
+ d.syncStream.dstBuf = dst
d.current.b = dst
d.current.err = err
d.current.flushed = true
@@ -195,33 +213,40 @@ func (d *Decoder) Reset(r io.Reader) error {
}
return nil
}
-
- if d.stream == nil {
- d.stream = make(chan decodeStream, 1)
- d.streamWg.Add(1)
- go d.startStreamDecoder(d.stream)
- }
-
// Remove current block.
+ d.stashDecoder()
d.current.decodeOutput = decodeOutput{}
d.current.err = nil
- d.current.cancel = make(chan struct{})
d.current.flushed = false
d.current.d = nil
+ d.syncStream.dstBuf = nil
- d.stream <- decodeStream{
- r: r,
- output: d.current.output,
- cancel: d.current.cancel,
+ // Ensure no-one else is still running...
+ d.streamWg.Wait()
+ if d.frame == nil {
+ d.frame = newFrameDec(d.o)
}
+
+ if d.o.concurrent == 1 {
+ return d.startSyncDecoder(r)
+ }
+
+ d.current.output = make(chan decodeOutput, d.o.concurrent)
+ ctx, cancel := context.WithCancel(context.Background())
+ d.current.cancel = cancel
+ d.streamWg.Add(1)
+ go d.startStreamDecoder(ctx, r, d.current.output)
+
return nil
}
// drainOutput will drain the output until errEndOfStream is sent.
func (d *Decoder) drainOutput() {
if d.current.cancel != nil {
- println("cancelling current")
- close(d.current.cancel)
+ if debugDecoder {
+ println("cancelling current")
+ }
+ d.current.cancel()
d.current.cancel = nil
}
if d.current.d != nil {
@@ -243,12 +268,9 @@ func (d *Decoder) drainOutput() {
}
d.decoders <- v.d
}
- if v.err == errEndOfStream {
- println("current flushed")
- d.current.flushed = true
- return
- }
}
+ d.current.output = nil
+ d.current.flushed = true
}
// WriteTo writes data to w until there's no more data to write or when an error occurs.
@@ -287,19 +309,23 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
// DecodeAll can be used concurrently.
// The Decoder concurrency limits will be respected.
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
- if d.current.err == ErrDecoderClosed {
+ if d.decoders == nil {
return dst, ErrDecoderClosed
}
// Grab a block decoder and frame decoder.
block := <-d.decoders
frame := block.localFrame
+ initialSize := len(dst)
defer func() {
if debugDecoder {
printf("re-adding decoder: %p", block)
}
frame.rawInput = nil
frame.bBuf = nil
+ if frame.history.decoders.br != nil {
+ frame.history.decoders.br.in = nil
+ }
d.decoders <- block
}()
frame.bBuf = input
@@ -307,34 +333,52 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
for {
frame.history.reset()
err := frame.reset(&frame.bBuf)
- if err == io.EOF {
- if debugDecoder {
- println("frame reset return EOF")
+ if err != nil {
+ if err == io.EOF {
+ if debugDecoder {
+ println("frame reset return EOF")
+ }
+ return dst, nil
}
- return dst, nil
+ return dst, err
}
if frame.DictionaryID != nil {
dict, ok := d.dicts[*frame.DictionaryID]
if !ok {
return nil, ErrUnknownDictionary
}
+ if debugDecoder {
+ println("setting dict", frame.DictionaryID)
+ }
frame.history.setDict(&dict)
}
- if err != nil {
- return dst, err
- }
- if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
- return dst, ErrDecoderSizeExceeded
+ if frame.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize)
+ }
+ return dst, ErrWindowSizeExceeded
}
- if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
- // Never preallocate moe than 1 GB up front.
+ if frame.FrameContentSize != fcsUnknown {
+ if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
- dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+ dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}
- if cap(dst) == 0 {
+
+ if cap(dst) == 0 && !d.o.limitToCap {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
size := len(input) * 2
@@ -352,6 +396,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
if err != nil {
return dst, err
}
+ if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+ return dst, ErrDecoderSizeExceeded
+ }
if len(frame.bBuf) == 0 {
if debugDecoder {
println("frame dbuf empty")
@@ -368,33 +415,176 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// If non-blocking mode is used the returned boolean will be false
// if no data was available without blocking.
func (d *Decoder) nextBlock(blocking bool) (ok bool) {
- if d.current.d != nil {
- if debugDecoder {
- printf("re-adding current decoder %p", d.current.d)
- }
- d.decoders <- d.current.d
- d.current.d = nil
- }
if d.current.err != nil {
// Keep error state.
- return blocking
+ return false
+ }
+ d.current.b = d.current.b[:0]
+
+ // SYNC:
+ if d.syncStream.enabled {
+ if !blocking {
+ return false
+ }
+ ok = d.nextBlockSync()
+ if !ok {
+ d.stashDecoder()
+ }
+ return ok
}
+ // ASYNC:
+ d.stashDecoder()
if blocking {
- d.current.decodeOutput = <-d.current.output
+ d.current.decodeOutput, ok = <-d.current.output
} else {
select {
- case d.current.decodeOutput = <-d.current.output:
+ case d.current.decodeOutput, ok = <-d.current.output:
default:
return false
}
}
+ if !ok {
+ // This should not happen, so signal error state...
+ d.current.err = io.ErrUnexpectedEOF
+ return false
+ }
+ next := d.current.decodeOutput
+ if next.d != nil && next.d.async.newHist != nil {
+ d.current.crc.Reset()
+ }
if debugDecoder {
- println("got", len(d.current.b), "bytes, error:", d.current.err)
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b)))
+ println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
}
+
+ if !d.o.ignoreChecksum && len(next.b) > 0 {
+ n, err := d.current.crc.Write(next.b)
+ if err == nil {
+ if n != len(next.b) {
+ d.current.err = io.ErrShortWrite
+ }
+ }
+ }
+ if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
+ got := d.current.crc.Sum64()
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(got))
+ if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
+ if debugDecoder {
+ println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
+ }
+ d.current.err = ErrCRCMismatch
+ } else {
+ if debugDecoder {
+ println("CRC ok", tmp[:])
+ }
+ }
+ }
+
return true
}
+func (d *Decoder) nextBlockSync() (ok bool) {
+ if d.current.d == nil {
+ d.current.d = <-d.decoders
+ }
+ for len(d.current.b) == 0 {
+ if !d.syncStream.inFrame {
+ d.frame.history.reset()
+ d.current.err = d.frame.reset(&d.syncStream.br)
+ if d.current.err != nil {
+ return false
+ }
+ if d.frame.DictionaryID != nil {
+ dict, ok := d.dicts[*d.frame.DictionaryID]
+ if !ok {
+ d.current.err = ErrUnknownDictionary
+ return false
+ } else {
+ d.frame.history.setDict(&dict)
+ }
+ }
+ if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
+ d.current.err = ErrDecoderSizeExceeded
+ return false
+ }
+
+ d.syncStream.decodedFrame = 0
+ d.syncStream.inFrame = true
+ }
+ d.current.err = d.frame.next(d.current.d)
+ if d.current.err != nil {
+ return false
+ }
+ d.frame.history.ensureBlock()
+ if debugDecoder {
+ println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame)
+ }
+ histBefore := len(d.frame.history.b)
+ d.current.err = d.current.d.decodeBuf(&d.frame.history)
+
+ if d.current.err != nil {
+ println("error after:", d.current.err)
+ return false
+ }
+ d.current.b = d.frame.history.b[histBefore:]
+ if debugDecoder {
+ println("history after:", len(d.frame.history.b))
+ }
+
+ // Check frame size (before CRC)
+ d.syncStream.decodedFrame += uint64(len(d.current.b))
+ if d.syncStream.decodedFrame > d.frame.FrameContentSize {
+ if debugDecoder {
+ printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+ }
+ d.current.err = ErrFrameSizeExceeded
+ return false
+ }
+
+ // Check FCS
+ if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
+ if debugDecoder {
+ printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+ }
+ d.current.err = ErrFrameSizeMismatch
+ return false
+ }
+
+ // Update/Check CRC
+ if d.frame.HasCheckSum {
+ if !d.o.ignoreChecksum {
+ d.frame.crc.Write(d.current.b)
+ }
+ if d.current.d.Last {
+ if !d.o.ignoreChecksum {
+ d.current.err = d.frame.checkCRC()
+ } else {
+ d.current.err = d.frame.consumeCRC()
+ }
+ if d.current.err != nil {
+ println("CRC error:", d.current.err)
+ return false
+ }
+ }
+ }
+ d.syncStream.inFrame = !d.current.d.Last
+ }
+ return true
+}
+
+func (d *Decoder) stashDecoder() {
+ if d.current.d != nil {
+ if debugDecoder {
+ printf("re-adding current decoder %p", d.current.d)
+ }
+ d.decoders <- d.current.d
+ d.current.d = nil
+ }
+}
+
// Close will release all resources.
// It is NOT possible to reuse the decoder after this.
func (d *Decoder) Close() {
@@ -402,10 +592,10 @@ func (d *Decoder) Close() {
return
}
d.drainOutput()
- if d.stream != nil {
- close(d.stream)
+ if d.current.cancel != nil {
+ d.current.cancel()
d.streamWg.Wait()
- d.stream = nil
+ d.current.cancel = nil
}
if d.decoders != nil {
close(d.decoders)
@@ -456,100 +646,305 @@ type decodeOutput struct {
err error
}
-type decodeStream struct {
- r io.Reader
-
- // Blocks ready to be written to output.
- output chan decodeOutput
-
- // cancel reading from the input
- cancel chan struct{}
+func (d *Decoder) startSyncDecoder(r io.Reader) error {
+ d.frame.history.reset()
+ d.syncStream.br = readerWrapper{r: r}
+ d.syncStream.inFrame = false
+ d.syncStream.enabled = true
+ d.syncStream.decodedFrame = 0
+ return nil
}
-// errEndOfStream indicates that everything from the stream was read.
-var errEndOfStream = errors.New("end-of-stream")
-
// Create Decoder:
-// Spawn n block decoders. These accept tasks to decode a block.
-// Create goroutine that handles stream processing, this will send history to decoders as they are available.
-// Decoders update the history as they decode.
-// When a block is returned:
-// a) history is sent to the next decoder,
-// b) content written to CRC.
-// c) return data to WRITER.
-// d) wait for next block to return data.
-// Once WRITTEN, the decoders reused by the writer frame decoder for re-use.
-func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
+// ASYNC:
+// Spawn 3 goroutines.
+// 0: Read frames and decode block literals.
+// 1: Decode sequences.
+// 2: Execute sequences, send to output.
+func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
defer d.streamWg.Done()
- frame := newFrameDec(d.o)
- for stream := range inStream {
- if debugDecoder {
- println("got new stream")
+ br := readerWrapper{r: r}
+
+ var seqDecode = make(chan *blockDec, d.o.concurrent)
+ var seqExecute = make(chan *blockDec, d.o.concurrent)
+
+ // Async 1: Decode sequences...
+ go func() {
+ var hist history
+ var hasErr bool
+
+ for block := range seqDecode {
+ if hasErr {
+ if block != nil {
+ seqExecute <- block
+ }
+ continue
+ }
+ if block.async.newHist != nil {
+ if debugDecoder {
+ println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
+ }
+ hist.reset()
+ hist.decoders = block.async.newHist.decoders
+ hist.recentOffsets = block.async.newHist.recentOffsets
+ hist.windowSize = block.async.newHist.windowSize
+ if block.async.newHist.dict != nil {
+ hist.setDict(block.async.newHist.dict)
+ }
+ }
+ if block.err != nil || block.Type != blockTypeCompressed {
+ hasErr = block.err != nil
+ seqExecute <- block
+ continue
+ }
+
+ hist.decoders.literals = block.async.literals
+ block.err = block.prepareSequences(block.async.seqData, &hist)
+ if debugDecoder && block.err != nil {
+ println("prepareSequences returned:", block.err)
+ }
+ hasErr = block.err != nil
+ if block.err == nil {
+ block.err = block.decodeSequences(&hist)
+ if debugDecoder && block.err != nil {
+ println("decodeSequences returned:", block.err)
+ }
+ hasErr = block.err != nil
+ // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs]
+ block.async.seqSize = hist.decoders.seqSize
+ }
+ seqExecute <- block
}
- br := readerWrapper{r: stream.r}
- decodeStream:
- for {
- frame.history.reset()
- err := frame.reset(&br)
- if debugDecoder && err != nil {
- println("Frame decoder returned", err)
+ close(seqExecute)
+ hist.reset()
+ }()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ // Async 2: Execute sequences...
+ frameHistCache := d.frame.history.b
+ go func() {
+ var hist history
+ var decodedFrame uint64
+ var fcs uint64
+ var hasErr bool
+ for block := range seqExecute {
+ out := decodeOutput{err: block.err, d: block}
+ if block.err != nil || hasErr {
+ hasErr = true
+ output <- out
+ continue
}
- if err == nil && frame.DictionaryID != nil {
- dict, ok := d.dicts[*frame.DictionaryID]
- if !ok {
- err = ErrUnknownDictionary
+ if block.async.newHist != nil {
+ if debugDecoder {
+ println("Async 2: new history")
+ }
+ hist.reset()
+ hist.windowSize = block.async.newHist.windowSize
+ hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
+ if block.async.newHist.dict != nil {
+ hist.setDict(block.async.newHist.dict)
+ }
+
+ if cap(hist.b) < hist.allocFrameBuffer {
+ if cap(frameHistCache) >= hist.allocFrameBuffer {
+ hist.b = frameHistCache
+ } else {
+ hist.b = make([]byte, 0, hist.allocFrameBuffer)
+ println("Alloc history sized", hist.allocFrameBuffer)
+ }
+ }
+ hist.b = hist.b[:0]
+ fcs = block.async.fcs
+ decodedFrame = 0
+ }
+ do := decodeOutput{err: block.err, d: block}
+ switch block.Type {
+ case blockTypeRLE:
+ if debugDecoder {
+ println("add rle block length:", block.RLESize)
+ }
+
+ if cap(block.dst) < int(block.RLESize) {
+ if block.lowMem {
+ block.dst = make([]byte, block.RLESize)
+ } else {
+ block.dst = make([]byte, maxBlockSize)
+ }
+ }
+ block.dst = block.dst[:block.RLESize]
+ v := block.data[0]
+ for i := range block.dst {
+ block.dst[i] = v
+ }
+ hist.append(block.dst)
+ do.b = block.dst
+ case blockTypeRaw:
+ if debugDecoder {
+ println("add raw block length:", len(block.data))
+ }
+ hist.append(block.data)
+ do.b = block.data
+ case blockTypeCompressed:
+ if debugDecoder {
+ println("execute with history length:", len(hist.b), "window:", hist.windowSize)
+ }
+ hist.decoders.seqSize = block.async.seqSize
+ hist.decoders.literals = block.async.literals
+ do.err = block.executeSequences(&hist)
+ hasErr = do.err != nil
+ if debugDecoder && hasErr {
+ println("executeSequences returned:", do.err)
+ }
+ do.b = block.dst
+ }
+ if !hasErr {
+ decodedFrame += uint64(len(do.b))
+ if decodedFrame > fcs {
+ println("fcs exceeded", block.Last, fcs, decodedFrame)
+ do.err = ErrFrameSizeExceeded
+ hasErr = true
+ } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
+ do.err = ErrFrameSizeMismatch
+ hasErr = true
} else {
- frame.history.setDict(&dict)
+ if debugDecoder {
+ println("fcs ok", block.Last, fcs, decodedFrame)
+ }
}
}
- if err != nil {
- stream.output <- decodeOutput{
- err: err,
+ output <- do
+ }
+ close(output)
+ frameHistCache = hist.b
+ wg.Done()
+ if debugDecoder {
+ println("decoder goroutines finished")
+ }
+ hist.reset()
+ }()
+
+ var hist history
+decodeStream:
+ for {
+ var hasErr bool
+ hist.reset()
+ decodeBlock := func(block *blockDec) {
+ if hasErr {
+ if block != nil {
+ seqDecode <- block
}
- break
+ return
+ }
+ if block.err != nil || block.Type != blockTypeCompressed {
+ hasErr = block.err != nil
+ seqDecode <- block
+ return
}
+
+ remain, err := block.decodeLiterals(block.data, &hist)
+ block.err = err
+ hasErr = block.err != nil
+ if err == nil {
+ block.async.literals = hist.decoders.literals
+ block.async.seqData = remain
+ } else if debugDecoder {
+ println("decodeLiterals error:", err)
+ }
+ seqDecode <- block
+ }
+ frame := d.frame
+ if debugDecoder {
+ println("New frame...")
+ }
+ var historySent bool
+ frame.history.reset()
+ err := frame.reset(&br)
+ if debugDecoder && err != nil {
+ println("Frame decoder returned", err)
+ }
+ if err == nil && frame.DictionaryID != nil {
+ dict, ok := d.dicts[*frame.DictionaryID]
+ if !ok {
+ err = ErrUnknownDictionary
+ } else {
+ frame.history.setDict(&dict)
+ }
+ }
+ if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
if debugDecoder {
- println("starting frame decoder")
- }
-
- // This goroutine will forward history between frames.
- frame.frameDone.Add(1)
- frame.initAsync()
-
- go frame.startDecoder(stream.output)
- decodeFrame:
- // Go through all blocks of the frame.
- for {
- dec := <-d.decoders
- select {
- case <-stream.cancel:
- if !frame.sendErr(dec, io.EOF) {
- // To not let the decoder dangle, send it back.
- stream.output <- decodeOutput{d: dec}
- }
- break decodeStream
- default:
+ println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+ }
+
+ err = ErrDecoderSizeExceeded
+ }
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ case dec := <-d.decoders:
+ dec.sendErr(err)
+ decodeBlock(dec)
+ }
+ break decodeStream
+ }
+
+ // Go through all blocks of the frame.
+ for {
+ var dec *blockDec
+ select {
+ case <-ctx.Done():
+ break decodeStream
+ case dec = <-d.decoders:
+ // Once we have a decoder, we MUST return it.
+ }
+ err := frame.next(dec)
+ if !historySent {
+ h := frame.history
+ if debugDecoder {
+ println("Alloc History:", h.allocFrameBuffer)
+ }
+ hist.reset()
+ if h.dict != nil {
+ hist.setDict(h.dict)
+ }
+ dec.async.newHist = &h
+ dec.async.fcs = frame.FrameContentSize
+ historySent = true
+ } else {
+ dec.async.newHist = nil
+ }
+ if debugDecoder && err != nil {
+ println("next block returned error:", err)
+ }
+ dec.err = err
+ dec.checkCRC = nil
+ if dec.Last && frame.HasCheckSum && err == nil {
+ crc, err := frame.rawInput.readSmall(4)
+ if err != nil {
+ println("CRC missing?", err)
+ dec.err = err
}
- err := frame.next(dec)
- switch err {
- case io.EOF:
- // End of current frame, no error
- println("EOF on next block")
- break decodeFrame
- case nil:
- continue
- default:
- println("block decoder returned", err)
- break decodeStream
+ var tmp [4]byte
+ copy(tmp[:], crc)
+ dec.checkCRC = tmp[:]
+ if debugDecoder {
+ println("found crc to check:", dec.checkCRC)
}
}
- // All blocks have started decoding, check if there are more frames.
- println("waiting for done")
- frame.frameDone.Wait()
- println("done waiting...")
+ err = dec.err
+ last := dec.Last
+ decodeBlock(dec)
+ if err != nil {
+ break decodeStream
+ }
+ if last {
+ break
+ }
}
- frame.frameDone.Wait()
- println("Sending EOS")
- stream.output <- decodeOutput{err: errEndOfStream}
}
+ close(seqDecode)
+ wg.Wait()
+ hist.reset()
+ d.frame.history.b = frameHistCache
}
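The stream decoder above now has two paths: a synchronous one (startSyncDecoder, selected when concurrency is 1) and a three-goroutine async pipeline (startStreamDecoder). For illustration, a minimal usage sketch — not part of this diff, and the input file name is hypothetical:

    package main

    import (
        "io"
        "os"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        f, err := os.Open("data.zst") // hypothetical input file
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // Concurrency 1 selects the new synchronous path (no background
        // goroutines); any higher value uses the async pipeline above.
        dec, err := zstd.NewReader(f, zstd.WithDecoderConcurrency(1))
        if err != nil {
            panic(err)
        }
        defer dec.Close()

        if _, err := io.Copy(os.Stdout, dec); err != nil {
            panic(err)
        }
    }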
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index 95cc9b8b81f..f42448e69c9 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -14,21 +14,28 @@ type DOption func(*decoderOptions) error
// options retains accumulated state of multiple options.
type decoderOptions struct {
- lowMem bool
- concurrent int
- maxDecodedSize uint64
- maxWindowSize uint64
- dicts []dict
+ lowMem bool
+ concurrent int
+ maxDecodedSize uint64
+ maxWindowSize uint64
+ dicts []dict
+ ignoreChecksum bool
+ limitToCap bool
+ decodeBufsBelow int
}
func (o *decoderOptions) setDefault() {
*o = decoderOptions{
// use less ram: true for now, but may change.
- lowMem: true,
- concurrent: runtime.GOMAXPROCS(0),
- maxWindowSize: MaxWindowSize,
+ lowMem: true,
+ concurrent: runtime.GOMAXPROCS(0),
+ maxWindowSize: MaxWindowSize,
+ decodeBufsBelow: 128 << 10,
}
- o.maxDecodedSize = 1 << 63
+ if o.concurrent > 4 {
+ o.concurrent = 4
+ }
+ o.maxDecodedSize = 64 << 30
}
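To make the new defaults concrete, a small standalone sketch — the values are taken from the hunk above, this is not a public API:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        // Mirrors setDefault above: decoder concurrency is GOMAXPROCS
        // capped at 4, and the default decode budget drops from 1<<63
        // bytes to 64 GiB.
        conc := runtime.GOMAXPROCS(0)
        if conc > 4 {
            conc = 4
        }
        fmt.Println("concurrent:", conc, "maxDecodedSize:", uint64(64)<<30)
    }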
// WithDecoderLowmem will set whether to use a lower amount of memory,
@@ -37,16 +44,25 @@ func WithDecoderLowmem(b bool) DOption {
return func(o *decoderOptions) error { o.lowMem = b; return nil }
}
-// WithDecoderConcurrency will set the concurrency,
-// meaning the maximum number of decoders to run concurrently.
-// The value supplied must be at least 1.
-// By default this will be set to GOMAXPROCS.
+// WithDecoderConcurrency sets the number of created decoders.
+// When decoding blocks with DecodeAll, this will limit the number
+// of possible concurrently running decodes.
+// When decoding streams, this will limit the number of
+// inflight blocks.
+// When decoding streams and setting maximum to 1,
+// no async decoding will be done.
+// When a value of 0 is provided, GOMAXPROCS will be used.
+// By default this will be set to 4 or GOMAXPROCS, whichever is lower.
func WithDecoderConcurrency(n int) DOption {
return func(o *decoderOptions) error {
- if n <= 0 {
+ if n < 0 {
return errors.New("concurrency must be at least 1")
}
- o.concurrent = n
+ if n == 0 {
+ o.concurrent = runtime.GOMAXPROCS(0)
+ } else {
+ o.concurrent = n
+ }
return nil
}
}
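A usage sketch of the relaxed validation; the frame bytes are deliberately left empty here, since DecodeAll of empty input simply returns no data:

    package main

    import (
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        // 0 now means "use GOMAXPROCS"; only negative values error.
        // NewReader(nil, ...) is valid when only DecodeAll will be used.
        dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
        if err != nil {
            panic(err)
        }
        defer dec.Close()

        var compressed []byte // a zstd frame obtained elsewhere
        out, err := dec.DecodeAll(compressed, nil)
        fmt.Println(len(out), err) // 0 <nil> for empty input
    }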
@@ -54,7 +70,7 @@ func WithDecoderConcurrency(n int) DOption {
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
-// Maximum and default is 1 << 63 bytes.
+// Maximum is 1 << 63 bytes. Default is 64GiB.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
if n == 0 {
@@ -100,3 +116,34 @@ func WithDecoderMaxWindow(size uint64) DOption {
return nil
}
}
+
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.limitToCap = b
+ return nil
+ }
+}
+
+// WithDecodeBuffersBelow will fully decode readers that provide
+// `Bytes() []byte` and `Len() int` methods, similar to bytes.Buffer.
+// This typically uses fewer allocations, but it keeps the full decompressed object in memory.
+// Note that this is disabled by WithDecodeAllCapLimit, as well as by a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+ return func(o *decoderOptions) error {
+ o.decodeBufsBelow = size
+ return nil
+ }
+}
+
+// IgnoreChecksum allows checksum verification to be forcibly skipped.
+func IgnoreChecksum(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.ignoreChecksum = b
+ return nil
+ }
+}
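The new options combine naturally; a sketch of bounding DecodeAll output via buffer capacity (the helper name is hypothetical):

    package main

    import (
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    // decodeBounded never grows the output past maxOut bytes.
    func decodeBounded(compressed []byte, maxOut int) ([]byte, error) {
        dec, err := zstd.NewReader(nil,
            zstd.WithDecodeAllCapLimit(true), // cap(dst) becomes a hard limit
        )
        if err != nil {
            return nil, err
        }
        defer dec.Close()
        // With the cap limit set, DecodeAll returns ErrDecoderSizeExceeded
        // instead of allocating beyond cap(dst)-len(dst).
        return dec.DecodeAll(compressed, make([]byte, 0, maxOut))
    }

    func main() {
        out, err := decodeBounded(nil, 1<<20)
        fmt.Println(len(out), err)
    }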
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 295cd602a42..15ae8ee8077 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -108,11 +108,6 @@ func (e *fastBase) UseBlock(enc *blockEnc) {
e.blk = enc
}
-func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
-}
-
func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
if debugAsserts {
if s < 0 {
@@ -131,9 +126,24 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
}
}
+ a := src[s:]
+ b := src[t:]
+ b = b[:len(a)]
+ end := int32((len(a) >> 3) << 3)
+ for i := int32(0); i < end; i += 8 {
+ if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
+ return i + int32(bits.TrailingZeros64(diff)>>3)
+ }
+ }
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
+ a = a[end:]
+ b = b[end:]
+ for i := range a {
+ if a[i] != b[i] {
+ return int32(i) + end
+ }
+ }
+ return int32(len(a)) + end
}
// Reset the encoding table.
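The rewritten matchlen compares eight bytes per iteration and derives the first mismatching byte from the trailing zero count of the XOR. A standalone sketch of the same technique, assuming len(b) >= len(a):

    package main

    import (
        "encoding/binary"
        "fmt"
        "math/bits"
    )

    // matchLen64 mirrors the loop added above: XOR eight bytes at a time;
    // on a nonzero diff the trailing zero count, divided by 8, is the
    // index of the first differing byte (little-endian loads).
    func matchLen64(a, b []byte) int {
        b = b[:len(a)]
        var n int
        for len(a) >= 8 {
            if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
                return n + bits.TrailingZeros64(diff)>>3
            }
            a, b, n = a[8:], b[8:], n+8
        }
        for i := range a {
            if a[i] != b[i] {
                return n + i
            }
        }
        return n + len(a)
    }

    func main() {
        fmt.Println(matchLen64([]byte("zstandardX"), []byte("zstandardY"))) // 9
    }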
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index 96028ecd836..dbbb88d92b3 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -32,6 +32,7 @@ type match struct {
length int32
rep int32
est int32
+ _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
}
const highScore = 25000
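The padding rounds the struct up to 32 bytes, so two match entries fit a 64-byte cache line exactly. A quick size check on a stand-in struct shaped like the one above (field names assumed):

    package main

    import (
        "fmt"
        "unsafe"
    )

    type match struct {
        offset int32
        s      int32
        length int32
        rep    int32
        est    int32
        _      [12]byte // pad 20 -> 32 bytes
    }

    func main() {
        fmt.Println(unsafe.Sizeof(match{})) // 32
    }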
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index 602c05ee0c4..d70e3fd3d3e 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -156,8 +156,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -416,15 +416,23 @@ encodeLoop:
// Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is around 3 bytes, but depends on input.
+ // The skipped bytes are tested in the "Extend backwards" step,
+ // and are still picked up as part of the match if they do match.
+ const skipBeginning = 3
+
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
- cv := load3232(src, s)
+ s2 := s + skipBeginning
+ cv := load3232(src, s2)
candidateL := e.longTable[nextHashL]
- coffsetL := candidateL.offset - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("long match at end-of-match")
@@ -434,12 +442,13 @@ encodeLoop:
// Check prev long...
if true {
- coffsetL = candidateL.prev - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("prev long match at end-of-match")
@@ -518,8 +527,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -674,8 +683,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -1047,8 +1056,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index d6b3104240b..1f4a9a24556 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -127,8 +127,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -439,8 +439,8 @@ encodeLoop:
var t int32
for {
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -785,8 +785,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -969,7 +969,7 @@ encodeLoop:
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
- longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+ longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0
e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1)
@@ -1002,8 +1002,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -1103,7 +1103,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
}
if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
- copy(e.longTable[:], e.dictLongTable)
+ //copy(e.longTable[:], e.dictLongTable)
+ e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
for i := range e.longTableShardDirty {
e.longTableShardDirty[i] = false
}
@@ -1114,7 +1115,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}
- copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
e.longTableShardDirty[i] = false
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f2502629bc5..181edc02b6c 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -6,8 +6,6 @@ package zstd
import (
"fmt"
- "math"
- "math/bits"
)
const (
@@ -87,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
- const kSearchStrength = 7
+ const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
@@ -136,20 +134,7 @@ encodeLoop:
// Consider history as well.
var seq seq
var length int32
- // length = 4 + e.matchlen(s+6, repIndex+4, src)
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
-
+ length = 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
@@ -236,20 +221,7 @@ encodeLoop:
}
// Extend the 4-byte match as long as possible.
- //l := e.matchlen(s+4, t+4, src) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
@@ -286,20 +258,7 @@ encodeLoop:
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
- //l := 4 + e.matchlen(s+4, o2+4, src)
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := 4 + e.matchlen(s+4, o2+4, src)
// Store this, since we have it.
nextHash := hashLen(cv, hashLog, tableFastHashLen)
@@ -375,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
- const kSearchStrength = 8
+ const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
@@ -418,21 +377,7 @@ encodeLoop:
if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
- // length := 4 + e.matchlen(s+6, repIndex+4, src)
- // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
- var length int32
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
@@ -522,21 +467,7 @@ encodeLoop:
panic(fmt.Sprintf("t (%d) < 0 ", t))
}
// Extend the 4-byte match as long as possible.
- //l := e.matchlenNoHist(s+4, t+4, src) + 4
- // l := int32(matchLen(src[s+4:], src[t+4:])) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
@@ -573,21 +504,7 @@ encodeLoop:
if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
- //l := 4 + e.matchlenNoHist(s+4, o2+4, src)
- // l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := 4 + e.matchlen(s+4, o2+4, src)
// Store this, since we have it.
nextHash := hashLen(cv, hashLog, tableFastHashLen)
@@ -731,19 +648,7 @@ encodeLoop:
// Consider history as well.
var seq seq
var length int32
- // length = 4 + e.matchlen(s+6, repIndex+4, src)
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ length = 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
@@ -831,20 +736,7 @@ encodeLoop:
}
// Extend the 4-byte match as long as possible.
- //l := e.matchlen(s+4, t+4, src) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
@@ -881,20 +773,7 @@ encodeLoop:
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
- //l := 4 + e.matchlen(s+4, o2+4, src)
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := 4 + e.matchlen(s+4, o2+4, src)
// Store this, since we have it.
nextHash := hashLen(cv, hashLog, tableFastHashLen)
@@ -992,7 +871,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
const shardCnt = tableShardCnt
const shardSize = tableShardSize
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
- copy(e.table[:], e.dictTable)
+ //copy(e.table[:], e.dictTable)
+ e.table = *(*[tableSize]tableEntry)(e.dictTable)
for i := range e.tableShardDirty {
e.tableShardDirty[i] = false
}
@@ -1004,7 +884,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}
- copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
e.tableShardDirty[i] = false
}
e.allDirty = false
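Both dictionary Reset paths (doubleFastEncoderDict above and fastEncoderDict here) replace copy() with a slice-to-array-pointer conversion, which dereferences into a single bulk assignment and elides per-element bounds checks; this form needs a Go 1.17+ toolchain. A minimal sketch:

    package main

    import "fmt"

    func main() {
        // Go 1.17+: convert a slice to an array pointer, then dereference
        // to copy the whole backing array in one assignment. This panics
        // if the slice is shorter than the array length.
        src := []int{0, 1, 2, 3, 4, 5, 6, 7}
        dst := *(*[8]int)(src)
        fmt.Println(dst)
    }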
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index e6e315969b0..7aaaedb23e5 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) {
if cap(s.filling) == 0 {
s.filling = make([]byte, 0, e.o.blockSize)
}
- if cap(s.current) == 0 {
- s.current = make([]byte, 0, e.o.blockSize)
- }
- if cap(s.previous) == 0 {
- s.previous = make([]byte, 0, e.o.blockSize)
+ if e.o.concurrent > 1 {
+ if cap(s.current) == 0 {
+ s.current = make([]byte, 0, e.o.blockSize)
+ }
+ if cap(s.previous) == 0 {
+ s.previous = make([]byte, 0, e.o.blockSize)
+ }
+ s.current = s.current[:0]
+ s.previous = s.previous[:0]
+ if s.writing == nil {
+ s.writing = &blockEnc{lowMem: e.o.lowMem}
+ s.writing.init()
+ }
+ s.writing.initNewEncode()
}
if s.encoder == nil {
s.encoder = e.o.encoder()
}
- if s.writing == nil {
- s.writing = &blockEnc{lowMem: e.o.lowMem}
- s.writing.init()
- }
- s.writing.initNewEncode()
s.filling = s.filling[:0]
- s.current = s.current[:0]
- s.previous = s.previous[:0]
s.encoder.Reset(e.o.dict, false)
s.headerWritten = false
s.eofWritten = false
@@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error {
return s.err
}
+ // SYNC:
+ if e.o.concurrent == 1 {
+ src := s.filling
+ s.nInput += int64(len(s.filling))
+ if debugEncoder {
+ println("Adding sync block,", len(src), "bytes, final:", final)
+ }
+ enc := s.encoder
+ blk := enc.Block()
+ blk.reset(nil)
+ enc.Encode(blk, src)
+ blk.last = final
+ if final {
+ s.eofWritten = true
+ }
+
+ err := errIncompressible
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+ err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ }
+ switch err {
+ case errIncompressible:
+ if debugEncoder {
+ println("Storing incompressible block as raw")
+ }
+ blk.encodeRaw(src)
+ // In fast mode, we do not transfer offsets, so we don't have to deal with adjusting them.
+ case nil:
+ default:
+ s.err = err
+ return err
+ }
+ _, s.err = s.w.Write(blk.output)
+ s.nWritten += int64(len(blk.output))
+ s.filling = s.filling[:0]
+ return s.err
+ }
+
// Move blocks forward.
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
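A usage sketch of the new synchronous encoder path; with concurrency 1, each block is compressed and written inline in nextBlock above, with no filling/current/previous rotation:

    package main

    import (
        "io"
        "os"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
        if err != nil {
            panic(err)
        }
        if _, err := io.Copy(enc, os.Stdin); err != nil {
            panic(err)
        }
        if err := enc.Close(); err != nil {
            panic(err)
        }
    }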
@@ -486,8 +528,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If a non-single block is needed the encoder will reset again.
e.encoders <- enc
}()
- // Use single segments when above minimum window and below 1MB.
- single := len(src) < 1<<20 && len(src) > MinWindowSize
+ // Use single segments when above minimum window and below window size.
+ single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {
single = *e.o.single
}
@@ -509,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
// If we can do everything in one block, prefer that.
- if len(src) <= maxCompressedBlockSize {
+ if len(src) <= e.o.blockSize {
enc.Reset(e.o.dict, true)
// Slightly faster with no history and everything in one block.
if e.o.crc {
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 7d29e1d689e..a7c5e1aac43 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -24,6 +24,7 @@ type encoderOptions struct {
allLitEntropy bool
customWindow bool
customALEntropy bool
+ customBlockSize bool
lowMem bool
dict *dict
}
@@ -33,7 +34,7 @@ func (o *encoderOptions) setDefault() {
concurrent: runtime.GOMAXPROCS(0),
crc: true,
single: nil,
- blockSize: 1 << 16,
+ blockSize: maxCompressedBlockSize,
windowSize: 8 << 20,
level: SpeedDefault,
allLitEntropy: true,
@@ -75,6 +76,7 @@ func WithEncoderCRC(b bool) EOption {
// WithEncoderConcurrency will set the concurrency,
// meaning the maximum number of encoders to run concurrently.
// The value supplied must be at least 1.
+// For streams, setting a value of 1 will disable async compression.
// By default this will be set to GOMAXPROCS.
func WithEncoderConcurrency(n int) EOption {
return func(o *encoderOptions) error {
@@ -106,6 +108,7 @@ func WithWindowSize(n int) EOption {
o.customWindow = true
if o.blockSize > o.windowSize {
o.blockSize = o.windowSize
+ o.customBlockSize = true
}
return nil
}
@@ -188,10 +191,9 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
return SpeedDefault
case level >= 6 && level < 10:
return SpeedBetterCompression
- case level >= 10:
+ default:
return SpeedBestCompression
}
- return SpeedDefault
}
// String provides a string representation of the compression level.
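With the default branch covering level >= 10, the switch in EncoderLevelFromZstd is exhaustive and the trailing return was unreachable. A sketch of the mapping; the expected output is inferred from the level thresholds above:

    package main

    import (
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        for _, l := range []int{1, 3, 7, 19} {
            fmt.Println(l, "=>", zstd.EncoderLevelFromZstd(l))
        }
        // Expected: 1 => fastest, 3 => default, 7 => better, 19 => best
    }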
@@ -222,6 +224,9 @@ func WithEncoderLevel(l EncoderLevel) EOption {
switch o.level {
case SpeedFastest:
o.windowSize = 4 << 20
+ if !o.customBlockSize {
+ o.blockSize = 1 << 16
+ }
case SpeedDefault:
o.windowSize = 8 << 20
case SpeedBetterCompression:
@@ -278,7 +283,7 @@ func WithNoEntropyCompression(b bool) EOption {
// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
-// If this is not specified, block encodes will automatically choose this based on the input size.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
// This setting has no effect on streamed encodes.
func WithSingleSegment(b bool) EOption {
return func(o *encoderOptions) error {
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 989c79f8c31..b6c5054176a 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -8,23 +8,17 @@ import (
"bytes"
"encoding/hex"
"errors"
- "hash"
"io"
- "sync"
"github.com/klauspost/compress/zstd/internal/xxhash"
)
type frameDec struct {
- o decoderOptions
- crc hash.Hash64
- offset int64
+ o decoderOptions
+ crc *xxhash.Digest
WindowSize uint64
- // In order queue of blocks being decoded.
- decoding chan *blockDec
-
// Frame history passed between blocks
history history
@@ -34,15 +28,10 @@ type frameDec struct {
bBuf byteBuf
FrameContentSize uint64
- frameDone sync.WaitGroup
DictionaryID *uint32
HasCheckSum bool
SingleSegment bool
-
- // asyncRunning indicates whether the async routine processes input on 'decoding'.
- asyncRunningMu sync.Mutex
- asyncRunning bool
}
const (
@@ -117,7 +106,7 @@ func (d *frameDec) reset(br byteBuffer) error {
}
n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
println("Skipping frame with", n, "bytes.")
- err = br.skipN(int(n))
+ err = br.skipN(int64(n))
if err != nil {
if debugDecoder {
println("Reading discarded frame", err)
@@ -208,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
default:
fcsSize = 1 << v
}
- d.FrameContentSize = 0
+ d.FrameContentSize = fcsUnknown
if fcsSize > 0 {
b, err := br.readSmall(fcsSize)
if err != nil {
@@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error {
d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
}
if debugDecoder {
- println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
+ println("Read FCS:", d.FrameContentSize)
}
}
+
// Move this to shared.
d.HasCheckSum = fhd&(1<<2) != 0
if d.HasCheckSum {
@@ -241,20 +231,27 @@ func (d *frameDec) reset(br byteBuffer) error {
d.crc.Reset()
}
+ if d.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ }
+ return ErrWindowSizeExceeded
+ }
+
if d.WindowSize == 0 && d.SingleSegment {
// We may not need window in this case.
d.WindowSize = d.FrameContentSize
if d.WindowSize < MinWindowSize {
d.WindowSize = MinWindowSize
}
- }
-
- if d.WindowSize > uint64(d.o.maxWindowSize) {
- if debugDecoder {
- printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ if d.WindowSize > d.o.maxDecodedSize {
+ if debugDecoder {
+ printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ }
+ return ErrDecoderSizeExceeded
}
- return ErrWindowSizeExceeded
}
+
// The minimum Window_Size is 1 KB.
if d.WindowSize < MinWindowSize {
if debugDecoder {
@@ -263,11 +260,18 @@ func (d *frameDec) reset(br byteBuffer) error {
return ErrWindowSizeTooSmall
}
d.history.windowSize = int(d.WindowSize)
- if d.o.lowMem && d.history.windowSize < maxBlockSize {
- d.history.maxSize = d.history.windowSize * 2
+ if !d.o.lowMem || d.history.windowSize < maxBlockSize {
+ // Alloc 2x window size if not low-mem, or if the window size is very small.
+ d.history.allocFrameBuffer = d.history.windowSize * 2
} else {
- d.history.maxSize = d.history.windowSize + maxBlockSize
+ // Alloc with one additional block
+ d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
}
+
+ if debugDecoder {
+ println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
+ }
+
// history contains input - maybe we do something
d.rawInput = br
return nil
@@ -276,62 +280,24 @@ func (d *frameDec) reset(br byteBuffer) error {
// next will start decoding the next block from stream.
func (d *frameDec) next(block *blockDec) error {
if debugDecoder {
- printf("decoding new block %p:%p", block, block.data)
+ println("decoding new block")
}
err := block.reset(d.rawInput, d.WindowSize)
if err != nil {
println("block error:", err)
// Signal the frame decoder we have a problem.
- d.sendErr(block, err)
+ block.sendErr(err)
return err
}
- block.input <- struct{}{}
- if debugDecoder {
- println("next block:", block)
- }
- d.asyncRunningMu.Lock()
- defer d.asyncRunningMu.Unlock()
- if !d.asyncRunning {
- return nil
- }
- if block.Last {
- // We indicate the frame is done by sending io.EOF
- d.decoding <- block
- return io.EOF
- }
- d.decoding <- block
return nil
}
-// sendEOF will queue an error block on the frame.
-// This will cause the frame decoder to return when it encounters the block.
-// Returns true if the decoder was added.
-func (d *frameDec) sendErr(block *blockDec, err error) bool {
- d.asyncRunningMu.Lock()
- defer d.asyncRunningMu.Unlock()
- if !d.asyncRunning {
- return false
- }
-
- println("sending error", err.Error())
- block.sendErr(err)
- d.decoding <- block
- return true
-}
-
// checkCRC will check the checksum if the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
func (d *frameDec) checkCRC() error {
if !d.HasCheckSum {
return nil
}
- var tmp [4]byte
- got := d.crc.Sum64()
- // Flip to match file order.
- tmp[0] = byte(got >> 0)
- tmp[1] = byte(got >> 8)
- tmp[2] = byte(got >> 16)
- tmp[3] = byte(got >> 24)
// We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4)
@@ -340,6 +306,18 @@ func (d *frameDec) checkCRC() error {
return err
}
+ if d.o.ignoreChecksum {
+ return nil
+ }
+
+ var tmp [4]byte
+ got := d.crc.Sum64()
+ // Flip to match file order.
+ tmp[0] = byte(got >> 0)
+ tmp[1] = byte(got >> 8)
+ tmp[2] = byte(got >> 16)
+ tmp[3] = byte(got >> 24)
+
if !bytes.Equal(tmp[:], want) {
if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want)
@@ -352,133 +330,52 @@ func (d *frameDec) checkCRC() error {
return nil
}
-func (d *frameDec) initAsync() {
- if !d.o.lowMem && !d.SingleSegment {
- // set max extra size history to 2MB.
- d.history.maxSize = d.history.windowSize + maxBlockSize
- }
- // re-alloc if more than one extra block size.
- if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
- d.history.b = make([]byte, 0, d.history.maxSize)
- }
- if cap(d.history.b) < d.history.maxSize {
- d.history.b = make([]byte, 0, d.history.maxSize)
- }
- if cap(d.decoding) < d.o.concurrent {
- d.decoding = make(chan *blockDec, d.o.concurrent)
- }
- if debugDecoder {
- h := d.history
- printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
- }
- d.asyncRunningMu.Lock()
- d.asyncRunning = true
- d.asyncRunningMu.Unlock()
-}
-
-// startDecoder will start decoding blocks and write them to the writer.
-// The decoder will stop as soon as an error occurs or at end of frame.
-// When the frame has finished decoding the *bufio.Reader
-// containing the remaining input will be sent on frameDec.frameDone.
-func (d *frameDec) startDecoder(output chan decodeOutput) {
- written := int64(0)
-
- defer func() {
- d.asyncRunningMu.Lock()
- d.asyncRunning = false
- d.asyncRunningMu.Unlock()
-
- // Drain the currently decoding.
- d.history.error = true
- flushdone:
- for {
- select {
- case b := <-d.decoding:
- b.history <- &d.history
- output <- <-b.result
- default:
- break flushdone
- }
- }
- println("frame decoder done, signalling done")
- d.frameDone.Done()
- }()
- // Get decoder for first block.
- block := <-d.decoding
- block.history <- &d.history
- for {
- var next *blockDec
- // Get result
- r := <-block.result
- if r.err != nil {
- println("Result contained error", r.err)
- output <- r
- return
- }
- if debugDecoder {
- println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
- d.offset += int64(len(r.b))
- }
- if !block.Last {
- // Send history to next block
- select {
- case next = <-d.decoding:
- if debugDecoder {
- println("Sending ", len(d.history.b), "bytes as history")
- }
- next.history <- &d.history
- default:
- // Wait until we have sent the block, so
- // other decoders can potentially get the decoder.
- next = nil
- }
- }
-
- // Add checksum, async to decoding.
- if d.HasCheckSum {
- n, err := d.crc.Write(r.b)
- if err != nil {
- r.err = err
- if n != len(r.b) {
- r.err = io.ErrShortWrite
- }
- output <- r
- return
- }
- }
- written += int64(len(r.b))
- if d.SingleSegment && uint64(written) > d.FrameContentSize {
- println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
- r.err = ErrFrameSizeExceeded
- output <- r
- return
- }
- if block.Last {
- r.err = d.checkCRC()
- output <- r
- return
- }
- output <- r
- if next == nil {
- // There was no decoder available, we wait for one now that we have sent to the writer.
- if debugDecoder {
- println("Sending ", len(d.history.b), " bytes as history")
- }
- next = <-d.decoding
- next.history <- &d.history
+// consumeCRC reads the checksum data if the frame has one.
+func (d *frameDec) consumeCRC() error {
+ if d.HasCheckSum {
+ _, err := d.rawInput.readSmall(4)
+ if err != nil {
+ println("CRC missing?", err)
+ return err
}
- block = next
}
+
+ return nil
}
-// runDecoder will create a sync decoder that will decode a block of data.
+// runDecoder will run the decoder for the remainder of the frame.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b
// We use the history for output to avoid copying it.
d.history.b = dst
+ d.history.ignoreBuffer = len(dst)
// Store input length, so we only check new data.
crcStart := len(dst)
+ d.history.decoders.maxSyncLen = 0
+ if d.o.limitToCap {
+ d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+ }
+ if d.FrameContentSize != fcsUnknown {
+ if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+ d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ }
+ if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen)
+ }
+ if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+ // Alloc for output
+ dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
+ copy(dst2, dst)
+ dst = dst2
+ }
+ }
var err error
for {
err = dec.reset(d.rawInput, d.WindowSize)
@@ -489,29 +386,47 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
println("next block:", dec)
}
err = dec.decodeBuf(&d.history)
- if err != nil || dec.Last {
+ if err != nil {
break
}
- if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+ println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
err = ErrDecoderSizeExceeded
break
}
- if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
- println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
+ if d.o.limitToCap && len(d.history.b) > cap(dst) {
+ println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
+ err = ErrDecoderSizeExceeded
+ break
+ }
+ if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
+ println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
err = ErrFrameSizeExceeded
break
}
+ if dec.Last {
+ break
+ }
+ if debugDecoder {
+ println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
+ }
}
dst = d.history.b
if err == nil {
- if d.HasCheckSum {
- var n int
- n, err = d.crc.Write(dst[crcStart:])
- if err == nil {
- if n != len(dst)-crcStart {
- err = io.ErrShortWrite
- } else {
- err = d.checkCRC()
+ if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
+ err = ErrFrameSizeMismatch
+ } else if d.HasCheckSum {
+ if d.o.ignoreChecksum {
+ err = d.consumeCRC()
+ } else {
+ var n int
+ n, err = d.crc.Write(dst[crcStart:])
+ if err == nil {
+ if n != len(dst)-crcStart {
+ err = io.ErrShortWrite
+ } else {
+ err = d.checkCRC()
+ }
}
}
}
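runDecoder now rejects a frame as soon as its declared Frame_Content_Size exceeds the configured budget, before allocating the output buffer. A sketch (helper name hypothetical):

    package main

    import (
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    // decodeCapped refuses frames whose header already declares more
    // than 1 MiB of content.
    func decodeCapped(frame []byte) ([]byte, error) {
        dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxMemory(1<<20))
        if err != nil {
            return nil, err
        }
        defer dec.Close()
        // For a frame declaring FrameContentSize > 1 MiB, this returns
        // zstd.ErrDecoderSizeExceeded before any output is allocated.
        return dec.DecodeAll(frame, nil)
    }

    func main() {
        out, err := decodeCapped(nil)
        fmt.Println(len(out), err)
    }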
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index e6d3d49b39c..2f8860a722b 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -5,8 +5,10 @@
package zstd
import (
+ "encoding/binary"
"errors"
"fmt"
+ "io"
)
const (
@@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<> 3)
- // println(s.norm[:s.symbolLen], s.symbolLen)
return s.buildDtable()
}
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ // dt [maxTablesize]decSymbol // Decompression table.
+ // symbolLen uint16 // Length of active part of the symbol table.
+ // actualTableLog uint8 // Selected tablelog.
+ // maxBits uint8 // Maximum number of additional bits
+ // // used for table creation to avoid allocations.
+ // stateTable [256]uint16
+ // norm [maxSymbolValue + 1]int16
+ // preDefined bool
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
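mustReadFrom deserializes an fseDecoder's fixed-size fields with binary.Read, panicking on any failure since the input is pre-generated. A round-trip sketch of the same mechanism on a stand-in struct (type and field names hypothetical, not the library's internals):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    // tableState stands in for the fixed-size fields mustReadFrom
    // consumes, read field by field in little-endian order.
    type tableState struct {
        SymbolLen      uint16
        ActualTableLog uint8
        MaxBits        uint8
        PreDefined     bool
    }

    func main() {
        var buf bytes.Buffer
        in := tableState{SymbolLen: 12, ActualTableLog: 6, MaxBits: 8, PreDefined: true}
        if err := binary.Write(&buf, binary.LittleEndian, &in); err != nil {
            panic(err)
        }
        var out tableState
        if err := binary.Read(&buf, binary.LittleEndian, &out); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", out)
    }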
@@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 {
return uint16(d >> 16)
}
-func (d decSymbol) baseline() uint32 {
- return uint32(d >> 32)
-}
-
func (d decSymbol) baselineInt() int {
return int(d >> 32)
}
-func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
- *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
-}
-
func (d *decSymbol) setNBits(nBits uint8) {
const mask = 0xffffffffffffff00
*d = (*d & mask) | decSymbol(nBits)
@@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) {
*d = (*d & mask) | decSymbol(state)<<16
}
-func (d *decSymbol) setBaseline(baseline uint32) {
- const mask = 0xffffffff
- *d = (*d & mask) | decSymbol(baseline)<<32
-}
-
func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
const mask = 0xffff00ff
*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
@@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) {
s.dt[0] = symbol
}
-// buildDtable will build the decoding table.
-func (s *fseDecoder) buildDtable() error {
- tableSize := uint32(1 << s.actualTableLog)
- highThreshold := tableSize - 1
- symbolNext := s.stateTable[:256]
-
- // Init, lay down lowprob symbols
- {
- for i, v := range s.norm[:s.symbolLen] {
- if v == -1 {
- s.dt[highThreshold].setAddBits(uint8(i))
- highThreshold--
- symbolNext[i] = 1
- } else {
- symbolNext[i] = uint16(v)
- }
- }
- }
- // Spread symbols
- {
- tableMask := tableSize - 1
- step := tableStep(tableSize)
- position := uint32(0)
- for ss, v := range s.norm[:s.symbolLen] {
- for i := 0; i < int(v); i++ {
- s.dt[position].setAddBits(uint8(ss))
- position = (position + step) & tableMask
- for position > highThreshold {
- // lowprob area
- position = (position + step) & tableMask
- }
- }
- }
- if position != 0 {
- // position must reach all cells once, otherwise normalizedCounter is incorrect
- return errors.New("corrupted input (position != 0)")
- }
- }
-
- // Build Decoding table
- {
- tableSize := uint16(1 << s.actualTableLog)
- for u, v := range s.dt[:tableSize] {
- symbol := v.addBits()
- nextState := symbolNext[symbol]
- symbolNext[symbol] = nextState + 1
- nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
- s.dt[u&maxTableMask].setNBits(nBits)
- newState := (nextState << nBits) - tableSize
- if newState > tableSize {
- return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
- }
- if newState == uint16(u) && nBits == 0 {
- // Seems weird that this is possible with nbits > 0.
- return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
- }
- s.dt[u&maxTableMask].setNewState(newState)
- }
- }
- return nil
-}
-
// transform will transform the decoder table into a table usable for
// decoding without having to apply the transformation while decoding.
// The state will contain the base value and the number of bits to read.
@@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
s.state = dt[br.getBits(tableLog)]
}
-// next returns the current symbol and sets the next state.
-// At least tablelog bits must be available in the bit reader.
-func (s *fseState) next(br *bitReader) {
- lowBits := uint16(br.getBits(s.state.nbBits()))
- s.state = s.dt[s.state.newState()+lowBits]
-}
-
-// finished returns true if all bits have been read from the bitstream
-// and the next state would require reading bits from the input.
-func (s *fseState) finished(br *bitReader) bool {
- return br.finished() && s.state.nbBits() > 0
-}
-
-// final returns the current state symbol without decoding the next.
-func (s *fseState) final() (int, uint8) {
- return s.state.baselineInt(), s.state.addBits()
-}
-
// final returns the current state symbol without decoding the next.
func (s decSymbol) final() (int, uint8) {
return s.baselineInt(), s.addBits()
}
-
-// nextFast returns the next symbol and sets the next state.
-// This can only be used if no symbols are 0 bits.
-// At least tablelog bits must be available in the bit reader.
-func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
- lowBits := uint16(br.getBitsFast(s.state.nbBits()))
- s.state = s.dt[s.state.newState()+lowBits]
- return s.state.baseline(), s.state.addBits()
-}
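
The removed set and setBaseline helpers above documented the decSymbol packing that the remaining accessors still rely on, and mustReadFrom's binary.Read depends on the same fixed layout. A standalone sketch of that uint64 layout, reconstructed from the accessors kept in this file:

    package main

    import "fmt"

    // decSymbol packs four fields into a uint64, exactly as the accessors in
    // fse_decoder.go read them back:
    //   bits  0-7   nbBits     bits  8-15  addBits
    //   bits 16-31  newState   bits 32-63  baseline
    type decSymbol uint64

    func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
    	return decSymbol(nbits) | decSymbol(addBits)<<8 | decSymbol(newState)<<16 | decSymbol(baseline)<<32
    }

    func (d decSymbol) nbBits() uint8    { return uint8(d) }
    func (d decSymbol) addBits() uint8   { return uint8(d >> 8) }
    func (d decSymbol) newState() uint16 { return uint16(d >> 16) }
    func (d decSymbol) baselineInt() int { return int(d >> 32) }

    func main() {
    	s := newDecSymbol(5, 3, 0x1234, 0xbeef)
    	fmt.Println(s.nbBits(), s.addBits(), s.newState(), s.baselineInt()) // 5 3 4660 48879
    }
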
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
new file mode 100644
index 00000000000..d04a829b0a0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
@@ -0,0 +1,65 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+ "fmt"
+)
+
+type buildDtableAsmContext struct {
+ // inputs
+ stateTable *uint16
+ norm *int16
+ dt *uint64
+
+ // outputs --- set by the procedure in the case of error;
+ // for interpretation please see the error handling part below
+ errParam1 uint64
+ errParam2 uint64
+}
+
+// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
+// Function returns non-zero exit code on error.
+//
+//go:noescape
+func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+
+// please keep in sync with _generate/gen_fse.go
+const (
+ errorCorruptedNormalizedCounter = 1
+ errorNewStateTooBig = 2
+ errorNewStateNoBits = 3
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ ctx := buildDtableAsmContext{
+ stateTable: &s.stateTable[0],
+ norm: &s.norm[0],
+ dt: (*uint64)(&s.dt[0]),
+ }
+ code := buildDtable_asm(s, &ctx)
+
+ if code != 0 {
+ switch code {
+ case errorCorruptedNormalizedCounter:
+ position := ctx.errParam1
+ return fmt.Errorf("corrupted input (position=%d, expected 0)", position)
+
+ case errorNewStateTooBig:
+ newState := decSymbol(ctx.errParam1)
+ size := ctx.errParam2
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, size)
+
+ case errorNewStateNoBits:
+ newState := decSymbol(ctx.errParam1)
+ oldState := decSymbol(ctx.errParam2)
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState)
+
+ default:
+ return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
new file mode 100644
index 00000000000..bcde3986953
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
@@ -0,0 +1,126 @@
+// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+TEXT ·buildDtable_asm(SB), $0-24
+ MOVQ ctx+8(FP), CX
+ MOVQ s+0(FP), DI
+
+ // Load values
+ MOVBQZX 4098(DI), DX
+ XORQ AX, AX
+ BTSQ DX, AX
+ MOVQ (CX), BX
+ MOVQ 16(CX), SI
+ LEAQ -1(AX), R8
+ MOVQ 8(CX), CX
+ MOVWQZX 4096(DI), DI
+
+ // End load values
+ // Init, lay down lowprob symbols
+ XORQ R9, R9
+ JMP init_main_loop_condition
+
+init_main_loop:
+ MOVWQSX (CX)(R9*2), R10
+ CMPW R10, $-1
+ JNE do_not_update_high_threshold
+ MOVB R9, 1(SI)(R8*8)
+ DECQ R8
+ MOVQ $0x0000000000000001, R10
+
+do_not_update_high_threshold:
+ MOVW R10, (BX)(R9*2)
+ INCQ R9
+
+init_main_loop_condition:
+ CMPQ R9, DI
+ JL init_main_loop
+
+ // Spread symbols
+ // Calculate table step
+ MOVQ AX, R9
+ SHRQ $0x01, R9
+ MOVQ AX, R10
+ SHRQ $0x03, R10
+ LEAQ 3(R9)(R10*1), R9
+
+ // Fill add bits values
+ LEAQ -1(AX), R10
+ XORQ R11, R11
+ XORQ R12, R12
+ JMP spread_main_loop_condition
+
+spread_main_loop:
+ XORQ R13, R13
+ MOVWQSX (CX)(R12*2), R14
+ JMP spread_inner_loop_condition
+
+spread_inner_loop:
+ MOVB R12, 1(SI)(R11*8)
+
+adjust_position:
+ ADDQ R9, R11
+ ANDQ R10, R11
+ CMPQ R11, R8
+ JG adjust_position
+ INCQ R13
+
+spread_inner_loop_condition:
+ CMPQ R13, R14
+ JL spread_inner_loop
+ INCQ R12
+
+spread_main_loop_condition:
+ CMPQ R12, DI
+ JL spread_main_loop
+ TESTQ R11, R11
+ JZ spread_check_ok
+ MOVQ ctx+8(FP), AX
+ MOVQ R11, 24(AX)
+ MOVQ $+1, ret+16(FP)
+ RET
+
+spread_check_ok:
+ // Build Decoding table
+ XORQ DI, DI
+
+build_table_main_table:
+ MOVBQZX 1(SI)(DI*8), CX
+ MOVWQZX (BX)(CX*2), R8
+ LEAQ 1(R8), R9
+ MOVW R9, (BX)(CX*2)
+ MOVQ R8, R9
+ BSRQ R9, R9
+ MOVQ DX, CX
+ SUBQ R9, CX
+ SHLQ CL, R8
+ SUBQ AX, R8
+ MOVB CL, (SI)(DI*8)
+ MOVW R8, 2(SI)(DI*8)
+ CMPQ R8, AX
+ JLE build_table_check1_ok
+ MOVQ ctx+8(FP), CX
+ MOVQ R8, 24(CX)
+ MOVQ AX, 32(CX)
+ MOVQ $+2, ret+16(FP)
+ RET
+
+build_table_check1_ok:
+ TESTB CL, CL
+ JNZ build_table_check2_ok
+ CMPW R8, DI
+ JNE build_table_check2_ok
+ MOVQ ctx+8(FP), AX
+ MOVQ R8, 24(AX)
+ MOVQ DI, 32(AX)
+ MOVQ $+3, ret+16(FP)
+ RET
+
+build_table_check2_ok:
+ INCQ DI
+ CMPQ DI, AX
+ JL build_table_main_table
+ MOVQ $+0, ret+16(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
new file mode 100644
index 00000000000..332e51fe44f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -0,0 +1,72 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ symbolNext := s.stateTable[:256]
+
+ // Init, lay down lowprob symbols
+ {
+ for i, v := range s.norm[:s.symbolLen] {
+ if v == -1 {
+ s.dt[highThreshold].setAddBits(uint8(i))
+ highThreshold--
+ symbolNext[i] = 1
+ } else {
+ symbolNext[i] = uint16(v)
+ }
+ }
+ }
+
+ // Spread symbols
+ {
+ tableMask := tableSize - 1
+ step := tableStep(tableSize)
+ position := uint32(0)
+ for ss, v := range s.norm[:s.symbolLen] {
+ for i := 0; i < int(v); i++ {
+ s.dt[position].setAddBits(uint8(ss))
+ position = (position + step) & tableMask
+ for position > highThreshold {
+ // lowprob area
+ position = (position + step) & tableMask
+ }
+ }
+ }
+ if position != 0 {
+ // position must reach all cells once, otherwise normalizedCounter is incorrect
+ return errors.New("corrupted input (position != 0)")
+ }
+ }
+
+ // Build Decoding table
+ {
+ tableSize := uint16(1 << s.actualTableLog)
+ for u, v := range s.dt[:tableSize] {
+ symbol := v.addBits()
+ nextState := symbolNext[symbol]
+ symbolNext[symbol] = nextState + 1
+ nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+ s.dt[u&maxTableMask].setNBits(nBits)
+ newState := (nextState << nBits) - tableSize
+ if newState > tableSize {
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+ }
+ if newState == uint16(u) && nBits == 0 {
+ // Seems weird that this is possible with nbits > 0.
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+ }
+ s.dt[u&maxTableMask].setNewState(newState)
+ }
+ }
+ return nil
+}
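
The spread loop above steps through the table with a stride that is coprime to the table size, so position returns to 0 only after every cell has been visited; that is why position != 0 signals corruption. A small sketch restating the stride, which matches the SHRQ $0x01 / SHRQ $0x03 / LEAQ 3(R9)(R10*1) sequence in the amd64 assembly above (the helper is named tableStep elsewhere in this package):

    package main

    import "fmt"

    // tableStep restates the stride used by the spread loop; compare the asm:
    //   SHRQ $0x01, R9; SHRQ $0x03, R10; LEAQ 3(R9)(R10*1), R9
    func tableStep(tableSize uint32) uint32 {
    	return (tableSize >> 1) + (tableSize >> 3) + 3
    }

    func main() {
    	// For a 64-entry table the stride is 32+8+3 = 43; gcd(43, 64) = 1, so
    	// the walk covers every cell once and ends back at position 0.
    	fmt.Println(tableStep(1 << 6)) // 43
    }
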
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index b4757ee3f03..ab26326a8ff 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -62,9 +62,8 @@ func (s symbolTransform) String() string {
// To indicate that you have populated the histogram call HistogramFinished
// with the value of the highest populated symbol, as well as the number of entries
// in the most populated entry. These are accepted at face value.
-// The returned slice will always be length 256.
-func (s *fseEncoder) Histogram() []uint32 {
- return s.count[:]
+func (s *fseEncoder) Histogram() *[256]uint32 {
+ return &s.count
}
// HistogramFinished can be called to indicate that the histogram has been populated.
@@ -77,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
s.clearCount = maxCount != 0
}
-// prepare will prepare and allocate scratch tables used for both compression and decompression.
-func (s *fseEncoder) prepare() (*fseEncoder, error) {
- if s == nil {
- s = &fseEncoder{}
- }
- s.useRLE = false
- if s.clearCount && s.maxCount == 0 {
- for i := range s.count {
- s.count[i] = 0
- }
- s.clearCount = false
- }
- return s, nil
-}
-
// allocCtable will allocate tables needed for compression.
// If existing tables are big enough, they are simply re-used.
func (s *fseEncoder) allocCtable() {
@@ -710,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
c.state = c.stateTable[lu]
}
-// encode the output symbol provided and write it to the bitstream.
-func (c *cState) encode(symbolTT symbolTransform) {
- nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
- dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
- c.bw.addBits16NC(c.state, uint8(nbBitsOut))
- c.state = c.stateTable[dstState]
-}
-
// flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) {
c.bw.flush32()
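
The Histogram change above swaps a 256-element slice for a pointer to the backing array, which keeps the length a compile-time constant for callers. A minimal sketch of the pattern with a hypothetical counter type, not the package's own:

    package main

    import "fmt"

    // counter is a hypothetical stand-in for fseEncoder.
    type counter struct{ count [256]uint32 }

    // Histogram returns a pointer to the fixed-size array, as in the new
    // signature, instead of a slice over it.
    func (c *counter) Histogram() *[256]uint32 { return &c.count }

    func main() {
    	var c counter
    	hist := c.Histogram()
    	for _, b := range []byte("abracadabra") {
    		hist[b]++ // index is a byte, so it can never exceed the array bound
    	}
    	fmt.Println(hist['a'], hist['b']) // 5 2
    }
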
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
index cf33f29a1b4..5d73c21ebdd 100644
--- a/vendor/github.com/klauspost/compress/zstd/hash.go
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 {
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
-
-// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash3(u uint32, h uint8) uint32 {
- return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
index f783e32d251..09164856d22 100644
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -10,40 +10,48 @@ import (
// history contains the information transferred between blocks.
type history struct {
- b []byte
- huffTree *huff0.Scratch
- recentOffsets [3]int
+ // Literal decompression
+ huffTree *huff0.Scratch
+
+ // Sequence decompression
decoders sequenceDecs
- windowSize int
- maxSize int
- error bool
- dict *dict
+ recentOffsets [3]int
+
+ // History buffer...
+ b []byte
+
+ // ignoreBuffer is meant to ignore a number of bytes
+ // when checking for matches in history
+ ignoreBuffer int
+
+ windowSize int
+ allocFrameBuffer int // needed?
+ error bool
+ dict *dict
}
// reset will reset the history to initial state of a frame.
// The history must already have been initialized to the desired size.
func (h *history) reset() {
h.b = h.b[:0]
+ h.ignoreBuffer = 0
h.error = false
h.recentOffsets = [3]int{1, 4, 8}
- if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
- if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
- if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
- h.decoders = sequenceDecs{}
+ h.decoders.freeDecoders()
+ h.decoders = sequenceDecs{br: h.decoders.br}
+ h.freeHuffDecoder()
+ h.huffTree = nil
+ h.dict = nil
+ //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) freeHuffDecoder() {
if h.huffTree != nil {
if h.dict == nil || h.dict.litEnc != h.huffTree {
huffDecoderPool.Put(h.huffTree)
+ h.huffTree = nil
}
}
- h.huffTree = nil
- h.dict = nil
- //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
}
func (h *history) setDict(dict *dict) {
@@ -54,6 +62,7 @@ func (h *history) setDict(dict *dict) {
h.decoders.litLengths = dict.llDec
h.decoders.offsets = dict.ofDec
h.decoders.matchLengths = dict.mlDec
+ h.decoders.dict = dict.content
h.recentOffsets = dict.offsets
h.huffTree = dict.litEnc
}
@@ -83,6 +92,24 @@ func (h *history) append(b []byte) {
copy(h.b[h.windowSize-len(b):], b)
}
+// ensureBlock will ensure there is space for at least one block...
+func (h *history) ensureBlock() {
+ if cap(h.b) < h.allocFrameBuffer {
+ h.b = make([]byte, 0, h.allocFrameBuffer)
+ return
+ }
+
+ avail := cap(h.b) - len(h.b)
+ if avail >= h.windowSize || avail > maxCompressedBlockSize {
+ return
+ }
+ // Move data down so we only have window size left.
+ // We know we have less than window size in b at this point.
+ discard := len(h.b) - h.windowSize
+ copy(h.b, h.b[discard:])
+ h.b = h.b[:h.windowSize]
+}
+
// append bytes to history without ever discarding anything.
func (h *history) appendKeep(b []byte) {
h.b = append(h.b, b...)
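
ensureBlock above either reallocates to allocFrameBuffer or, when spare capacity runs low, slides the buffer down so only the last windowSize bytes of history remain. A standalone sketch of that sliding-window step with hypothetical sizes:

    package main

    import "fmt"

    // ensure mirrors the tail of ensureBlock above: when spare capacity is
    // below both windowSize and the max block size, keep only the newest
    // windowSize bytes and reuse the existing allocation.
    func ensure(b []byte, windowSize, maxBlockSize int) []byte {
    	avail := cap(b) - len(b)
    	if avail >= windowSize || avail > maxBlockSize {
    		return b
    	}
    	discard := len(b) - windowSize
    	copy(b, b[discard:])
    	return b[:windowSize]
    }

    func main() {
    	buf := make([]byte, 900, 1024) // hypothetical sizes
    	buf = ensure(buf, 512, 128<<10)
    	fmt.Println(len(buf), cap(buf)) // 512 1024: trimmed in place, no realloc
    }
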
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
index 426b9cac786..2c112a0ab1c 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -195,7 +195,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
- b = b[len(d.mem):]
d.n = int(d.total % uint64(len(d.mem)))
return nil
}
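
The deleted b = b[len(d.mem):] above was a dead assignment, since b is not read again before UnmarshalBinary returns, so behavior is unchanged. A hedged round-trip sketch using the public cespare/xxhash/v2 API, which this vendored copy mirrors:

    package main

    import (
    	"fmt"

    	"github.com/cespare/xxhash/v2"
    )

    func main() {
    	d := xxhash.New()
    	d.WriteString("hello ")
    	state, _ := d.MarshalBinary() // serializes v1..v4, total, and mem

    	d2 := xxhash.New()
    	_ = d2.UnmarshalBinary(state) // restores the snapshot
    	d2.WriteString("world")
    	fmt.Printf("%016x\n", d2.Sum64()) // same as hashing "hello world" at once
    }
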
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
deleted file mode 100644
index 3ddbd5c0b0a..00000000000
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build !appengine && gc && !purego
-// +build !appengine,gc,!purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-//
-//go:noescape
-func Sum64(b []byte) uint64
-
-//go:noescape
-func writeBlocks(*Digest, []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index 2c9c5357a14..cea17856197 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -1,12 +1,13 @@
// +build !appengine
// +build gc
// +build !purego
+// +build !noasm
#include "textflag.h"
// Register allocation:
// AX h
-// CX pointer to advance through b
+// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
@@ -16,39 +17,39 @@
// R12 tmp
// R13 prime1v
// R14 prime2v
-// R15 prime4v
+// DI prime4v
-// round reads from and advances the buffer pointer in CX.
+// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
- MOVQ (CX), R12 \
- ADDQ $8, CX \
+ MOVQ (SI), R12 \
+ ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
- ADDQ R15, acc
+ ADDQ DI, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), R15
+ MOVQ ·prime4v(SB), DI
// Load slice.
- MOVQ b_base+0(FP), CX
+ MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
- LEAQ (CX)(DX*1), BX
+ LEAQ (SI)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
@@ -65,14 +66,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
XORQ R11, R11
SUBQ R13, R11
- // Loop until CX > BX.
+ // Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
- CMPQ CX, BX
+ CMPQ SI, BX
JLE blockLoop
MOVQ R8, AX
@@ -100,16 +101,16 @@ noBlocks:
afterBlocks:
ADDQ DX, AX
- // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+ // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JG fourByte
wordLoop:
// Calculate k1.
- MOVQ (CX), R8
- ADDQ $8, CX
+ MOVQ (SI), R8
+ ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
@@ -117,18 +118,18 @@ wordLoop:
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
- ADDQ R15, AX
+ ADDQ DI, AX
- CMPQ CX, BX
+ CMPQ SI, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JG singles
- MOVL (CX), R8
- ADDQ $4, CX
+ MOVL (SI), R8
+ ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
@@ -138,19 +139,19 @@ fourByte:
singles:
ADDQ $4, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JGE finalize
singlesLoop:
- MOVBQZX (CX), R12
- ADDQ $1, CX
+ MOVBQZX (SI), R12
+ ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
- CMPQ CX, BX
+ CMPQ SI, BX
JL singlesLoop
finalize:
@@ -179,13 +180,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
MOVQ ·prime2v(SB), R14
// Load slice.
- MOVQ arg1_base+8(FP), CX
- MOVQ arg1_len+16(FP), DX
- LEAQ (CX)(DX*1), BX
+ MOVQ b_base+8(FP), SI
+ MOVQ b_len+16(FP), DX
+ LEAQ (SI)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
- MOVQ arg+0(FP), AX
+ MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
@@ -199,7 +200,7 @@ blockLoop:
round(R10)
round(R11)
- CMPQ CX, BX
+ CMPQ SI, BX
JLE blockLoop
// Copy vN back to d.
@@ -208,8 +209,8 @@ blockLoop:
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
- // The number of bytes written is CX minus the old base pointer.
- SUBQ arg1_base+8(FP), CX
- MOVQ CX, ret+32(FP)
+ // The number of bytes written is SI minus the old base pointer.
+ SUBQ b_base+8(FP), SI
+ MOVQ SI, ret+32(FP)
RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
new file mode 100644
index 00000000000..4d64a17d69c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -0,0 +1,186 @@
+// +build gc,!purego,!noasm
+
+#include "textflag.h"
+
+// Register allocation.
+#define digest R1
+#define h R2 // Return value.
+#define p R3 // Input pointer.
+#define len R4
+#define nblocks R5 // len / 32.
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc \
+
+// x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x \
+
+#define mergeRound(x) \
+ round0(x) \
+ EOR x, h \
+ MADD h, prime4, prime1, h \
+
+// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
+#define blocksLoop() \
+ LSR $5, len, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 32(p), (x1, x2) \
+ round(v1, x1) \
+ LDP -16(p), (x3, x4) \
+ round(v2, x2) \
+ SUB $1, nblocks \
+ round(v3, x3) \
+ round(v4, x4) \
+ CBNZ nblocks, loop \
+
+// The primes are repeated here to ensure that they're stored
+// in a contiguous array, so we can load them with LDP.
+DATA primes<> +0(SB)/8, $11400714785074694791
+DATA primes<> +8(SB)/8, $14029467366897019727
+DATA primes<>+16(SB)/8, $1609587929392839161
+DATA primes<>+24(SB)/8, $9650029242287828579
+DATA primes<>+32(SB)/8, $2870177450012600261
+GLOBL primes<>(SB), NOPTR+RODATA, $40
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
+ LDP b_base+0(FP), (p, len)
+
+ LDP primes<> +0(SB), (prime1, prime2)
+ LDP primes<>+16(SB), (prime3, prime4)
+ MOVD primes<>+32(SB), prime5
+
+ CMP $32, len
+ CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
+ BLO afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blocksLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(v1)
+ mergeRound(v2)
+ mergeRound(v3)
+ mergeRound(v4)
+
+afterLoop:
+ ADD len, h
+
+ TBZ $4, len, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, len, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, len, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, len, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, len, end
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h
+ MUL prime1, h
+
+end:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+//
+// Assumes len(b) >= 32.
+TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
+ LDP primes<>(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, len)
+
+ blocksLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, len
+ MOVD len, ret+32(FP)
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
similarity index 67%
rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
rename to vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
index ad14b807f4d..1a1fac9c261 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
@@ -1,6 +1,9 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm
+// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego
+// +build !noasm
package xxhash
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
index 1f52f296e71..209cb4a999c 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -1,5 +1,5 @@
-//go:build !amd64 || appengine || !gc || purego
-// +build !amd64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm
+// +build !amd64,!arm64 appengine !gc purego noasm
package xxhash
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 1dd39e63b7e..f833d1541f9 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -20,6 +20,10 @@ type seq struct {
llCode, mlCode, ofCode uint8
}
+type seqVals struct {
+ ll, ml, mo int
+}
+
func (s seq) String() string {
if s.offset <= 3 {
if s.offset == 0 {
@@ -61,16 +65,19 @@ type sequenceDecs struct {
offsets sequenceDec
matchLengths sequenceDec
prevOffset [3]int
- hist []byte
dict []byte
literals []byte
out []byte
+ nSeqs int
+ br *bitReader
+ seqSize int
windowSize int
maxBits uint8
+ maxSyncLen uint64
}
// initialize all 3 decoders from the stream input.
-func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error {
if err := s.litLengths.init(br); err != nil {
return errors.New("litLengths:" + err.Error())
}
@@ -80,8 +87,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
if err := s.matchLengths.init(br); err != nil {
return errors.New("matchLengths:" + err.Error())
}
- s.literals = literals
- s.hist = hist.b
+ s.br = br
s.prevOffset = hist.recentOffsets
s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
s.windowSize = hist.windowSize
@@ -93,12 +99,142 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
return nil
}
+func (s *sequenceDecs) freeDecoders() {
+ if f := s.litLengths.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.litLengths.fse = nil
+ }
+ if f := s.offsets.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.offsets.fse = nil
+ }
+ if f := s.matchLengths.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.matchLengths.fse = nil
+ }
+}
+
+// execute will execute the decoded sequence with the provided history.
+// The sequence must be evaluated before being sent.
+func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+ if len(s.dict) == 0 {
+ return s.executeSimple(seqs, hist)
+ }
+
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize > cap(s.out) {
+ addBytes := s.seqSize + len(s.out)
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ for _, seq := range seqs {
+ // Add literals
+ copy(out[t:], s.literals[:seq.ll])
+ t += seq.ll
+ s.literals = s.literals[seq.ll:]
+
+ // Copy from dictionary...
+ if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+ if len(s.dict) == 0 {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+ }
+
+ // we may be in dictionary.
+ dictO := len(s.dict) - (seq.mo - (t + len(hist)))
+ if dictO < 0 || dictO >= len(s.dict) {
+ return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
+ }
+ end := dictO + seq.ml
+ if end > len(s.dict) {
+ n := len(s.dict) - dictO
+ copy(out[t:], s.dict[dictO:])
+ t += n
+ seq.ml -= n
+ } else {
+ copy(out[t:], s.dict[dictO:end])
+ t += end - dictO
+ continue
+ }
+ }
+
+ // Copy from history.
+ if v := seq.mo - t; v > 0 {
+ // v is the start position in history from end.
+ start := len(hist) - v
+ if seq.ml > v {
+ // Some goes into current block.
+ // Copy remainder of history
+ copy(out[t:], hist[start:])
+ t += v
+ seq.ml -= v
+ } else {
+ copy(out[t:], hist[start:start+seq.ml])
+ t += seq.ml
+ continue
+ }
+ }
+ // We must be in current buffer now
+ if seq.ml > 0 {
+ start := t - seq.mo
+ if seq.ml <= t-start {
+ // No overlap
+ copy(out[t:], out[start:start+seq.ml])
+ t += seq.ml
+ continue
+ } else {
+ // Overlapping copy
+ // Extend destination slice and copy one byte at the time.
+ src := out[start : start+seq.ml]
+ dst := out[t:]
+ dst = dst[:len(src)]
+ t += len(src)
+ // Destination is the space we just added.
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ }
+
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
+
// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+func (s *sequenceDecs) decodeSync(hist []byte) error {
+ supported, err := s.decodeSyncSimple(hist)
+ if supported {
+ return err
+ }
+
+ br := s.br
+ seqs := s.nSeqs
startSize := len(s.out)
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+ out := s.out
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
@@ -151,7 +287,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
- println("temp was 0")
+ println("WARNING: temp was 0")
temp = 1
}
@@ -176,51 +312,52 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
if ll > len(s.literals) {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
}
- size := ll + ml + len(s.out)
+ size := ll + ml + len(out)
if size-startSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size", size)
+ if size-startSize == 424242 {
+ panic("here")
+ }
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
- if size > cap(s.out) {
+ if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
// but could be if destination slice is too small for sync operations.
// over-allocating here can create a large amount of GC pressure so we try to keep
// it as contained as possible
- used := len(s.out) - startSize
+ used := len(out) - startSize
addBytes := 256 + ll + ml + used>>2
// Clamp to max block size.
if used+addBytes > maxBlockSize {
addBytes = maxBlockSize - used
}
- s.out = append(s.out, make([]byte, addBytes)...)
- s.out = s.out[:len(s.out)-addBytes]
+ out = append(out, make([]byte, addBytes)...)
+ out = out[:len(out)-addBytes]
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
// Add literals
- s.out = append(s.out, s.literals[:ll]...)
+ out = append(out, s.literals[:ll]...)
s.literals = s.literals[ll:]
- out := s.out
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
- if mo > len(s.out)+len(hist) || mo > s.windowSize {
+ if mo > len(out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
// we may be in dictionary.
- dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
+ dictO := len(s.dict) - (mo - (len(out) + len(hist)))
if dictO < 0 || dictO >= len(s.dict) {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
end := dictO + ml
if end > len(s.dict) {
out = append(out, s.dict[dictO:]...)
- mo -= len(s.dict) - dictO
ml -= len(s.dict) - dictO
} else {
out = append(out, s.dict[dictO:end]...)
@@ -231,26 +368,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
// Copy from history.
// TODO: Blocks without history could be made to ignore this completely.
- if v := mo - len(s.out); v > 0 {
+ if v := mo - len(out); v > 0 {
// v is the start position in history from end.
- start := len(s.hist) - v
+ start := len(hist) - v
if ml > v {
// Some goes into current block.
// Copy remainder of history
- out = append(out, s.hist[start:]...)
- mo -= v
+ out = append(out, hist[start:]...)
ml -= v
} else {
- out = append(out, s.hist[start:start+ml]...)
+ out = append(out, hist[start:start+ml]...)
ml = 0
}
}
// We must be in current buffer now
if ml > 0 {
- start := len(s.out) - mo
- if ml <= len(s.out)-start {
+ start := len(out) - mo
+ if ml <= len(out)-start {
// No overlap
- out = append(out, s.out[start:start+ml]...)
+ out = append(out, out[start:start+ml]...)
} else {
// Overlapping copy
// Extend destination slice and copy one byte at the time.
@@ -264,7 +400,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
}
}
}
- s.out = out
if i == 0 {
// This is the last sequence, so we shouldn't update state.
break
@@ -278,7 +413,8 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
- bits := br.getBitsFast(nBits)
+ bits := br.get32BitsFast(nBits)
+
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
@@ -291,19 +427,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
}
}
- // Add final literals
- s.out = append(s.out, s.literals...)
- return nil
-}
+ // Check if space for literals
+ if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) update(br *bitReader) {
- // Max 8 bits
- s.litLengths.state.next(br)
- // Max 9 bits
- s.matchLengths.state.next(br)
- // Max 8 bits
- s.offsets.state.next(br)
+ // Add final literals
+ s.out = append(out, s.literals...)
+ return br.close()
}
var bitMask [16]uint16
@@ -314,87 +445,6 @@ func init() {
}
}
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) updateAlt(br *bitReader) {
- // Update all 3 states at once. Approx 20% faster.
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-
- nBits := a.nbBits() + b.nbBits() + c.nbBits()
- if nBits == 0 {
- s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
- s.offsets.state.state = s.offsets.state.dt[c.newState()]
- return
- }
- bits := br.getBitsFast(nBits)
- lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
- s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
-
- lowBits = uint16(bits >> (c.nbBits() & 31))
- lowBits &= bitMask[b.nbBits()&15]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
-
- lowBits = uint16(bits) & bitMask[c.nbBits()&15]
- s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
-}
-
-// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
-func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
- // Final will not read from stream.
- ll, llB := llState.final()
- ml, mlB := mlState.final()
- mo, moB := ofState.final()
-
- // extra bits are stored in reverse order.
- br.fillFast()
- mo += br.getBits(moB)
- if s.maxBits > 32 {
- br.fillFast()
- }
- ml += br.getBits(mlB)
- ll += br.getBits(llB)
-
- if moB > 1 {
- s.prevOffset[2] = s.prevOffset[1]
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = mo
- return
- }
- // mo = s.adjustOffset(mo, ll, moB)
- // Inlined for rather big speedup
- if ll == 0 {
- // There is an exception though, when current sequence's literals_length = 0.
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
- // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
- mo++
- }
-
- if mo == 0 {
- mo = s.prevOffset[0]
- return
- }
- var temp int
- if mo == 3 {
- temp = s.prevOffset[0] - 1
- } else {
- temp = s.prevOffset[mo]
- }
-
- if temp == 0 {
- // 0 is not valid; input is corrupted; force offset to 1
- println("temp was 0")
- temp = 1
- }
-
- if mo != 1 {
- s.prevOffset[2] = s.prevOffset[1]
- }
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = temp
- mo = temp
- return
-}
-
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()
@@ -457,36 +507,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
s.prevOffset[0] = temp
return temp
}
-
-// mergeHistory will merge history.
-func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
- for i := uint(0); i < 3; i++ {
- var sNew, sHist *sequenceDec
- switch i {
- default:
- // same as "case 0":
- sNew = &s.litLengths
- sHist = &hist.litLengths
- case 1:
- sNew = &s.offsets
- sHist = &hist.offsets
- case 2:
- sNew = &s.matchLengths
- sHist = &hist.matchLengths
- }
- if sNew.repeat {
- if sHist.fse == nil {
- return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
- }
- continue
- }
- if sNew.fse == nil {
- return nil, fmt.Errorf("sequence stream %d, no fse found", i)
- }
- if sHist.fse != nil && !sHist.fse.preDefined {
- fseDecoderPool.Put(sHist.fse)
- }
- sHist.fse = sNew.fse
- }
- return hist, nil
-}
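
The new execute path above materializes each sequence as (ll, ml, mo): copy ll literal bytes, then ml bytes starting mo bytes back in the output, falling back to a byte-at-a-time copy when source and destination overlap. A toy model of one such sequence, not the package's code:

    package main

    import "fmt"

    // executeOne applies a single decoded sequence: ll literal bytes, then ml
    // bytes copied from mo bytes back in the output. The byte-at-a-time loop is
    // what makes overlapping copies (mo < ml) repeat recent output.
    func executeOne(out, literals []byte, ll, ml, mo int) []byte {
    	out = append(out, literals[:ll]...)
    	start := len(out) - mo
    	for i := 0; i < ml; i++ {
    		out = append(out, out[start+i])
    	}
    	return out
    }

    func main() {
    	out := executeOne([]byte("abc"), []byte("XY"), 2, 4, 2)
    	fmt.Println(string(out)) // abcXYXYXY: offset 2 with length 4 repeats "XY"
    }
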
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
new file mode 100644
index 00000000000..191384adfd0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -0,0 +1,379 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+ "fmt"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
+)
+
+type decodeSyncAsmContext struct {
+ llTable []decSymbol
+ mlTable []decSymbol
+ ofTable []decSymbol
+ llState uint64
+ mlState uint64
+ ofState uint64
+ iteration int
+ litRemain int
+ out []byte
+ outPosition int
+ literals []byte
+ litPosition int
+ history []byte
+ windowSize int
+ ll int // set on error (not for all errors, please refer to _generate/gen.go)
+ ml int // set on error (not for all errors, please refer to _generate/gen.go)
+ mo int // set on error (not for all errors, please refer to _generate/gen.go)
+}
+
+// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write beyond the output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write beyond the output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// decode sequences from the stream with the provided history but without a dictionary.
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+ if len(s.dict) > 0 {
+ return false, nil
+ }
+ if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
+ return false, nil
+ }
+
+ // FIXME: Using unsafe memory copies leads to rare, random crashes
+ // with fuzz testing. It is therefore disabled for now.
+ const useSafe = true
+ /*
+ useSafe := false
+ if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+ useSafe = true
+ }
+ if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+ useSafe = true
+ }
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+ useSafe = true
+ }
+ */
+
+ br := s.br
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ ctx := decodeSyncAsmContext{
+ llTable: s.litLengths.fse.dt[:maxTablesize],
+ mlTable: s.matchLengths.fse.dt[:maxTablesize],
+ ofTable: s.offsets.fse.dt[:maxTablesize],
+ llState: uint64(s.litLengths.state.state),
+ mlState: uint64(s.matchLengths.state.state),
+ ofState: uint64(s.offsets.state.state),
+ iteration: s.nSeqs - 1,
+ litRemain: len(s.literals),
+ out: s.out,
+ outPosition: len(s.out),
+ literals: s.literals,
+ windowSize: s.windowSize,
+ history: hist,
+ }
+
+ s.seqSize = 0
+ startSize := len(s.out)
+
+ var errCode int
+ if cpuinfo.HasBMI2() {
+ if useSafe {
+ errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+ }
+ } else {
+ if useSafe {
+ errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+ }
+ }
+ switch errCode {
+ case noError:
+ break
+
+ case errorMatchLenOfsMismatch:
+ return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+ case errorMatchLenTooBig:
+ return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+ case errorMatchOffTooBig:
+ return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+ ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+ case errorNotEnoughLiterals:
+ return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+ ctx.ll, ctx.litRemain+ctx.ll)
+
+ case errorNotEnoughSpace:
+ size := ctx.outPosition + ctx.ll + ctx.ml
+ if debugDecoder {
+ println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+ }
+ return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
+ default:
+ return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+ }
+
+ s.seqSize += ctx.litRemain
+ if s.seqSize > maxBlockSize {
+ return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ return true, err
+ }
+
+ s.literals = s.literals[ctx.litPosition:]
+ t := ctx.outPosition
+ s.out = s.out[:t]
+
+ // Add final literals
+ s.out = append(s.out, s.literals...)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(s.out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+ }
+ }
+
+ return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+ llTable []decSymbol
+ mlTable []decSymbol
+ ofTable []decSymbol
+ llState uint64
+ mlState uint64
+ ofState uint64
+ iteration int
+ seqs []seqVals
+ litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ ctx := decodeAsmContext{
+ llTable: s.litLengths.fse.dt[:maxTablesize],
+ mlTable: s.matchLengths.fse.dt[:maxTablesize],
+ ofTable: s.offsets.fse.dt[:maxTablesize],
+ llState: uint64(s.litLengths.state.state),
+ mlState: uint64(s.matchLengths.state.state),
+ ofState: uint64(s.offsets.state.state),
+ seqs: seqs,
+ iteration: len(seqs) - 1,
+ litRemain: len(s.literals),
+ }
+
+ s.seqSize = 0
+ lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+ var errCode int
+ if cpuinfo.HasBMI2() {
+ if lte56bits {
+ errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+ }
+ } else {
+ if lte56bits {
+ errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+ }
+ }
+ if errCode != 0 {
+ i := len(seqs) - ctx.iteration - 1
+ switch errCode {
+ case errorMatchLenOfsMismatch:
+ ml := ctx.seqs[i].ml
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+ case errorMatchLenTooBig:
+ ml := ctx.seqs[i].ml
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+ case errorNotEnoughLiterals:
+ ll := ctx.seqs[i].ll
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+ }
+
+ return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+ }
+
+ if ctx.litRemain < 0 {
+ return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+ len(s.literals), len(s.literals)-ctx.litRemain)
+ }
+
+ s.seqSize += ctx.litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+ seqs []seqVals
+ seqIndex int
+ out []byte
+ history []byte
+ literals []byte
+ outPosition int
+ litPosition int
+ windowSize int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// Same as above, but with safe memcopies
+//
+//go:noescape
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+ addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ ctx := executeAsmContext{
+ seqs: seqs,
+ seqIndex: 0,
+ out: out,
+ history: hist,
+ outPosition: t,
+ litPosition: 0,
+ literals: s.literals,
+ windowSize: s.windowSize,
+ }
+ var ok bool
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+ ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
+ } else {
+ ok = sequenceDecs_executeSimple_amd64(&ctx)
+ }
+ if !ok {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)",
+ seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
+ }
+ s.literals = s.literals[ctx.litPosition:]
+ t = ctx.outPosition
+
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
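
decodeSyncSimple and decode above select one of four assembly bodies from two predicates: BMI2 support and whether a single bitReader fill covers all three state updates (at most 56 bits). A sketch of the same dispatch shape using golang.org/x/sys/cpu in place of the module-internal cpuinfo package:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/cpu"
    )

    func main() {
    	// Stand-in for: s.maxBits plus the three actualTableLog values <= 56,
    	// meaning one 64-bit fill leaves room for all three state updates.
    	lte56bits := true

    	switch {
    	case cpu.X86.HasBMI2 && lte56bits:
    		fmt.Println("would call sequenceDecs_decode_56_bmi2")
    	case cpu.X86.HasBMI2:
    		fmt.Println("would call sequenceDecs_decode_bmi2")
    	case lte56bits:
    		fmt.Println("would call sequenceDecs_decode_56_amd64")
    	default:
    		fmt.Println("would call sequenceDecs_decode_amd64")
    	}
    }
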
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
new file mode 100644
index 00000000000..52e5703c26c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -0,0 +1,4099 @@
+// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
+TEXT ·sequenceDecs_decode_amd64(SB), $8-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 104(AX), R10
+ MOVQ s+0(FP), AX
+ MOVQ 144(AX), R11
+ MOVQ 152(AX), R12
+ MOVQ 160(AX), R13
+
+sequenceDecs_decode_amd64_main_loop:
+ MOVQ (SP), R14
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_amd64_fill_end
+
+sequenceDecs_decode_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_of_update_zero:
+ MOVQ AX, 16(R10)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_ml_update_zero:
+ MOVQ AX, 8(R10)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_amd64_fill_2_end
+
+sequenceDecs_decode_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_amd64_fill_2_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decode_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_ll_update_zero:
+ MOVQ AX, (R10)
+
+ // Fill bitreader for state updates
+ MOVQ R14, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R14
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R14
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R14
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_amd64_skip_update:
+ // Adjust offset
+ MOVQ 16(R10), CX
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0
+ MOVQ R12, R13
+ MOVQ R11, R12
+ MOVQ CX, R11
+ JMP sequenceDecs_decode_amd64_after_adjust
+
+sequenceDecs_decode_amd64_adjust_offsetB_1_or_0:
+ CMPQ (R10), $0x00000000
+ JNE sequenceDecs_decode_amd64_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_amd64_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero
+ MOVQ R11, CX
+ JMP sequenceDecs_decode_amd64_after_adjust
+
+sequenceDecs_decode_amd64_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_amd64_adjust_zero
+ JEQ sequenceDecs_decode_amd64_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_amd64_adjust_three
+ JMP sequenceDecs_decode_amd64_adjust_two
+
+sequenceDecs_decode_amd64_adjust_zero:
+ MOVQ R11, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_one:
+ MOVQ R12, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_two:
+ MOVQ R13, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_three:
+ LEAQ -1(R11), AX
+
+sequenceDecs_decode_amd64_adjust_test_temp_valid:
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_amd64_adjust_temp_valid
+ MOVQ $0x00000001, AX
+
+sequenceDecs_decode_amd64_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R12, R13
+ MOVQ R11, R12
+ MOVQ AX, R11
+ MOVQ AX, CX
+
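+	// Repeat-offset sketch for the adjust block above (rep1..rep3 live in
+	// R11..R13; illustrative Go, not generated output):
+	//
+	//   if offsetB > 1 { // a fresh offset was decoded
+	//       rep3, rep2, rep1 = rep2, rep1, offset
+	//   } else {
+	//       if ll == 0 { offset++ } // ll == 0 shifts the selection
+	//       if offset == 0 { offset = rep1 } else {
+	//           temp := [4]int{rep1, rep2, rep3, rep1 - 1}[min(offset, 3)]
+	//           if temp == 0 { temp = 1 } // offsets may never be zero
+	//           if offset != 1 { rep3 = rep2 }
+	//           rep2, rep1, offset = rep1, temp, temp
+	//       }
+	//   }
+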
+sequenceDecs_decode_amd64_after_adjust:
+ MOVQ CX, 16(R10)
+
+ // Check values
+ MOVQ 8(R10), AX
+ MOVQ (R10), R14
+ LEAQ (AX)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decode_amd64_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch
+
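+	// Check-values sketch (illustrative): the running size total at s+256
+	// grows by ll+ml; ctx.litRemain (offset 128) shrinks by ll, going
+	// negative => return 4; ml > 0x20002 (131074) => return 2; and a zero
+	// offset is only legal when ml is also zero, otherwise return 1.
+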
+sequenceDecs_decode_amd64_match_len_ofs_ok:
+ ADDQ $0x18, R10
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decode_amd64_main_loop
+ MOVQ s+0(FP), AX
+ MOVQ R11, 144(AX)
+ MOVQ R12, 152(AX)
+ MOVQ R13, 160(AX)
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_amd64_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_amd64_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
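+	// NOTE: the two unlabeled returns below (codes 3 and 5) appear
+	// unreachable in this decode-only function; offset and output-space
+	// checks only exist in the decodeSync variants. They look like dead
+	// code shared with the generator's other outputs, and the same
+	// pattern repeats in the sibling decode functions.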
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
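+//
+// Sketch: identical to sequenceDecs_decode_amd64 above, except the "_56"
+// variant appears to assume the ll+ml+of bits of one sequence fit in a
+// single refill, so the second per-sequence fill loop is omitted.
+// (Informal note, not generated output.)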
+TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 104(AX), R10
+ MOVQ s+0(FP), AX
+ MOVQ 144(AX), R11
+ MOVQ 152(AX), R12
+ MOVQ 160(AX), R13
+
+sequenceDecs_decode_56_amd64_main_loop:
+ MOVQ (SP), R14
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_56_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_56_amd64_fill_end
+
+sequenceDecs_decode_56_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_56_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_56_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_56_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_of_update_zero:
+ MOVQ AX, 16(R10)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_ml_update_zero:
+ MOVQ AX, 8(R10)
+
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_ll_update_zero:
+ MOVQ AX, (R10)
+
+ // Fill bitreader for state updates
+ MOVQ R14, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R14
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R14
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R14
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_56_amd64_skip_update:
+ // Adjust offset
+ MOVQ 16(R10), CX
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0
+ MOVQ R12, R13
+ MOVQ R11, R12
+ MOVQ CX, R11
+ JMP sequenceDecs_decode_56_amd64_after_adjust
+
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0:
+ CMPQ (R10), $0x00000000
+ JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+ MOVQ R11, CX
+ JMP sequenceDecs_decode_56_amd64_after_adjust
+
+sequenceDecs_decode_56_amd64_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_56_amd64_adjust_zero
+ JEQ sequenceDecs_decode_56_amd64_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_56_amd64_adjust_three
+ JMP sequenceDecs_decode_56_amd64_adjust_two
+
+sequenceDecs_decode_56_amd64_adjust_zero:
+ MOVQ R11, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_one:
+ MOVQ R12, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_two:
+ MOVQ R13, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_three:
+ LEAQ -1(R11), AX
+
+sequenceDecs_decode_56_amd64_adjust_test_temp_valid:
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid
+ MOVQ $0x00000001, AX
+
+sequenceDecs_decode_56_amd64_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R12, R13
+ MOVQ R11, R12
+ MOVQ AX, R11
+ MOVQ AX, CX
+
+sequenceDecs_decode_56_amd64_after_adjust:
+ MOVQ CX, 16(R10)
+
+ // Check values
+ MOVQ 8(R10), AX
+ MOVQ (R10), R14
+ LEAQ (AX)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decode_56_amd64_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_amd64_match_len_ofs_ok:
+ ADDQ $0x18, R10
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decode_56_amd64_main_loop
+ MOVQ s+0(FP), AX
+ MOVQ R11, 144(AX)
+ MOVQ R12, 152(AX)
+ MOVQ R13, 160(AX)
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_56_amd64_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
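+//
+// Sketch: same algorithm as sequenceDecs_decode_amd64, with the bit
+// extraction done via BMI2 instead of CL-based shifts, e.g. (illustrative
+// Go; bextr/bzhi/rolq are hypothetical helpers):
+//
+//   bits := bextr(state, 8, 8)               // addBits from byte 1
+//   v := bzhi(rolq(buf, bitsRead+bits), bits)
+//   field = uint32(state>>32) + v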
+TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 104(CX), R9
+ MOVQ s+0(FP), CX
+ MOVQ 144(CX), R10
+ MOVQ 152(CX), R11
+ MOVQ 160(CX), R12
+
+sequenceDecs_decode_bmi2_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_bmi2_fill_end
+
+sequenceDecs_decode_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 16(R9)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 8(R9)
+
+	// Fill bitreader to have enough for the remaining fields
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_bmi2_fill_2_end
+
+sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_bmi2_fill_2_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, (R9)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_bmi2_skip_update
+ LEAQ (SI)(DI*1), R14
+ ADDQ R8, R14
+ MOVBQZX R14, R14
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+
+ // Update Offset State
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R15, CX
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decode_bmi2_skip_update:
+ // Adjust offset
+ MOVQ 16(R9), CX
+ CMPQ R13, $0x01
+ JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0
+ MOVQ R11, R12
+ MOVQ R10, R11
+ MOVQ CX, R10
+ JMP sequenceDecs_decode_bmi2_after_adjust
+
+sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0:
+ CMPQ (R9), $0x00000000
+ JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_bmi2_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero
+ MOVQ R10, CX
+ JMP sequenceDecs_decode_bmi2_after_adjust
+
+sequenceDecs_decode_bmi2_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_bmi2_adjust_zero
+ JEQ sequenceDecs_decode_bmi2_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_bmi2_adjust_three
+ JMP sequenceDecs_decode_bmi2_adjust_two
+
+sequenceDecs_decode_bmi2_adjust_zero:
+ MOVQ R10, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_one:
+ MOVQ R11, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_two:
+ MOVQ R12, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_three:
+ LEAQ -1(R10), R13
+
+sequenceDecs_decode_bmi2_adjust_test_temp_valid:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R13
+
+sequenceDecs_decode_bmi2_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R11, R12
+ MOVQ R10, R11
+ MOVQ R13, R10
+ MOVQ R13, CX
+
+sequenceDecs_decode_bmi2_after_adjust:
+ MOVQ CX, 16(R9)
+
+ // Check values
+ MOVQ 8(R9), R13
+ MOVQ (R9), R14
+ LEAQ (R13)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ R13, $0x00020002
+ JA sequenceDecs_decode_bmi2_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_bmi2_match_len_ofs_ok:
+ ADDQ $0x18, R9
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decode_bmi2_main_loop
+ MOVQ s+0(FP), CX
+ MOVQ R10, 144(CX)
+ MOVQ R11, 152(CX)
+ MOVQ R12, 160(CX)
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_bmi2_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 104(CX), R9
+ MOVQ s+0(FP), CX
+ MOVQ 144(CX), R10
+ MOVQ 152(CX), R11
+ MOVQ 160(CX), R12
+
+sequenceDecs_decode_56_bmi2_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_56_bmi2_fill_end
+
+sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_56_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_56_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_56_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 16(R9)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 8(R9)
+
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, (R9)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_bmi2_skip_update
+ LEAQ (SI)(DI*1), R14
+ ADDQ R8, R14
+ MOVBQZX R14, R14
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+
+ // Update Offset State
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R15, CX
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decode_56_bmi2_skip_update:
+ // Adjust offset
+ MOVQ 16(R9), CX
+ CMPQ R13, $0x01
+ JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0
+ MOVQ R11, R12
+ MOVQ R10, R11
+ MOVQ CX, R10
+ JMP sequenceDecs_decode_56_bmi2_after_adjust
+
+sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0:
+ CMPQ (R9), $0x00000000
+ JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_56_bmi2_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+ MOVQ R10, CX
+ JMP sequenceDecs_decode_56_bmi2_after_adjust
+
+sequenceDecs_decode_56_bmi2_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_56_bmi2_adjust_zero
+ JEQ sequenceDecs_decode_56_bmi2_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_56_bmi2_adjust_three
+ JMP sequenceDecs_decode_56_bmi2_adjust_two
+
+sequenceDecs_decode_56_bmi2_adjust_zero:
+ MOVQ R10, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_one:
+ MOVQ R11, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_two:
+ MOVQ R12, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_three:
+ LEAQ -1(R10), R13
+
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R13
+
+sequenceDecs_decode_56_bmi2_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R11, R12
+ MOVQ R10, R11
+ MOVQ R13, R10
+ MOVQ R13, CX
+
+sequenceDecs_decode_56_bmi2_after_adjust:
+ MOVQ CX, 16(R9)
+
+ // Check values
+ MOVQ 8(R9), R13
+ MOVQ (R9), R14
+ LEAQ (R13)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ R13, $0x00020002
+ JA sequenceDecs_decode_56_bmi2_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
+ ADDQ $0x18, R9
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decode_56_bmi2_main_loop
+ MOVQ s+0(FP), CX
+ MOVQ R10, 144(CX)
+ MOVQ R11, 152(CX)
+ MOVQ R12, 160(CX)
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_56_bmi2_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
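+//
+// Sketch of the per-sequence work below (illustrative Go, not the
+// generated source): copy ll literals, then ml match bytes from mo back,
+// reading from the history window when the match starts before out
+// (history reads omitted here):
+//
+//   for _, seq := range seqs {
+//       out = append(out, lits[:seq.ll]...)
+//       lits = lits[seq.ll:]
+//       for i := 0; i < seq.ml; i++ { // byte-wise copy handles overlap
+//           out = append(out, out[len(out)-seq.mo])
+//       }
+//   }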
+TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
+ MOVQ ctx+0(FP), R10
+ MOVQ 8(R10), CX
+ TESTQ CX, CX
+ JZ empty_seqs
+ MOVQ (R10), AX
+ MOVQ 24(R10), DX
+ MOVQ 32(R10), BX
+ MOVQ 80(R10), SI
+ MOVQ 104(R10), DI
+ MOVQ 120(R10), R8
+ MOVQ 56(R10), R9
+ MOVQ 64(R10), R10
+ ADDQ R10, R9
+
+ // seqsBase += 24 * seqIndex
+ LEAQ (DX)(DX*2), R11
+ SHLQ $0x03, R11
+ ADDQ R11, AX
+
+ // outBase += outPosition
+ ADDQ DI, BX
+
+main_loop:
+ MOVQ (AX), R11
+ MOVQ 16(AX), R12
+ MOVQ 8(AX), R13
+
+ // Copy literals
+ TESTQ R11, R11
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1:
+ MOVUPS (SI)(R14*1), X0
+ MOVUPS X0, (BX)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, R11
+ JB copy_1
+ ADDQ R11, SI
+ ADDQ R11, BX
+ ADDQ R11, DI
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ LEAQ (DI)(R10*1), R11
+ CMPQ R12, R11
+ JG error_match_off_too_big
+ CMPQ R12, R8
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, R11
+ SUBQ DI, R11
+ JLS copy_match
+ MOVQ R9, R14
+ SUBQ R11, R14
+ CMPQ R13, R11
+ JG copy_all_from_history
+ MOVQ R13, R11
+ SUBQ $0x10, R11
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R11
+ JAE copy_4_loop
+ LEAQ 16(R14)(R11*1), R14
+ LEAQ 16(BX)(R11*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), R11
+ MOVB 2(R14), R12
+ MOVW R11, (BX)
+ MOVB R12, 2(BX)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), R11
+ MOVL -4(R14)(R13*1), R12
+ MOVL R11, (BX)
+ MOVL R12, -4(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), R11
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ R11, (BX)
+ MOVQ R12, -8(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+
+copy_4_end:
+ ADDQ R13, DI
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ R11, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(BX)(R15*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ R11, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ R11, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(R11*1), BP
+ MOVB R15, (BX)
+ MOVB BP, -1(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (BX)
+ MOVB BP, 2(BX)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(R11*1), BP
+ MOVL R15, (BX)
+ MOVL BP, -4(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(R11*1), BP
+ MOVQ R15, (BX)
+ MOVQ BP, -8(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+
+copy_5_end:
+ ADDQ R11, DI
+ SUBQ R11, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ BX, R11
+ SUBQ R12, R11
+
+	// ml <= mo: the match does not overlap the bytes being written
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, DI
+ MOVQ BX, R12
+ ADDQ R13, BX
+
+copy_2:
+ MOVUPS (R11), X0
+ MOVUPS X0, (R12)
+ ADDQ $0x10, R11
+ ADDQ $0x10, R12
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, DI
+
+copy_slow_3:
+ MOVB (R11), R12
+ MOVB R12, (BX)
+ INCQ R11
+ INCQ BX
+ DECQ R13
+ JNZ copy_slow_3
+
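+	// Overlapping matches (ml > mo) must be copied a byte at a time so
+	// that bytes written earlier by this same match are visible to later
+	// reads; the 16-byte MOVUPS path above would read bytes it has not
+	// written yet.
+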
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9
+ MOVQ ctx+0(FP), R10
+ MOVQ 8(R10), CX
+ TESTQ CX, CX
+ JZ empty_seqs
+ MOVQ (R10), AX
+ MOVQ 24(R10), DX
+ MOVQ 32(R10), BX
+ MOVQ 80(R10), SI
+ MOVQ 104(R10), DI
+ MOVQ 120(R10), R8
+ MOVQ 56(R10), R9
+ MOVQ 64(R10), R10
+ ADDQ R10, R9
+
+ // seqsBase += 24 * seqIndex
+ LEAQ (DX)(DX*2), R11
+ SHLQ $0x03, R11
+ ADDQ R11, AX
+
+ // outBase += outPosition
+ ADDQ DI, BX
+
+main_loop:
+ MOVQ (AX), R11
+ MOVQ 16(AX), R12
+ MOVQ 8(AX), R13
+
+ // Copy literals
+ TESTQ R11, R11
+ JZ check_offset
+ MOVQ R11, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (SI), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, SI
+ ADDQ $0x10, BX
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(SI)(R14*1), SI
+ LEAQ 16(BX)(R14*1), BX
+ MOVUPS -16(SI), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ R11, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ R11, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (SI), R14
+ MOVB -1(SI)(R11*1), R15
+ MOVB R14, (BX)
+ MOVB R15, -1(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (SI), R14
+ MOVB 2(SI), R15
+ MOVW R14, (BX)
+ MOVB R15, 2(BX)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (SI), R14
+ MOVL -4(SI)(R11*1), R15
+ MOVL R14, (BX)
+ MOVL R15, -4(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (SI), R14
+ MOVQ -8(SI)(R11*1), R15
+ MOVQ R14, (BX)
+ MOVQ R15, -8(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+
+copy_1_end:
+ ADDQ R11, DI
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ LEAQ (DI)(R10*1), R11
+ CMPQ R12, R11
+ JG error_match_off_too_big
+ CMPQ R12, R8
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, R11
+ SUBQ DI, R11
+ JLS copy_match
+ MOVQ R9, R14
+ SUBQ R11, R14
+ CMPQ R13, R11
+ JG copy_all_from_history
+ MOVQ R13, R11
+ SUBQ $0x10, R11
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R11
+ JAE copy_4_loop
+ LEAQ 16(R14)(R11*1), R14
+ LEAQ 16(BX)(R11*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), R11
+ MOVB 2(R14), R12
+ MOVW R11, (BX)
+ MOVB R12, 2(BX)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), R11
+ MOVL -4(R14)(R13*1), R12
+ MOVL R11, (BX)
+ MOVL R12, -4(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), R11
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ R11, (BX)
+ MOVQ R12, -8(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+
+copy_4_end:
+ ADDQ R13, DI
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ R11, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(BX)(R15*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ R11, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ R11, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(R11*1), BP
+ MOVB R15, (BX)
+ MOVB BP, -1(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (BX)
+ MOVB BP, 2(BX)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(R11*1), BP
+ MOVL R15, (BX)
+ MOVL BP, -4(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(R11*1), BP
+ MOVQ R15, (BX)
+ MOVQ BP, -8(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+
+copy_5_end:
+ ADDQ R11, DI
+ SUBQ R11, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ BX, R11
+ SUBQ R12, R11
+
+	// ml <= mo: the match does not overlap the bytes being written
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, DI
+ MOVQ R13, R12
+ SUBQ $0x10, R12
+ JB copy_2_small
+
+copy_2_loop:
+ MOVUPS (R11), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R11
+ ADDQ $0x10, BX
+ SUBQ $0x10, R12
+ JAE copy_2_loop
+ LEAQ 16(R11)(R12*1), R11
+ LEAQ 16(BX)(R12*1), BX
+ MOVUPS -16(R11), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (R11), R12
+ MOVB -1(R11)(R13*1), R14
+ MOVB R12, (BX)
+ MOVB R14, -1(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (R11), R12
+ MOVB 2(R11), R14
+ MOVW R12, (BX)
+ MOVB R14, 2(BX)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (R11), R12
+ MOVL -4(R11)(R13*1), R14
+ MOVL R12, (BX)
+ MOVL R14, -4(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (R11), R12
+ MOVQ -8(R11)(R13*1), R14
+ MOVQ R12, (BX)
+ MOVQ R14, -8(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, DI
+
+copy_slow_3:
+ MOVB (R11), R12
+ MOVB R12, (BX)
+ INCQ R11
+ INCQ BX
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
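+//
+// Sketch: decodeSync fuses sequenceDecs_decode_amd64 with
+// sequenceDecs_executeSimple_amd64, decoding each sequence and
+// immediately copying its literals and match into s.out rather than
+// buffering decoded sequences. (Informal note, not generated output.)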
+TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ XORQ CX, CX
+ MOVQ CX, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_amd64_fill_end
+
+sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_of_update_zero:
+ MOVQ AX, 8(SP)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_ml_update_zero:
+ MOVQ AX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining fields
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_amd64_fill_2_end
+
+sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_amd64_fill_2_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_ll_update_zero:
+ MOVQ AX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R13
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R13
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R13
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_amd64_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_amd64_after_adjust
+
+sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_amd64_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_amd64_after_adjust
+
+sequenceDecs_decodeSync_amd64_adjust_offset_nonzero:
+ MOVQ R13, AX
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, AX
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(AX*8), R14
+ JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_amd64_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_amd64_adjust_skip
+ MOVQ 152(CX), AX
+ MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_amd64_adjust_skip:
+ MOVQ 144(CX), AX
+ MOVQ AX, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
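+	// Same repeat-offset rules as in sequenceDecs_decode_amd64, but
+	// applied in place on s.prevOffset (at 144(CX)): the CMOVQEQ pair maps
+	// offset 3 to prevOffset[0]-1, and the JNZ uses the flags of the
+	// preceding ADDQ to force a zero result to 1.
+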
+sequenceDecs_decodeSync_amd64_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), AX
+ MOVQ 24(SP), CX
+ LEAQ (AX)(CX*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ CX, 104(R14)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decodeSync_amd64_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_amd64_match_len_ofs_ok:
+ MOVQ 24(SP), AX
+ MOVQ 8(SP), CX
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (AX)(R13*1), R14
+ ADDQ R10, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ AX, AX
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1:
+ MOVUPS (R11)(R14*1), X0
+ MOVUPS X0, (R10)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, AX
+ JB copy_1
+ ADDQ AX, R11
+ ADDQ AX, R10
+ ADDQ AX, R12
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R12, AX
+ ADDQ 40(SP), AX
+ CMPQ CX, AX
+ JG error_match_off_too_big
+ CMPQ CX, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JG copy_all_from_history
+ MOVQ R13, AX
+ SUBQ $0x10, AX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, AX
+ JAE copy_4_loop
+ LEAQ 16(R14)(AX*1), R14
+ LEAQ 16(R10)(AX*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), AX
+ MOVB 2(R14), CL
+ MOVW AX, (R10)
+ MOVB CL, 2(R10)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), AX
+ MOVL -4(R14)(R13*1), CX
+ MOVL AX, (R10)
+ MOVL CX, -4(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), AX
+ MOVQ -8(R14)(R13*1), CX
+ MOVQ AX, (R10)
+ MOVQ CX, -8(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+
+copy_4_end:
+ ADDQ R13, R12
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ AX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R10)(R15*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ AX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ AX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(AX*1), BP
+ MOVB R15, (R10)
+ MOVB BP, -1(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R10)
+ MOVB BP, 2(R10)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(AX*1), BP
+ MOVL R15, (R10)
+ MOVL BP, -4(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(AX*1), BP
+ MOVQ R15, (R10)
+ MOVQ BP, -8(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+
+copy_5_end:
+ ADDQ AX, R12
+ SUBQ AX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R10, AX
+ SUBQ CX, AX
+
+	// ml <= mo: the match does not overlap the bytes being written
+ CMPQ R13, CX
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R12
+ MOVQ R10, CX
+ ADDQ R13, R10
+
+copy_2:
+ MOVUPS (AX), X0
+ MOVUPS X0, (CX)
+ ADDQ $0x10, AX
+ ADDQ $0x10, CX
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R12
+
+copy_slow_3:
+ MOVB (AX), CL
+ MOVB CL, (R10)
+ INCQ AX
+ INCQ R10
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decodeSync_amd64_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R12, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R11
+ MOVQ R11, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_amd64_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
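+//
+// Sketch: BMI2 flavor of sequenceDecs_decodeSync_amd64 above; see the
+// note on sequenceDecs_decode_bmi2 for the BEXTR/BZHI extraction pattern.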
+TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ XORQ R9, R9
+ MOVQ R9, 8(SP)
+ MOVQ R9, 16(SP)
+ MOVQ R9, 24(SP)
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_bmi2_fill_end
+
+sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 8(SP)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining fields
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_bmi2_skip_update
+ LEAQ (SI)(DI*1), R13
+ ADDQ R8, R13
+ MOVBQZX R13, R13
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+
+ // Update Offset State
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R14, CX
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decodeSync_bmi2_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ R12, $0x01
+ JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_bmi2_after_adjust
+
+sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_bmi2_after_adjust
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero:
+ MOVQ R13, R12
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, R12
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(R12*8), R14
+ JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_bmi2_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_bmi2_adjust_skip
+ MOVQ 152(CX), R12
+ MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_bmi2_adjust_skip:
+ MOVQ 144(CX), R12
+ MOVQ R12, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_bmi2_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), CX
+ MOVQ 24(SP), R12
+ LEAQ (CX)(R12*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ R12, 104(R14)
+ JS error_not_enough_literals
+ CMPQ CX, $0x00020002
+ JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok
+ TESTQ CX, CX
+ JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_bmi2_match_len_ofs_ok:
+ MOVQ 24(SP), CX
+ MOVQ 8(SP), R12
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (CX)(R13*1), R14
+ ADDQ R9, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ CX, CX
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1:
+ MOVUPS (R10)(R14*1), X0
+ MOVUPS X0, (R9)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, CX
+ JB copy_1
+ ADDQ CX, R10
+ ADDQ CX, R9
+ ADDQ CX, R11
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R11, CX
+ ADDQ 40(SP), CX
+ CMPQ R12, CX
+ JG error_match_off_too_big
+ CMPQ R12, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JG copy_all_from_history
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, CX
+ JAE copy_4_loop
+ LEAQ 16(R14)(CX*1), R14
+ LEAQ 16(R9)(CX*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), CX
+ MOVB 2(R14), R12
+ MOVW CX, (R9)
+ MOVB R12, 2(R9)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), CX
+ MOVL -4(R14)(R13*1), R12
+ MOVL CX, (R9)
+ MOVL R12, -4(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), CX
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ CX, (R9)
+ MOVQ R12, -8(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+
+copy_4_end:
+ ADDQ R13, R11
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ CX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R9)(R15*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ CX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ CX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(CX*1), BP
+ MOVB R15, (R9)
+ MOVB BP, -1(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R9)
+ MOVB BP, 2(R9)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(CX*1), BP
+ MOVL R15, (R9)
+ MOVL BP, -4(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(CX*1), BP
+ MOVQ R15, (R9)
+ MOVQ BP, -8(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+
+copy_5_end:
+ ADDQ CX, R11
+ SUBQ CX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R9, CX
+ SUBQ R12, CX
+
+	// ml <= mo: the match does not overlap the bytes being written
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R11
+ MOVQ R9, R12
+ ADDQ R13, R9
+
+copy_2:
+ MOVUPS (CX), X0
+ MOVUPS X0, (R12)
+ ADDQ $0x10, CX
+ ADDQ $0x10, R12
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R11
+
+copy_slow_3:
+ MOVB (CX), R12
+ MOVB R12, (R9)
+ INCQ CX
+ INCQ R9
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decodeSync_bmi2_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R11, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R10
+ MOVQ R10, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_bmi2_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
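+//
+// Sketch: the "_safe" variant replaces the 16-byte MOVUPS copy loops with
+// exact-length 1-16 byte moves, so it never reads or writes past the ends
+// of its buffers; the plain variant may over-copy by up to 15 bytes and
+// is only used when that slack is known to exist. (Informal note.)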
+TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ XORQ CX, CX
+ MOVQ CX, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_safe_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_of_update_zero:
+ MOVQ AX, 8(SP)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
+ MOVQ AX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining fields
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
+ MOVQ AX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_safe_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R13
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R13
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R13
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_safe_amd64_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_safe_amd64_after_adjust
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_safe_amd64_after_adjust
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero:
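+	// Branchless repeat-offset selection: an offset value of 3 reads
+	// prevOffset[0]-1, otherwise prevOffset[offset] is used (compare the
+	// inlined adjustOffset logic in seqdec_generic.go).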
+ MOVQ R13, AX
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, AX
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(AX*8), R14
+ JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip
+ MOVQ 152(CX), AX
+ MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_safe_amd64_adjust_skip:
+ MOVQ 144(CX), AX
+ MOVQ AX, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_amd64_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), AX
+ MOVQ 24(SP), CX
+ LEAQ (AX)(CX*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ CX, 104(R14)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok:
+ MOVQ 24(SP), AX
+ MOVQ 8(SP), CX
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (AX)(R13*1), R14
+ ADDQ R10, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ AX, AX
+ JZ check_offset
+ MOVQ AX, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (R11), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R11
+ ADDQ $0x10, R10
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(R11)(R14*1), R11
+ LEAQ 16(R10)(R14*1), R10
+ MOVUPS -16(R11), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ AX, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ AX, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (R11), R14
+ MOVB -1(R11)(AX*1), R15
+ MOVB R14, (R10)
+ MOVB R15, -1(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (R11), R14
+ MOVB 2(R11), R15
+ MOVW R14, (R10)
+ MOVB R15, 2(R10)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (R11), R14
+ MOVL -4(R11)(AX*1), R15
+ MOVL R14, (R10)
+ MOVL R15, -4(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (R11), R14
+ MOVQ -8(R11)(AX*1), R15
+ MOVQ R14, (R10)
+ MOVQ R15, -8(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+
+copy_1_end:
+ ADDQ AX, R12
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R12, AX
+ ADDQ 40(SP), AX
+ CMPQ CX, AX
+ JG error_match_off_too_big
+ CMPQ CX, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JG copy_all_from_history
+ MOVQ R13, AX
+ SUBQ $0x10, AX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, AX
+ JAE copy_4_loop
+ LEAQ 16(R14)(AX*1), R14
+ LEAQ 16(R10)(AX*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), AX
+ MOVB 2(R14), CL
+ MOVW AX, (R10)
+ MOVB CL, 2(R10)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), AX
+ MOVL -4(R14)(R13*1), CX
+ MOVL AX, (R10)
+ MOVL CX, -4(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), AX
+ MOVQ -8(R14)(R13*1), CX
+ MOVQ AX, (R10)
+ MOVQ CX, -8(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+
+copy_4_end:
+ ADDQ R13, R12
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ AX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R10)(R15*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ AX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ AX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(AX*1), BP
+ MOVB R15, (R10)
+ MOVB BP, -1(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R10)
+ MOVB BP, 2(R10)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(AX*1), BP
+ MOVL R15, (R10)
+ MOVL BP, -4(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(AX*1), BP
+ MOVQ R15, (R10)
+ MOVQ BP, -8(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+
+copy_5_end:
+ ADDQ AX, R12
+ SUBQ AX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R10, AX
+ SUBQ CX, AX
+
+ // ml <= mo
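+	// (If the match length exceeds the offset, the source overlaps the
+	// bytes still being written and a byte-wise copy is required.)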
+ CMPQ R13, CX
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R12
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_2_small
+
+copy_2_loop:
+ MOVUPS (AX), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, AX
+ ADDQ $0x10, R10
+ SUBQ $0x10, CX
+ JAE copy_2_loop
+ LEAQ 16(AX)(CX*1), AX
+ LEAQ 16(R10)(CX*1), R10
+ MOVUPS -16(AX), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (AX), CL
+ MOVB -1(AX)(R13*1), R14
+ MOVB CL, (R10)
+ MOVB R14, -1(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (AX), CX
+ MOVB 2(AX), R14
+ MOVW CX, (R10)
+ MOVB R14, 2(R10)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (AX), CX
+ MOVL -4(AX)(R13*1), R14
+ MOVL CX, (R10)
+ MOVL R14, -4(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (AX), CX
+ MOVQ -8(AX)(R13*1), R14
+ MOVQ CX, (R10)
+ MOVQ R14, -8(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R12
+
+copy_slow_3:
+ MOVB (AX), CL
+ MOVB CL, (R10)
+ INCQ AX
+ INCQ R10
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decodeSync_safe_amd64_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R12, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R11
+ MOVQ R11, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ XORQ R9, R9
+ MOVQ R9, 8(SP)
+ MOVQ R9, 16(SP)
+ MOVQ R9, 24(SP)
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_safe_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_end:
+ // Update offset
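+	// BMI2 path: BEXTRQ extracts the extra-bit count from byte 1 of the
+	// state and BZHIQ masks the rotated bit buffer down to that many bits.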
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 8(SP)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining fields.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
+ LEAQ (SI)(DI*1), R13
+ ADDQ R8, R13
+ MOVBQZX R13, R13
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+
+ // Update Offset State
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R14, CX
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decodeSync_safe_bmi2_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ R12, $0x01
+ JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero:
+ MOVQ R13, R12
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, R12
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(R12*8), R14
+ JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip
+ MOVQ 152(CX), R12
+ MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_skip:
+ MOVQ 144(CX), R12
+ MOVQ R12, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_bmi2_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), CX
+ MOVQ 24(SP), R12
+ LEAQ (CX)(R12*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ R12, 104(R14)
+ JS error_not_enough_literals
+ CMPQ CX, $0x00020002
+ JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok
+ TESTQ CX, CX
+ JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok:
+ MOVQ 24(SP), CX
+ MOVQ 8(SP), R12
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (CX)(R13*1), R14
+ ADDQ R9, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ CX, CX
+ JZ check_offset
+ MOVQ CX, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (R10), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R10
+ ADDQ $0x10, R9
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(R10)(R14*1), R10
+ LEAQ 16(R9)(R14*1), R9
+ MOVUPS -16(R10), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ CX, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ CX, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (R10), R14
+ MOVB -1(R10)(CX*1), R15
+ MOVB R14, (R9)
+ MOVB R15, -1(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (R10), R14
+ MOVB 2(R10), R15
+ MOVW R14, (R9)
+ MOVB R15, 2(R9)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (R10), R14
+ MOVL -4(R10)(CX*1), R15
+ MOVL R14, (R9)
+ MOVL R15, -4(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (R10), R14
+ MOVQ -8(R10)(CX*1), R15
+ MOVQ R14, (R9)
+ MOVQ R15, -8(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+
+copy_1_end:
+ ADDQ CX, R11
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R11, CX
+ ADDQ 40(SP), CX
+ CMPQ R12, CX
+ JG error_match_off_too_big
+ CMPQ R12, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JG copy_all_from_history
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, CX
+ JAE copy_4_loop
+ LEAQ 16(R14)(CX*1), R14
+ LEAQ 16(R9)(CX*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), CX
+ MOVB 2(R14), R12
+ MOVW CX, (R9)
+ MOVB R12, 2(R9)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), CX
+ MOVL -4(R14)(R13*1), R12
+ MOVL CX, (R9)
+ MOVL R12, -4(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), CX
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ CX, (R9)
+ MOVQ R12, -8(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+
+copy_4_end:
+ ADDQ R13, R11
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ CX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R9)(R15*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ CX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ CX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(CX*1), BP
+ MOVB R15, (R9)
+ MOVB BP, -1(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R9)
+ MOVB BP, 2(R9)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(CX*1), BP
+ MOVL R15, (R9)
+ MOVL BP, -4(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(CX*1), BP
+ MOVQ R15, (R9)
+ MOVQ BP, -8(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+
+copy_5_end:
+ ADDQ CX, R11
+ SUBQ CX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R9, CX
+ SUBQ R12, CX
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R11
+ MOVQ R13, R12
+ SUBQ $0x10, R12
+ JB copy_2_small
+
+copy_2_loop:
+ MOVUPS (CX), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, CX
+ ADDQ $0x10, R9
+ SUBQ $0x10, R12
+ JAE copy_2_loop
+ LEAQ 16(CX)(R12*1), CX
+ LEAQ 16(R9)(R12*1), R9
+ MOVUPS -16(CX), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (CX), R12
+ MOVB -1(CX)(R13*1), R14
+ MOVB R12, (R9)
+ MOVB R14, -1(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (CX), R12
+ MOVB 2(CX), R14
+ MOVW R12, (R9)
+ MOVB R14, 2(R9)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (CX), R12
+ MOVL -4(CX)(R13*1), R14
+ MOVL R12, (R9)
+ MOVL R14, -4(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (CX), R12
+ MOVQ -8(CX)(R13*1), R14
+ MOVQ R12, (R9)
+ MOVQ R14, -8(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R11
+
+copy_slow_3:
+ MOVB (CX), R12
+ MOVB R12, (R9)
+ INCQ CX
+ INCQ R9
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decodeSync_safe_bmi2_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R11, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R10
+ MOVQ R10, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
new file mode 100644
index 00000000000..ac2a80d2911
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -0,0 +1,237 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+ "fmt"
+ "io"
+)
+
+// decode sequences from the stream with the provided history but without dictionary.
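+// On these platforms it reports false, so the caller falls back to the
+// non-assembly decode path.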
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+ return false, nil
+}
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+	// Grab full-size tables to avoid bounds checks.
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+ llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+ s.seqSize = 0
+ litRemain := len(s.literals)
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+ for i := range seqs {
+ var ll, mo, ml int
+ if br.off > 4+((maxOffsetBits+16+16)>>3) {
+ // inlined function:
+ // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+ // Final will not read from stream.
+ var llB, mlB, moB uint8
+ ll, llB = llState.final()
+ ml, mlB = mlState.final()
+ mo, moB = ofState.final()
+
+ // extra bits are stored in reverse order.
+ br.fillFast()
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
+ br.fillFast()
+ }
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+
+ if moB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = mo
+ } else {
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
+ if ll == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ mo++
+ }
+
+ if mo == 0 {
+ mo = s.prevOffset[0]
+ } else {
+ var temp int
+ if mo == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[mo]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("WARNING: temp was 0")
+ temp = 1
+ }
+
+ if mo != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ mo = temp
+ }
+ }
+ br.fillFast()
+ } else {
+ if br.overread() {
+ if debugDecoder {
+ printf("reading sequence %d, exceeded available data\n", i)
+ }
+ return io.ErrUnexpectedEOF
+ }
+ ll, mo, ml = s.next(br, llState, mlState, ofState)
+ br.fill()
+ }
+
+ if debugSequences {
+ println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+ }
+ // Evaluate.
+ // We might be doing this async, so do it early.
+ if mo == 0 && ml > 0 {
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+ }
+ if ml > maxMatchLen {
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+ }
+ s.seqSize += ll + ml
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ litRemain -= ll
+ if litRemain < 0 {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d are available", ll, litRemain+ll)
+ }
+ seqs[i] = seqVals{
+ ll: ll,
+ ml: ml,
+ mo: mo,
+ }
+ if i == len(seqs)-1 {
+ // This is the last sequence, so we shouldn't update state.
+ break
+ }
+
+ // Manually inlined, ~ 5-20% faster
+ // Update all 3 states at once. Approx 20% faster.
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+ if nBits == 0 {
+ llState = llTable[llState.newState()&maxTableMask]
+ mlState = mlTable[mlState.newState()&maxTableMask]
+ ofState = ofTable[ofState.newState()&maxTableMask]
+ } else {
+ bits := br.get32BitsFast(nBits)
+ lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+ llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits >> (ofState.nbBits() & 31))
+ lowBits &= bitMask[mlState.nbBits()&15]
+ mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+ ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+ }
+ }
+ s.seqSize += litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// executeSimple handles cases when a dictionary is not used.
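+// It expands the decoded (ll, ml, mo) triples into s.out, reading from hist
+// for matches that reach back before the current block.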
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize > cap(s.out) {
+ addBytes := s.seqSize + len(s.out)
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ for _, seq := range seqs {
+ // Add literals
+ copy(out[t:], s.literals[:seq.ll])
+ t += seq.ll
+ s.literals = s.literals[seq.ll:]
+
+ // Malformed input
+ if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+ }
+
+ // Copy from history.
+ if v := seq.mo - t; v > 0 {
+ // v is the start position in history from end.
+ start := len(hist) - v
+ if seq.ml > v {
+ // Some goes into the current block.
+ // Copy remainder of history
+ copy(out[t:], hist[start:])
+ t += v
+ seq.ml -= v
+ } else {
+ copy(out[t:], hist[start:start+seq.ml])
+ t += seq.ml
+ continue
+ }
+ }
+
+ // We must be in the current buffer now
+ if seq.ml > 0 {
+ start := t - seq.mo
+ if seq.ml <= t-start {
+ // No overlap
+ copy(out[t:], out[start:start+seq.ml])
+ t += seq.ml
+ } else {
+ // Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+ src := out[start : start+seq.ml]
+ dst := out[t:]
+ dst = dst[:len(src)]
+ t += len(src)
+ // Destination is the space we just added.
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ }
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index 967f29b3120..29c15c8c4ef 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -18,36 +18,58 @@ const ZipMethodWinZip = 93
// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
const ZipMethodPKWare = 20
-var zipReaderPool sync.Pool
+// zipReaderPool is the default reader pool.
+var zipReaderPool = sync.Pool{New: func() interface{} {
+ z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
+ if err != nil {
+ panic(err)
+ }
+ return z
+}}
-// newZipReader cannot be used since we would leak goroutines...
-func newZipReader(r io.Reader) io.ReadCloser {
- dec, ok := zipReaderPool.Get().(*Decoder)
- if ok {
- dec.Reset(r)
- } else {
- d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
- if err != nil {
- panic(err)
+// newZipReader creates a pooled zip decompressor.
+func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
+ pool := &zipReaderPool
+ if len(opts) > 0 {
+ opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
+ // Force concurrency 1
+ opts = append(opts, WithDecoderConcurrency(1))
+ // Create our own pool
+ pool = &sync.Pool{}
+ }
+ return func(r io.Reader) io.ReadCloser {
+ dec, ok := pool.Get().(*Decoder)
+ if ok {
+ dec.Reset(r)
+ } else {
+ d, err := NewReader(r, opts...)
+ if err != nil {
+ panic(err)
+ }
+ dec = d
}
- dec = d
+ return &pooledZipReader{dec: dec, pool: pool}
}
- return &pooledZipReader{dec: dec}
}
type pooledZipReader struct {
- mu sync.Mutex // guards Close and Read
- dec *Decoder
+ mu sync.Mutex // guards Close and Read
+ pool *sync.Pool
+ dec *Decoder
}
func (r *pooledZipReader) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.dec == nil {
- return 0, errors.New("Read after Close")
+ return 0, errors.New("read after close or EOF")
}
dec, err := r.dec.Read(p)
-
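+	// On EOF, return the decoder to the pool; later Reads fail with
+	// "read after close or EOF".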
+ if err == io.EOF {
+ r.dec.Reset(nil)
+ r.pool.Put(r.dec)
+ r.dec = nil
+ }
return dec, err
}
@@ -57,7 +79,7 @@ func (r *pooledZipReader) Close() error {
var err error
if r.dec != nil {
err = r.dec.Reset(nil)
- zipReaderPool.Put(r.dec)
+ r.pool.Put(r.dec)
r.dec = nil
}
return err
@@ -111,12 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
-func ZipDecompressor() func(r io.Reader) io.ReadCloser {
- return func(r io.Reader) io.ReadCloser {
- d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
- if err != nil {
- panic(err)
- }
- return d.IOReadCloser()
- }
+// Options can be specified. WithDecoderConcurrency(1) is forced,
+// and by default a 128MB maximum decompression window is specified.
+// The window size can be overridden if required.
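+//
+// A typical registration with archive/zip (sketch; zr is a *zip.Reader):
+//
+//	zr.RegisterDecompressor(ZipMethodWinZip, ZipDecompressor())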
+func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
+ return newZipReader(opts...)
}
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index ef1d49a009c..3eb3f1c8266 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -39,6 +39,9 @@ const zstdMinMatch = 3
// Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize
+// fcsUnknown is used for unknown frame content size.
+const fcsUnknown = math.MaxUint64
+
var (
// ErrReservedBlockType is returned when a reserved block type is found.
// Typically this indicates wrong or corrupted input.
@@ -52,6 +55,10 @@ var (
// Typically returned on invalid input.
ErrBlockTooSmall = errors.New("block too small")
+ // ErrUnexpectedBlockSize is returned when a block has unexpected size.
+ // Typically returned on invalid input.
+ ErrUnexpectedBlockSize = errors.New("unexpected block size")
+
// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
// Typically this indicates wrong or corrupted input.
ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
@@ -75,6 +82,10 @@ var (
// This is only returned if SingleSegment is specified on the frame.
ErrFrameSizeExceeded = errors.New("frame size exceeded")
+ // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size.
+ // This is only returned if SingleSegment is specified on the frame.
+ ErrFrameSizeMismatch = errors.New("frame size does not match size on stream")
+
// ErrCRCMismatch is returned if CRC mismatches.
ErrCRCMismatch = errors.New("CRC check failed")
@@ -99,17 +110,6 @@ func printf(format string, a ...interface{}) {
}
}
-// matchLenFast does matching, but will not match the last up to 7 bytes.
-func matchLenFast(a, b []byte) int {
- endI := len(a) & (math.MaxInt32 - 7)
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- return i + bits.TrailingZeros64(diff)>>3
- }
- }
- return endI
-}
-
// matchLen returns the maximum length.
// a must be the shortest of the two.
// The function also returns whether all bytes matched.
diff --git a/vendor/github.com/letsencrypt/boulder/LICENSE.txt b/vendor/github.com/letsencrypt/boulder/LICENSE.txt
new file mode 100644
index 00000000000..fa274d92d74
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/LICENSE.txt
@@ -0,0 +1,375 @@
+Copyright 2016 ISRG. All rights reserved.
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/letsencrypt/boulder/core/challenges.go b/vendor/github.com/letsencrypt/boulder/core/challenges.go
new file mode 100644
index 00000000000..4b4a67c4868
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/challenges.go
@@ -0,0 +1,27 @@
+package core
+
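+// newChallenge constructs a Challenge of the given type with StatusPending
+// and the supplied token.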
+func newChallenge(challengeType AcmeChallenge, token string) Challenge {
+ return Challenge{
+ Type: challengeType,
+ Status: StatusPending,
+ Token: token,
+ }
+}
+
+// HTTPChallenge01 constructs a pending http-01 challenge using the provided token.
+func HTTPChallenge01(token string) Challenge {
+ return newChallenge(ChallengeTypeHTTP01, token)
+}
+
+// DNSChallenge01 constructs a pending dns-01 challenge using the provided token.
+func DNSChallenge01(token string) Challenge {
+ return newChallenge(ChallengeTypeDNS01, token)
+}
+
+// TLSALPNChallenge01 constructs a pending tls-alpn-01 challenge using the provided token.
+func TLSALPNChallenge01(token string) Challenge {
+ return newChallenge(ChallengeTypeTLSALPN01, token)
+}
diff --git a/vendor/github.com/letsencrypt/boulder/core/interfaces.go b/vendor/github.com/letsencrypt/boulder/core/interfaces.go
new file mode 100644
index 00000000000..85cdc9a49bc
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/interfaces.go
@@ -0,0 +1,14 @@
+package core
+
+import (
+ "github.com/letsencrypt/boulder/identifier"
+)
+
+// PolicyAuthority defines the public interface for the Boulder PA
+// TODO(#5891): Move this interface to a more appropriate location.
+type PolicyAuthority interface {
+ WillingToIssue(domain identifier.ACMEIdentifier) error
+ WillingToIssueWildcards(identifiers []identifier.ACMEIdentifier) error
+ ChallengesFor(domain identifier.ACMEIdentifier) ([]Challenge, error)
+ ChallengeTypeEnabled(t AcmeChallenge) bool
+}
diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go
new file mode 100644
index 00000000000..9e328e82391
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/objects.go
@@ -0,0 +1,536 @@
+package core
+
+import (
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "hash/fnv"
+ "net"
+ "strings"
+ "time"
+
+ "gopkg.in/square/go-jose.v2"
+
+ "github.com/letsencrypt/boulder/identifier"
+ "github.com/letsencrypt/boulder/probs"
+ "github.com/letsencrypt/boulder/revocation"
+)
+
+// AcmeStatus defines the state of a given authorization
+type AcmeStatus string
+
+// These statuses are the states of authorizations, challenges, and registrations
+const (
+ StatusUnknown = AcmeStatus("unknown") // Unknown status; the default
+ StatusPending = AcmeStatus("pending") // In process; client has next action
+ StatusProcessing = AcmeStatus("processing") // In process; server has next action
+ StatusReady = AcmeStatus("ready") // Order is ready for finalization
+ StatusValid = AcmeStatus("valid") // Object is valid
+ StatusInvalid = AcmeStatus("invalid") // Validation failed
+ StatusRevoked = AcmeStatus("revoked") // Object no longer valid
+ StatusDeactivated = AcmeStatus("deactivated") // Object has been deactivated
+)
+
+// AcmeResource values identify different types of ACME resources
+type AcmeResource string
+
+// The types of ACME resources
+const (
+ ResourceNewReg = AcmeResource("new-reg")
+ ResourceNewAuthz = AcmeResource("new-authz")
+ ResourceNewCert = AcmeResource("new-cert")
+ ResourceRevokeCert = AcmeResource("revoke-cert")
+ ResourceRegistration = AcmeResource("reg")
+ ResourceChallenge = AcmeResource("challenge")
+ ResourceAuthz = AcmeResource("authz")
+ ResourceKeyChange = AcmeResource("key-change")
+)
+
+// AcmeChallenge values identify different types of ACME challenges
+type AcmeChallenge string
+
+// These types are the available challenges
+// TODO(#5009): Make this a custom type as well.
+const (
+ ChallengeTypeHTTP01 = AcmeChallenge("http-01")
+ ChallengeTypeDNS01 = AcmeChallenge("dns-01")
+ ChallengeTypeTLSALPN01 = AcmeChallenge("tls-alpn-01")
+)
+
+// IsValid tests whether the challenge is a known challenge
+func (c AcmeChallenge) IsValid() bool {
+ switch c {
+ case ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01:
+ return true
+ default:
+ return false
+ }
+}
+
+// OCSPStatus defines the state of OCSP for a domain
+type OCSPStatus string
+
+// These statuses are the states of OCSP
+const (
+ OCSPStatusGood = OCSPStatus("good")
+ OCSPStatusRevoked = OCSPStatus("revoked")
+)
+
+// DNSPrefix is attached to DNS names in DNS challenges
+const DNSPrefix = "_acme-challenge"
+
+// CertificateRequest is just a CSR
+//
+// This data is unmarshalled from JSON by way of RawCertificateRequest, which
+// represents the actual structure received from the client.
+type CertificateRequest struct {
+ CSR *x509.CertificateRequest // The CSR
+ Bytes []byte // The original bytes of the CSR, for logging.
+}
+
+type RawCertificateRequest struct {
+ CSR JSONBuffer `json:"csr"` // The encoded CSR
+}
+
+// UnmarshalJSON provides an implementation for decoding CertificateRequest objects.
+func (cr *CertificateRequest) UnmarshalJSON(data []byte) error {
+ var raw RawCertificateRequest
+ err := json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+
+ csr, err := x509.ParseCertificateRequest(raw.CSR)
+ if err != nil {
+ return err
+ }
+
+ cr.CSR = csr
+ cr.Bytes = raw.CSR
+ return nil
+}
+
+// MarshalJSON provides an implementation for encoding CertificateRequest objects.
+func (cr CertificateRequest) MarshalJSON() ([]byte, error) {
+ return json.Marshal(RawCertificateRequest{
+ CSR: cr.CSR.Raw,
+ })
+}
+
+// Registration objects represent non-public metadata attached
+// to account keys.
+type Registration struct {
+ // Unique identifier
+ ID int64 `json:"id,omitempty" db:"id"`
+
+ // Account key to which the details are attached
+ Key *jose.JSONWebKey `json:"key"`
+
+ // Contact URIs
+ Contact *[]string `json:"contact,omitempty"`
+
+ // Agreement with terms of service
+ Agreement string `json:"agreement,omitempty"`
+
+ // InitialIP is the IP address from which the registration was created
+ InitialIP net.IP `json:"initialIp"`
+
+ // CreatedAt is the time the registration was created.
+ CreatedAt *time.Time `json:"createdAt,omitempty"`
+
+ Status AcmeStatus `json:"status"`
+}
+
+// ValidationRecord represents a validation attempt against a specific URL/hostname
+// and the IP addresses that were resolved and used
+type ValidationRecord struct {
+ // SimpleHTTP only
+ URL string `json:"url,omitempty"`
+
+ // Shared
+ Hostname string `json:"hostname"`
+ Port string `json:"port,omitempty"`
+ AddressesResolved []net.IP `json:"addressesResolved,omitempty"`
+ AddressUsed net.IP `json:"addressUsed,omitempty"`
+ // AddressesTried contains a list of addresses tried before the `AddressUsed`.
+ // Presently this will only ever be one IP from `AddressesResolved` since the
+ // only retry is in the case of a v6 failure with one v4 fallback. E.g. if
+ // a record with `AddressesResolved: { 127.0.0.1, ::1 }` were processed for
+ // a challenge validation with the IPv6 first flag on and the ::1 address
+ // failed but the 127.0.0.1 retry succeeded then the record would end up
+ // being:
+ // {
+ // ...
+ // AddressesResolved: [ 127.0.0.1, ::1 ],
+ // AddressUsed: 127.0.0.1
+ // AddressesTried: [ ::1 ],
+ // ...
+ // }
+ AddressesTried []net.IP `json:"addressesTried,omitempty"`
+
+ // OldTLS is true if any request in the validation chain used HTTPS and negotiated
+ // a TLS version lower than 1.2.
+ // TODO(#6011): Remove once TLS 1.0 and 1.1 support is gone.
+ OldTLS bool `json:"oldTLS,omitempty"`
+}
+
+func looksLikeKeyAuthorization(str string) error {
+ parts := strings.Split(str, ".")
+ if len(parts) != 2 {
+ return fmt.Errorf("Invalid key authorization: does not look like a key authorization")
+ } else if !LooksLikeAToken(parts[0]) {
+ return fmt.Errorf("Invalid key authorization: malformed token")
+ } else if !LooksLikeAToken(parts[1]) {
+ // Thumbprints have the same syntax as tokens in boulder
+ // Both are base64-encoded and 32 octets
+ return fmt.Errorf("Invalid key authorization: malformed key thumbprint")
+ }
+ return nil
+}
+
+// Challenge is an aggregate of all data needed for any challenges.
+//
+// Rather than define individual types for different types of
+// challenge, we just throw all the elements into one bucket,
+// together with the common metadata elements.
+type Challenge struct {
+ // The type of challenge
+ Type AcmeChallenge `json:"type"`
+
+ // The status of this challenge
+ Status AcmeStatus `json:"status,omitempty"`
+
+ // Contains the error that occurred during challenge validation, if any
+ Error *probs.ProblemDetails `json:"error,omitempty"`
+
+ // A URI to which a response can be POSTed
+ URI string `json:"uri,omitempty"`
+
+ // For the V2 API the "URI" field is deprecated in favour of URL.
+ URL string `json:"url,omitempty"`
+
+ // Used by http-01, tls-sni-01, tls-alpn-01 and dns-01 challenges
+ Token string `json:"token,omitempty"`
+
+ // The expected KeyAuthorization for validation of the challenge. Populated by
+ // the RA prior to passing the challenge to the VA. For legacy reasons this
+ // field is called "ProvidedKeyAuthorization" because it was initially set by
+ // the content of the challenge update POST from the client. It is no longer
+ // set that way and should be renamed to "KeyAuthorization".
+ // TODO(@cpu): Rename `ProvidedKeyAuthorization` to `KeyAuthorization`.
+ ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"`
+
+ // Contains information about URLs used or redirected to and IPs resolved and
+ // used
+ ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"`
+ // The time at which the server validated the challenge. Required by
+ // RFC8555 if status is valid.
+ Validated *time.Time `json:"validated,omitempty"`
+}
+
+// ExpectedKeyAuthorization computes the expected KeyAuthorization value for
+// the challenge.
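+// Per RFC 8555, section 8.1, the result is token || "." || base64url(SHA-256
+// JWK thumbprint of the account key).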
+func (ch Challenge) ExpectedKeyAuthorization(key *jose.JSONWebKey) (string, error) {
+ if key == nil {
+ return "", fmt.Errorf("Cannot authorize a nil key")
+ }
+
+ thumbprint, err := key.Thumbprint(crypto.SHA256)
+ if err != nil {
+ return "", err
+ }
+
+ return ch.Token + "." + base64.RawURLEncoding.EncodeToString(thumbprint), nil
+}
+
+// RecordsSane checks the sanity of a challenge's validation records before
+// sending them back to the RA to be stored.
+func (ch Challenge) RecordsSane() bool {
+ if len(ch.ValidationRecord) == 0 {
+ return false
+ }
+
+ switch ch.Type {
+ case ChallengeTypeHTTP01:
+ for _, rec := range ch.ValidationRecord {
+ if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || rec.AddressUsed == nil ||
+ len(rec.AddressesResolved) == 0 {
+ return false
+ }
+ }
+ case ChallengeTypeTLSALPN01:
+ if len(ch.ValidationRecord) > 1 {
+ return false
+ }
+ if ch.ValidationRecord[0].URL != "" {
+ return false
+ }
+ if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" ||
+ ch.ValidationRecord[0].AddressUsed == nil || len(ch.ValidationRecord[0].AddressesResolved) == 0 {
+ return false
+ }
+ case ChallengeTypeDNS01:
+ if len(ch.ValidationRecord) > 1 {
+ return false
+ }
+ if ch.ValidationRecord[0].Hostname == "" {
+ return false
+ }
+ return true
+ default: // Unsupported challenge type
+ return false
+ }
+
+ return true
+}
+
+// CheckConsistencyForClientOffer checks the fields of a challenge object before it is
+// given to the client.
+func (ch Challenge) CheckConsistencyForClientOffer() error {
+ err := ch.checkConsistency()
+ if err != nil {
+ return err
+ }
+
+ // Before completion, the key authorization field should be empty
+ if ch.ProvidedKeyAuthorization != "" {
+ return fmt.Errorf("A response to this challenge was already submitted.")
+ }
+ return nil
+}
+
+// CheckConsistencyForValidation checks the fields of a challenge object before it is
+// given to the VA.
+func (ch Challenge) CheckConsistencyForValidation() error {
+ err := ch.checkConsistency()
+ if err != nil {
+ return err
+ }
+
+ // If the challenge is completed, then there should be a key authorization
+ return looksLikeKeyAuthorization(ch.ProvidedKeyAuthorization)
+}
+
+// checkConsistency checks the sanity of a challenge object before it is issued to the client.
+func (ch Challenge) checkConsistency() error {
+ if ch.Status != StatusPending {
+ return fmt.Errorf("The challenge is not pending.")
+ }
+
+ // There always needs to be a token
+ if !LooksLikeAToken(ch.Token) {
+ return fmt.Errorf("The token is missing.")
+ }
+ return nil
+}
+
+// StringID is used to generate an ID for challenges associated with new-style
+// authorizations. This is necessary as these challenges no longer have a unique
+// non-sequential identifier in the new storage scheme. The identifier is
+// generated by taking an FNV hash over the challenge token and type and
+// encoding the first 4 bytes of it using the unpadded base64 URL encoding.
+func (ch Challenge) StringID() string {
+ h := fnv.New128a()
+ h.Write([]byte(ch.Token))
+ h.Write([]byte(ch.Type))
+ return base64.RawURLEncoding.EncodeToString(h.Sum(nil)[0:4])
+}
+
+// Authorization represents the authorization of an account key holder
+// to act on behalf of a domain. This struct is intended to be used both
+// internally and for JSON marshaling on the wire. Any fields that should be
+// suppressed on the wire (e.g., ID, regID) must be made empty before marshaling.
+type Authorization struct {
+ // An identifier for this authorization, unique across
+ // authorizations and certificates within this instance.
+ ID string `json:"id,omitempty" db:"id"`
+
+ // The identifier for which authorization is being given
+ Identifier identifier.ACMEIdentifier `json:"identifier,omitempty" db:"identifier"`
+
+ // The registration ID associated with the authorization
+ RegistrationID int64 `json:"regId,omitempty" db:"registrationID"`
+
+ // The status of the validation of this authorization
+ Status AcmeStatus `json:"status,omitempty" db:"status"`
+
+ // The date after which this authorization will no
+ // longer be considered valid. Note: a certificate may be issued even on the
+ // last day of an authorization's lifetime. The last day for which someone can
+ // hold a valid certificate based on an authorization is authorization
+ // lifetime + certificate lifetime.
+ Expires *time.Time `json:"expires,omitempty" db:"expires"`
+
+ // An array of challenge objects used to validate the
+ // applicant's control of the identifier. For authorizations
+ // in process, these are challenges to be fulfilled; for
+ // final authorizations, they describe the evidence that
+ // the server used in support of granting the authorization.
+ //
+ // There should only ever be one challenge of each type in this
+ // slice and the order of these challenges may not be predictable.
+ Challenges []Challenge `json:"challenges,omitempty" db:"-"`
+
+ // This field is deprecated. It's filled in by WFE for the ACMEv1 API.
+ Combinations [][]int `json:"combinations,omitempty" db:"combinations"`
+
+ // Wildcard is a Boulder-specific Authorization field that indicates the
+ // authorization was created as a result of an order containing a name with
+ // a `*.` wildcard prefix. This will help convey to users that an
+ // Authorization with the identifier `example.com` and one DNS-01 challenge
+ // corresponds to a name `*.example.com` from an associated order.
+ Wildcard bool `json:"wildcard,omitempty" db:"-"`
+}
+
+// FindChallengeByStringID will look for a challenge matching the given ID inside
+// this authorization. If found, it will return the index of that challenge within
+// the Authorization's Challenges array. Otherwise it will return -1.
+func (authz *Authorization) FindChallengeByStringID(id string) int {
+ for i, c := range authz.Challenges {
+ if c.StringID() == id {
+ return i
+ }
+ }
+ return -1
+}
+
+// SolvedBy will look through the Authorization's challenges, returning the type
+// of the *first* challenge it finds with Status: valid, or an error if no
+// challenge is valid.
+func (authz *Authorization) SolvedBy() (*AcmeChallenge, error) {
+ if len(authz.Challenges) == 0 {
+ return nil, fmt.Errorf("Authorization has no challenges")
+ }
+ for _, chal := range authz.Challenges {
+ if chal.Status == StatusValid {
+ return &chal.Type, nil
+ }
+ }
+ return nil, fmt.Errorf("Authorization not solved by any challenge")
+}
+
+// JSONBuffer fields get encoded and decoded JOSE-style, in base64url encoding
+// with stripped padding.
+type JSONBuffer []byte
+
+// base64URLEncode is a URL-safe base64 encoder that strips padding.
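+// (Equivalent to base64.RawURLEncoding.EncodeToString.)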
+func base64URLEncode(data []byte) string {
+ var result = base64.URLEncoding.EncodeToString(data)
+ return strings.TrimRight(result, "=")
+}
+
+// base64URLDecode is a URL-safe base64 decoder that tolerates missing padding
+// by re-adding it before decoding.
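+// (Equivalent to base64.RawURLEncoding.DecodeString for unpadded input.)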
+func base64URLDecode(data string) ([]byte, error) {
+ var missing = (4 - len(data)%4) % 4
+ data += strings.Repeat("=", missing)
+ return base64.URLEncoding.DecodeString(data)
+}
+
+// MarshalJSON encodes a JSONBuffer for transmission.
+func (jb JSONBuffer) MarshalJSON() (result []byte, err error) {
+ return json.Marshal(base64URLEncode(jb))
+}
+
+// UnmarshalJSON decodes a JSONBuffer to an object.
+func (jb *JSONBuffer) UnmarshalJSON(data []byte) (err error) {
+ var str string
+ err = json.Unmarshal(data, &str)
+ if err != nil {
+ return err
+ }
+ *jb, err = base64URLDecode(str)
+ return
+}
+
+// Certificate objects are entirely internal to the server. The only
+// thing exposed on the wire is the certificate itself.
+type Certificate struct {
+ ID int64 `db:"id"`
+ RegistrationID int64 `db:"registrationID"`
+
+ Serial string `db:"serial"`
+ Digest string `db:"digest"`
+ DER []byte `db:"der"`
+ Issued time.Time `db:"issued"`
+ Expires time.Time `db:"expires"`
+}
+
+// CertificateStatus structs are internal to the server. They represent the
+// latest data about the status of the certificate, required for OCSP updating
+// and for validating that the subscriber has accepted the certificate.
+type CertificateStatus struct {
+ ID int64 `db:"id"`
+
+ Serial string `db:"serial"`
+
+ // status: 'good' or 'revoked'. Note that good, expired certificates remain
+ // with status 'good' but don't necessarily get fresh OCSP responses.
+ Status OCSPStatus `db:"status"`
+
+ // ocspLastUpdated: The date and time of the last time we generated an OCSP
+ // response. If we have never generated one, this has the zero value of
+ // time.Time, i.e. Jan 1 1970.
+ OCSPLastUpdated time.Time `db:"ocspLastUpdated"`
+
+ // revokedDate: If status is 'revoked', this is the date and time it was
+ // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970.
+ RevokedDate time.Time `db:"revokedDate"`
+
+ // revokedReason: If status is 'revoked', this is the reason code for the
+ // revocation. Otherwise it is zero (which happens to be the reason
+ // code for 'unspecified').
+ RevokedReason revocation.Reason `db:"revokedReason"`
+
+ LastExpirationNagSent time.Time `db:"lastExpirationNagSent"`
+
+ // The encoded and signed OCSP response.
+ OCSPResponse []byte `db:"ocspResponse"`
+
+ // For performance reasons[0] we duplicate the `Expires` field of the
+ // `Certificates` object/table in `CertificateStatus` to avoid a costly `JOIN`
+ // later on just to retrieve this `Time` value. This helps both the OCSP
+ // updater and the expiration-mailer stay performant.
+ //
+ // Similarly, we add an explicit `IsExpired` boolean to the `CertificateStatus`
+ // table, maintained for the OCSP updater, so that the database can use a
+ // meaningful index on `(isExpired, ocspLastUpdated)` without a `JOIN` on `certificates`.
+ // For more detail see Boulder #1864[0].
+ //
+ // [0]: https://github.com/letsencrypt/boulder/issues/1864
+ NotAfter time.Time `db:"notAfter"`
+ IsExpired bool `db:"isExpired"`
+
+ // TODO(#5152): Change this to an issuance.Issuer(Name)ID after it no longer
+ // has to support both IssuerNameIDs and IssuerIDs.
+ IssuerID int64
+}
+
+// FQDNSet contains the SHA256 hash of the lowercased, comma-joined dNSNames
+// contained in a certificate.
+type FQDNSet struct {
+ ID int64
+ SetHash []byte
+ Serial string
+ Issued time.Time
+ Expires time.Time
+}
+
+// SCTDERs is a convenience type for a list of DER-encoded Signed Certificate Timestamps.
+type SCTDERs [][]byte
+
+// CertDER is a convenience type that helps differentiate what the
+// underlying byte slice contains
+type CertDER []byte
+
+// SuggestedWindow is a type exposed inside the RenewalInfo resource.
+type SuggestedWindow struct {
+ Start time.Time `json:"start"`
+ End time.Time `json:"end"`
+}
+
+// RenewalInfo is a type exposed to clients that query the renewalInfo
+// endpoint specified in draft-aaron-ari.
+type RenewalInfo struct {
+ SuggestedWindow SuggestedWindow `json:"suggestedWindow"`
+}
diff --git a/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go b/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go
new file mode 100644
index 00000000000..02b3515bdcd
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go
@@ -0,0 +1,1182 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.0
+// protoc v3.20.1
+// source: core.proto
+
+package proto
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Challenge struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ Status string `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"`
+ Uri string `protobuf:"bytes,9,opt,name=uri,proto3" json:"uri,omitempty"`
+ Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"`
+ KeyAuthorization string `protobuf:"bytes,5,opt,name=keyAuthorization,proto3" json:"keyAuthorization,omitempty"`
+ Validationrecords []*ValidationRecord `protobuf:"bytes,10,rep,name=validationrecords,proto3" json:"validationrecords,omitempty"`
+ Error *ProblemDetails `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"`
+ Validated int64 `protobuf:"varint,11,opt,name=validated,proto3" json:"validated,omitempty"`
+}
+
+func (x *Challenge) Reset() {
+ *x = Challenge{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Challenge) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Challenge) ProtoMessage() {}
+
+func (x *Challenge) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Challenge.ProtoReflect.Descriptor instead.
+func (*Challenge) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Challenge) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Challenge) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *Challenge) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *Challenge) GetUri() string {
+ if x != nil {
+ return x.Uri
+ }
+ return ""
+}
+
+func (x *Challenge) GetToken() string {
+ if x != nil {
+ return x.Token
+ }
+ return ""
+}
+
+func (x *Challenge) GetKeyAuthorization() string {
+ if x != nil {
+ return x.KeyAuthorization
+ }
+ return ""
+}
+
+func (x *Challenge) GetValidationrecords() []*ValidationRecord {
+ if x != nil {
+ return x.Validationrecords
+ }
+ return nil
+}
+
+func (x *Challenge) GetError() *ProblemDetails {
+ if x != nil {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Challenge) GetValidated() int64 {
+ if x != nil {
+ return x.Validated
+ }
+ return 0
+}
+
+type ValidationRecord struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ Port string `protobuf:"bytes,2,opt,name=port,proto3" json:"port,omitempty"`
+ AddressesResolved [][]byte `protobuf:"bytes,3,rep,name=addressesResolved,proto3" json:"addressesResolved,omitempty"` // net.IP.MarshalText()
+ AddressUsed []byte `protobuf:"bytes,4,opt,name=addressUsed,proto3" json:"addressUsed,omitempty"` // net.IP.MarshalText()
+ Authorities []string `protobuf:"bytes,5,rep,name=authorities,proto3" json:"authorities,omitempty"`
+ Url string `protobuf:"bytes,6,opt,name=url,proto3" json:"url,omitempty"`
+ // A list of addresses tried before the address used (see
+ // core/objects.go and the comment on the ValidationRecord structure
+ // definition for more information.)
+ AddressesTried [][]byte `protobuf:"bytes,7,rep,name=addressesTried,proto3" json:"addressesTried,omitempty"` // net.IP.MarshalText()
+}
+
+func (x *ValidationRecord) Reset() {
+ *x = ValidationRecord{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidationRecord) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidationRecord) ProtoMessage() {}
+
+func (x *ValidationRecord) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidationRecord.ProtoReflect.Descriptor instead.
+func (*ValidationRecord) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ValidationRecord) GetHostname() string {
+ if x != nil {
+ return x.Hostname
+ }
+ return ""
+}
+
+func (x *ValidationRecord) GetPort() string {
+ if x != nil {
+ return x.Port
+ }
+ return ""
+}
+
+func (x *ValidationRecord) GetAddressesResolved() [][]byte {
+ if x != nil {
+ return x.AddressesResolved
+ }
+ return nil
+}
+
+func (x *ValidationRecord) GetAddressUsed() []byte {
+ if x != nil {
+ return x.AddressUsed
+ }
+ return nil
+}
+
+func (x *ValidationRecord) GetAuthorities() []string {
+ if x != nil {
+ return x.Authorities
+ }
+ return nil
+}
+
+func (x *ValidationRecord) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *ValidationRecord) GetAddressesTried() [][]byte {
+ if x != nil {
+ return x.AddressesTried
+ }
+ return nil
+}
+
+type ProblemDetails struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ProblemType string `protobuf:"bytes,1,opt,name=problemType,proto3" json:"problemType,omitempty"`
+ Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
+ HttpStatus int32 `protobuf:"varint,3,opt,name=httpStatus,proto3" json:"httpStatus,omitempty"`
+}
+
+func (x *ProblemDetails) Reset() {
+ *x = ProblemDetails{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ProblemDetails) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProblemDetails) ProtoMessage() {}
+
+func (x *ProblemDetails) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProblemDetails.ProtoReflect.Descriptor instead.
+func (*ProblemDetails) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ProblemDetails) GetProblemType() string {
+ if x != nil {
+ return x.ProblemType
+ }
+ return ""
+}
+
+func (x *ProblemDetails) GetDetail() string {
+ if x != nil {
+ return x.Detail
+ }
+ return ""
+}
+
+func (x *ProblemDetails) GetHttpStatus() int32 {
+ if x != nil {
+ return x.HttpStatus
+ }
+ return 0
+}
+
+type Certificate struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"`
+ Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"`
+ Der []byte `protobuf:"bytes,4,opt,name=der,proto3" json:"der,omitempty"`
+ Issued int64 `protobuf:"varint,5,opt,name=issued,proto3" json:"issued,omitempty"` // Unix timestamp (nanoseconds)
+ Expires int64 `protobuf:"varint,6,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *Certificate) Reset() {
+ *x = Certificate{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Certificate) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Certificate) ProtoMessage() {}
+
+func (x *Certificate) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Certificate.ProtoReflect.Descriptor instead.
+func (*Certificate) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Certificate) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *Certificate) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *Certificate) GetDigest() string {
+ if x != nil {
+ return x.Digest
+ }
+ return ""
+}
+
+func (x *Certificate) GetDer() []byte {
+ if x != nil {
+ return x.Der
+ }
+ return nil
+}
+
+func (x *Certificate) GetIssued() int64 {
+ if x != nil {
+ return x.Issued
+ }
+ return 0
+}
+
+func (x *Certificate) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+type CertificateStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
+ Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+ OcspLastUpdated int64 `protobuf:"varint,4,opt,name=ocspLastUpdated,proto3" json:"ocspLastUpdated,omitempty"`
+ RevokedDate int64 `protobuf:"varint,5,opt,name=revokedDate,proto3" json:"revokedDate,omitempty"`
+ RevokedReason int64 `protobuf:"varint,6,opt,name=revokedReason,proto3" json:"revokedReason,omitempty"`
+ LastExpirationNagSent int64 `protobuf:"varint,7,opt,name=lastExpirationNagSent,proto3" json:"lastExpirationNagSent,omitempty"`
+ OcspResponse []byte `protobuf:"bytes,8,opt,name=ocspResponse,proto3" json:"ocspResponse,omitempty"`
+ NotAfter int64 `protobuf:"varint,9,opt,name=notAfter,proto3" json:"notAfter,omitempty"`
+ IsExpired bool `protobuf:"varint,10,opt,name=isExpired,proto3" json:"isExpired,omitempty"`
+ IssuerID int64 `protobuf:"varint,11,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
+}
+
+func (x *CertificateStatus) Reset() {
+ *x = CertificateStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CertificateStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CertificateStatus) ProtoMessage() {}
+
+func (x *CertificateStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CertificateStatus.ProtoReflect.Descriptor instead.
+func (*CertificateStatus) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *CertificateStatus) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *CertificateStatus) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *CertificateStatus) GetOcspLastUpdated() int64 {
+ if x != nil {
+ return x.OcspLastUpdated
+ }
+ return 0
+}
+
+func (x *CertificateStatus) GetRevokedDate() int64 {
+ if x != nil {
+ return x.RevokedDate
+ }
+ return 0
+}
+
+func (x *CertificateStatus) GetRevokedReason() int64 {
+ if x != nil {
+ return x.RevokedReason
+ }
+ return 0
+}
+
+func (x *CertificateStatus) GetLastExpirationNagSent() int64 {
+ if x != nil {
+ return x.LastExpirationNagSent
+ }
+ return 0
+}
+
+func (x *CertificateStatus) GetOcspResponse() []byte {
+ if x != nil {
+ return x.OcspResponse
+ }
+ return nil
+}
+
+func (x *CertificateStatus) GetNotAfter() int64 {
+ if x != nil {
+ return x.NotAfter
+ }
+ return 0
+}
+
+func (x *CertificateStatus) GetIsExpired() bool {
+ if x != nil {
+ return x.IsExpired
+ }
+ return false
+}
+
+func (x *CertificateStatus) GetIssuerID() int64 {
+ if x != nil {
+ return x.IssuerID
+ }
+ return 0
+}
+
+type Registration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ Contact []string `protobuf:"bytes,3,rep,name=contact,proto3" json:"contact,omitempty"`
+ ContactsPresent bool `protobuf:"varint,4,opt,name=contactsPresent,proto3" json:"contactsPresent,omitempty"`
+ Agreement string `protobuf:"bytes,5,opt,name=agreement,proto3" json:"agreement,omitempty"`
+ InitialIP []byte `protobuf:"bytes,6,opt,name=initialIP,proto3" json:"initialIP,omitempty"`
+ CreatedAt int64 `protobuf:"varint,7,opt,name=createdAt,proto3" json:"createdAt,omitempty"` // Unix timestamp (nanoseconds)
+ Status string `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`
+}
+
+func (x *Registration) Reset() {
+ *x = Registration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Registration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Registration) ProtoMessage() {}
+
+func (x *Registration) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Registration.ProtoReflect.Descriptor instead.
+func (*Registration) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Registration) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Registration) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *Registration) GetContact() []string {
+ if x != nil {
+ return x.Contact
+ }
+ return nil
+}
+
+func (x *Registration) GetContactsPresent() bool {
+ if x != nil {
+ return x.ContactsPresent
+ }
+ return false
+}
+
+func (x *Registration) GetAgreement() string {
+ if x != nil {
+ return x.Agreement
+ }
+ return ""
+}
+
+func (x *Registration) GetInitialIP() []byte {
+ if x != nil {
+ return x.InitialIP
+ }
+ return nil
+}
+
+func (x *Registration) GetCreatedAt() int64 {
+ if x != nil {
+ return x.CreatedAt
+ }
+ return 0
+}
+
+func (x *Registration) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+type Authorization struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Identifier string `protobuf:"bytes,2,opt,name=identifier,proto3" json:"identifier,omitempty"`
+ RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
+ Expires int64 `protobuf:"varint,5,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
+ Challenges []*Challenge `protobuf:"bytes,6,rep,name=challenges,proto3" json:"challenges,omitempty"`
+}
+
+func (x *Authorization) Reset() {
+ *x = Authorization{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Authorization) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Authorization) ProtoMessage() {}
+
+func (x *Authorization) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Authorization.ProtoReflect.Descriptor instead.
+func (*Authorization) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Authorization) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *Authorization) GetIdentifier() string {
+ if x != nil {
+ return x.Identifier
+ }
+ return ""
+}
+
+func (x *Authorization) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *Authorization) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *Authorization) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+func (x *Authorization) GetChallenges() []*Challenge {
+ if x != nil {
+ return x.Challenges
+ }
+ return nil
+}
+
+type Order struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Expires int64 `protobuf:"varint,3,opt,name=expires,proto3" json:"expires,omitempty"`
+ Error *ProblemDetails `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
+ CertificateSerial string `protobuf:"bytes,5,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"`
+ Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
+ Names []string `protobuf:"bytes,8,rep,name=names,proto3" json:"names,omitempty"`
+ BeganProcessing bool `protobuf:"varint,9,opt,name=beganProcessing,proto3" json:"beganProcessing,omitempty"`
+ Created int64 `protobuf:"varint,10,opt,name=created,proto3" json:"created,omitempty"`
+ V2Authorizations []int64 `protobuf:"varint,11,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"`
+}
+
+func (x *Order) Reset() {
+ *x = Order{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Order) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Order) ProtoMessage() {}
+
+func (x *Order) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Order.ProtoReflect.Descriptor instead.
+func (*Order) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Order) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Order) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *Order) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+func (x *Order) GetError() *ProblemDetails {
+ if x != nil {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Order) GetCertificateSerial() string {
+ if x != nil {
+ return x.CertificateSerial
+ }
+ return ""
+}
+
+func (x *Order) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *Order) GetNames() []string {
+ if x != nil {
+ return x.Names
+ }
+ return nil
+}
+
+func (x *Order) GetBeganProcessing() bool {
+ if x != nil {
+ return x.BeganProcessing
+ }
+ return false
+}
+
+func (x *Order) GetCreated() int64 {
+ if x != nil {
+ return x.Created
+ }
+ return 0
+}
+
+func (x *Order) GetV2Authorizations() []int64 {
+ if x != nil {
+ return x.V2Authorizations
+ }
+ return nil
+}
+
+type CRLEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
+ Reason int32 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"`
+ RevokedAt int64 `protobuf:"varint,3,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *CRLEntry) Reset() {
+ *x = CRLEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_core_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CRLEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CRLEntry) ProtoMessage() {}
+
+func (x *CRLEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_core_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CRLEntry.ProtoReflect.Descriptor instead.
+func (*CRLEntry) Descriptor() ([]byte, []int) {
+ return file_core_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *CRLEntry) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *CRLEntry) GetReason() int32 {
+ if x != nil {
+ return x.Reason
+ }
+ return 0
+}
+
+func (x *CRLEntry) GetRevokedAt() int64 {
+ if x != nil {
+ return x.RevokedAt
+ }
+ return 0
+}
+
+var File_core_proto protoreflect.FileDescriptor
+
+var file_core_proto_rawDesc = []byte{
+ 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x6f,
+ 0x72, 0x65, 0x22, 0xab, 0x02, 0x0a, 0x09, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65,
+ 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64,
+ 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03,
+ 0x75, 0x72, 0x69, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x14,
+ 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10,
+ 0x6b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72, 0x65,
+ 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63,
+ 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72,
+ 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f,
+ 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x22, 0xee, 0x01, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c,
+ 0x52, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c,
+ 0x76, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x55, 0x73,
+ 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
+ 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28,
+ 0x0c, 0x52, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65,
+ 0x64, 0x22, 0x6a, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x54, 0x79,
+ 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65,
+ 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xa9, 0x01,
+ 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a,
+ 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a,
+ 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64,
+ 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65,
+ 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12,
+ 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0xeb, 0x02, 0x0a, 0x11, 0x43, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x28, 0x0a, 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61,
+ 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x76,
+ 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b,
+ 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x72,
+ 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f,
+ 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f,
+ 0x63, 0x73, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6e,
+ 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e,
+ 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x45, 0x78, 0x70,
+ 0x69, 0x72, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x45, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49,
+ 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49,
+ 0x44, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xe6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f,
+ 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73,
+ 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x12, 0x1c,
+ 0x0a, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09,
+ 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x22, 0xd6, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69,
+ 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x0a,
+ 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67,
+ 0x65, 0x52, 0x0a, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x4a, 0x04, 0x08,
+ 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0xd7, 0x02, 0x0a, 0x05, 0x4f, 0x72,
+ 0x64, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x65,
+ 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62,
+ 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12,
+ 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73,
+ 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x28, 0x0a,
+ 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f,
+ 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08,
+ 0x06, 0x10, 0x07, 0x22, 0x58, 0x0a, 0x08, 0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12,
+ 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x42, 0x2b, 0x5a,
+ 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73,
+ 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f,
+ 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_core_proto_rawDescOnce sync.Once
+ file_core_proto_rawDescData = file_core_proto_rawDesc
+)
+
+func file_core_proto_rawDescGZIP() []byte {
+ file_core_proto_rawDescOnce.Do(func() {
+ file_core_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_proto_rawDescData)
+ })
+ return file_core_proto_rawDescData
+}
+
+var file_core_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
+var file_core_proto_goTypes = []interface{}{
+ (*Challenge)(nil), // 0: core.Challenge
+ (*ValidationRecord)(nil), // 1: core.ValidationRecord
+ (*ProblemDetails)(nil), // 2: core.ProblemDetails
+ (*Certificate)(nil), // 3: core.Certificate
+ (*CertificateStatus)(nil), // 4: core.CertificateStatus
+ (*Registration)(nil), // 5: core.Registration
+ (*Authorization)(nil), // 6: core.Authorization
+ (*Order)(nil), // 7: core.Order
+ (*CRLEntry)(nil), // 8: core.CRLEntry
+}
+var file_core_proto_depIdxs = []int32{
+ 1, // 0: core.Challenge.validationrecords:type_name -> core.ValidationRecord
+ 2, // 1: core.Challenge.error:type_name -> core.ProblemDetails
+ 0, // 2: core.Authorization.challenges:type_name -> core.Challenge
+ 2, // 3: core.Order.error:type_name -> core.ProblemDetails
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_core_proto_init() }
+func file_core_proto_init() {
+ if File_core_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_core_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Challenge); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidationRecord); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ProblemDetails); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Certificate); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CertificateStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Registration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Authorization); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Order); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_core_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CRLEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_core_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 9,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_core_proto_goTypes,
+ DependencyIndexes: file_core_proto_depIdxs,
+ MessageInfos: file_core_proto_msgTypes,
+ }.Build()
+ File_core_proto = out.File
+ file_core_proto_rawDesc = nil
+ file_core_proto_goTypes = nil
+ file_core_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/letsencrypt/boulder/core/proto/core.proto b/vendor/github.com/letsencrypt/boulder/core/proto/core.proto
new file mode 100644
index 00000000000..946bb16c875
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/proto/core.proto
@@ -0,0 +1,101 @@
+syntax = "proto3";
+
+package core;
+option go_package = "github.com/letsencrypt/boulder/core/proto";
+
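+// Note: field numbers within these messages are not sequential. Protobuf field
+// numbers identify fields on the wire, so they are kept stable (and reserved
+// when a field is removed) rather than renumbered.
+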
+message Challenge {
+ int64 id = 1;
+ string type = 2;
+ string status = 6;
+ string uri = 9;
+ string token = 3;
+ string keyAuthorization = 5;
+ repeated ValidationRecord validationrecords = 10;
+ ProblemDetails error = 7;
+ int64 validated = 11;
+}
+
+message ValidationRecord {
+ string hostname = 1;
+ string port = 2;
+ repeated bytes addressesResolved = 3; // net.IP.MarshalText()
+ bytes addressUsed = 4; // net.IP.MarshalText()
+
+ repeated string authorities = 5;
+ string url = 6;
+ // A list of addresses tried before the address used (see
+ // core/objects.go and the comment on the ValidationRecord structure
+ // definition for more information.)
+ repeated bytes addressesTried = 7; // net.IP.MarshalText()
+}
+
+message ProblemDetails {
+ string problemType = 1;
+ string detail = 2;
+ int32 httpStatus = 3;
+}
+
+message Certificate {
+ int64 registrationID = 1;
+ string serial = 2;
+ string digest = 3;
+ bytes der = 4;
+ int64 issued = 5; // Unix timestamp (nanoseconds)
+ int64 expires = 6; // Unix timestamp (nanoseconds)
+}
+
+message CertificateStatus {
+ string serial = 1;
+ reserved 2; // previously subscriberApproved
+ string status = 3;
+ int64 ocspLastUpdated = 4;
+ int64 revokedDate = 5;
+ int64 revokedReason = 6;
+ int64 lastExpirationNagSent = 7;
+ bytes ocspResponse = 8;
+ int64 notAfter = 9;
+ bool isExpired = 10;
+ int64 issuerID = 11;
+}
+
+message Registration {
+ int64 id = 1;
+ bytes key = 2;
+ repeated string contact = 3;
+ bool contactsPresent = 4;
+ string agreement = 5;
+ bytes initialIP = 6;
+ int64 createdAt = 7; // Unix timestamp (nanoseconds)
+ string status = 8;
+}
+
+message Authorization {
+ string id = 1;
+ string identifier = 2;
+ int64 registrationID = 3;
+ string status = 4;
+ int64 expires = 5; // Unix timestamp (nanoseconds)
+ repeated core.Challenge challenges = 6;
+ reserved 7; // previously combinations
+ reserved 8; // previously v2
+}
+
+message Order {
+ int64 id = 1;
+ int64 registrationID = 2;
+ int64 expires = 3;
+ ProblemDetails error = 4;
+ string certificateSerial = 5;
+ reserved 6; // previously authorizations, deprecated in favor of v2Authorizations
+ string status = 7;
+ repeated string names = 8;
+ bool beganProcessing = 9;
+ int64 created = 10;
+ repeated int64 v2Authorizations = 11;
+}
+
+message CRLEntry {
+ string serial = 1;
+ int32 reason = 2;
+ int64 revokedAt = 3; // Unix timestamp (nanoseconds)
+}
diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go
new file mode 100644
index 00000000000..29f0d9c3dde
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/core/util.go
@@ -0,0 +1,298 @@
+package core
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "expvar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/big"
+ mrand "math/rand"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode"
+
+ jose "gopkg.in/square/go-jose.v2"
+)
+
+// Package variables
+
+// BuildID is set by the compiler (using -ldflags "-X core.BuildID $(git rev-parse --short HEAD)")
+// and is used by GetBuildID
+var BuildID string
+
+// BuildHost is set by the compiler and is used by GetBuildHost
+var BuildHost string
+
+// BuildTime is set by the compiler and is used by GetBuildTime
+var BuildTime string
+
+func init() {
+ expvar.NewString("BuildID").Set(BuildID)
+ expvar.NewString("BuildTime").Set(BuildTime)
+}
+
+// Random stuff
+
+type randSource interface {
+ Read(p []byte) (n int, err error)
+}
+
+// RandReader is the package's source of randomness; it is a variable so that
+// it can be replaced in tests that require deterministic output
+var RandReader randSource = rand.Reader
+
+// RandomString returns a randomly generated string of the requested length.
+func RandomString(byteLength int) string {
+ b := make([]byte, byteLength)
+ _, err := io.ReadFull(RandReader, b)
+ if err != nil {
+ panic(fmt.Sprintf("Error reading random bytes: %s", err))
+ }
+ return base64.RawURLEncoding.EncodeToString(b)
+}
+
+// NewToken produces a random string for Challenges, etc.
+func NewToken() string {
+ return RandomString(32)
+}
+
+var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`)
+
+// LooksLikeAToken checks whether a string represents a 32-octet value in
+// the URL-safe base64 alphabet.
+func LooksLikeAToken(token string) bool {
+ return tokenFormat.MatchString(token)
+}
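+
+// Illustrative sketch (editor's note, not upstream Boulder code): 32 random
+// bytes encode to ceil(32*4/3) = 43 unpadded base64url characters, which is
+// exactly the shape the tokenFormat regexp accepts:
+//
+//	t := NewToken()    // 43-character base64url string
+//	LooksLikeAToken(t) // true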
+
+// Fingerprints
+
+// Fingerprint256 produces an unpadded, URL-safe Base64-encoded SHA256 digest
+// of the data.
+func Fingerprint256(data []byte) string {
+ d := sha256.New()
+ _, _ = d.Write(data) // Never returns an error
+ return base64.RawURLEncoding.EncodeToString(d.Sum(nil))
+}
+
+type Sha256Digest [sha256.Size]byte
+
+// KeyDigest produces a Base64-encoded SHA256 digest of a
+// provided public key.
+func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) {
+ switch t := key.(type) {
+ case *jose.JSONWebKey:
+ if t == nil {
+ return Sha256Digest{}, fmt.Errorf("Cannot compute digest of nil key")
+ }
+ return KeyDigest(t.Key)
+ case jose.JSONWebKey:
+ return KeyDigest(t.Key)
+ default:
+ keyDER, err := x509.MarshalPKIXPublicKey(key)
+ if err != nil {
+ return Sha256Digest{}, err
+ }
+ return sha256.Sum256(keyDER), nil
+ }
+}
+
+// KeyDigestB64 produces a padded, standard Base64-encoded SHA256 digest of a
+// provided public key.
+func KeyDigestB64(key crypto.PublicKey) (string, error) {
+ digest, err := KeyDigest(key)
+ if err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(digest[:]), nil
+}
+
+// KeyDigestEquals determines whether two public keys have the same digest.
+func KeyDigestEquals(j, k crypto.PublicKey) bool {
+ digestJ, errJ := KeyDigestB64(j)
+ digestK, errK := KeyDigestB64(k)
+ // Keys that don't have a valid digest (due to marshalling problems)
+ // are never equal. So, e.g. nil keys are not equal.
+ if errJ != nil || errK != nil {
+ return false
+ }
+ return digestJ == digestK
+}
+
+// PublicKeysEqual determines whether two public keys have the same marshalled
+// bytes as one another
+func PublicKeysEqual(a, b interface{}) (bool, error) {
+ if a == nil || b == nil {
+ return false, errors.New("One or more nil arguments to PublicKeysEqual")
+ }
+ aBytes, err := x509.MarshalPKIXPublicKey(a)
+ if err != nil {
+ return false, err
+ }
+ bBytes, err := x509.MarshalPKIXPublicKey(b)
+ if err != nil {
+ return false, err
+ }
+ return bytes.Equal(aBytes, bBytes), nil
+}
+
+// SerialToString converts a certificate serial number (big.Int) to a string
+// consistently.
+func SerialToString(serial *big.Int) string {
+ return fmt.Sprintf("%036x", serial)
+}
+
+// StringToSerial converts a string into a certificate serial number (big.Int)
+// consistently.
+func StringToSerial(serial string) (*big.Int, error) {
+ var serialNum big.Int
+ if !ValidSerial(serial) {
+ return &serialNum, errors.New("Invalid serial number")
+ }
+ _, err := fmt.Sscanf(serial, "%036x", &serialNum)
+ return &serialNum, err
+}
+
+// ValidSerial tests whether the input string represents a syntactically
+// valid serial number, i.e., that it is a valid hex string of exactly
+// 32 or 36 characters.
+func ValidSerial(serial string) bool {
+ // Originally, serial numbers were 32 hex characters long. We later increased
+ // them to 36, but we allow the shorter ones because they exist in some
+ // production databases.
+ if len(serial) != 32 && len(serial) != 36 {
+ return false
+ }
+ _, err := hex.DecodeString(serial)
+ return err == nil
+}
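+
+// Illustrative sketch (editor's note, not upstream Boulder code) of the
+// round trip between the two representations:
+//
+//	s := SerialToString(big.NewInt(255)) // 34 zeros followed by "ff" (36 hex digits)
+//	n, err := StringToSerial(s)          // n.Int64() == 255, err == nil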
+
+// GetBuildID identifies what build is running.
+func GetBuildID() (retID string) {
+ retID = BuildID
+ if retID == "" {
+ retID = "Unspecified"
+ }
+ return
+}
+
+// GetBuildTime identifies when this build was made
+func GetBuildTime() (retID string) {
+ retID = BuildTime
+ if retID == "" {
+ retID = "Unspecified"
+ }
+ return
+}
+
+// GetBuildHost identifies the building host
+func GetBuildHost() (retID string) {
+ retID = BuildHost
+ if retID == "" {
+ retID = "Unspecified"
+ }
+ return
+}
+
+// IsAnyNilOrZero returns whether any of the supplied values are nil, or (if not)
+// if any of them is its type's zero-value. This is useful for validating that
+// all required fields on a proto message are present.
+func IsAnyNilOrZero(vals ...interface{}) bool {
+ for _, val := range vals {
+ switch v := val.(type) {
+ case nil:
+ return true
+ case []byte:
+ if len(v) == 0 {
+ return true
+ }
+ default:
+ if reflect.ValueOf(v).IsZero() {
+ return true
+ }
+ }
+ }
+ return false
+}
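+
+// Illustrative sketch (editor's note, not upstream Boulder code):
+//
+//	IsAnyNilOrZero(1, "x", []byte{0}) // false
+//	IsAnyNilOrZero(1, "")             // true: "" is string's zero value
+//	IsAnyNilOrZero(nil)               // true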
+
+// UniqueLowerNames returns the set of all unique names in the input after all
+// of them are lowercased. The returned names will be in their lowercased form
+// and sorted alphabetically.
+func UniqueLowerNames(names []string) (unique []string) {
+ nameMap := make(map[string]int, len(names))
+ for _, name := range names {
+ nameMap[strings.ToLower(name)] = 1
+ }
+
+ unique = make([]string, 0, len(nameMap))
+ for name := range nameMap {
+ unique = append(unique, name)
+ }
+ sort.Strings(unique)
+ return
+}
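+
+// Illustrative sketch (editor's note, not upstream Boulder code):
+//
+//	UniqueLowerNames([]string{"B.example.com", "a.example.com", "b.example.com"})
+//	// => []string{"a.example.com", "b.example.com"}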
+
+// LoadCert loads a PEM certificate specified by filename or returns an error
+func LoadCert(filename string) (*x509.Certificate, error) {
+ certPEM, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ block, _ := pem.Decode(certPEM)
+ if block == nil {
+ return nil, fmt.Errorf("No data in cert PEM file %s", filename)
+ }
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ return cert, nil
+}
+
+// retryJitter is used to prevent bunched retried queries from falling into lockstep
+const retryJitter = 0.2
+
+// RetryBackoff calculates a backoff time based on the number of retries. It
+// always adds jitter, so requests that start in unison won't fall into
+// lockstep; because of this, the returned duration can exceed the given
+// maximum by up to a factor of retryJitter. Adapted from
+// https://github.com/grpc/grpc-go/blob/v1.11.3/backoff.go#L77-L96
+func RetryBackoff(retries int, base, max time.Duration, factor float64) time.Duration {
+ if retries == 0 {
+ return 0
+ }
+ backoff, fMax := float64(base), float64(max)
+ for backoff < fMax && retries > 1 {
+ backoff *= factor
+ retries--
+ }
+ if backoff > fMax {
+ backoff = fMax
+ }
+ // Randomize backoff delays so that if a cluster of requests start at
+ // the same time, they won't operate in lockstep.
+ backoff *= (1 - retryJitter) + 2*retryJitter*mrand.Float64()
+ return time.Duration(backoff)
+}
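+
+// Illustrative sketch (editor's note, not upstream Boulder code): with
+// base = 1s, max = 30s and factor = 2, successive attempts sleep roughly
+// 1s, 2s, 4s, 8s, ... (each within ±20% jitter), capped near 30s:
+//
+//	for try := 1; try <= 5; try++ {
+//		time.Sleep(RetryBackoff(try, time.Second, 30*time.Second, 2))
+//		// ... retry the query here ...
+//	}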
+
+// IsASCII determines if every character in a string is encoded in
+// the ASCII character set.
+func IsASCII(str string) bool {
+ for _, r := range str {
+ if r > unicode.MaxASCII {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/letsencrypt/boulder/errors/errors.go b/vendor/github.com/letsencrypt/boulder/errors/errors.go
new file mode 100644
index 00000000000..861d54ad177
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/errors/errors.go
@@ -0,0 +1,177 @@
+// Package errors provides internal-facing error types for use in Boulder. Many
+// of these are transformed directly into Problem Details documents by the WFE.
+// Some, like NotFound, may be handled internally. We avoid using Problem
+// Details documents as part of our internal error system to avoid layering
+// confusions.
+//
+// These error types are specifically for errors that cross RPC boundaries.
+// An error type that does not need to be passed through an RPC can use a plain
+// Go type locally. Our gRPC code is aware of these error types and will
+// serialize and deserialize them automatically.
+package errors
+
+import (
+ "fmt"
+
+ "github.com/letsencrypt/boulder/identifier"
+)
+
+// ErrorType provides a coarse category for BoulderErrors.
+// Objects of type ErrorType should never be directly returned by other
+// functions; instead use the methods below to create an appropriate
+// BoulderError wrapping one of these types.
+type ErrorType int
+
+// These numeric constants are used when sending berrors through gRPC.
+const (
+ // InternalServer is deprecated. Instead, pass a plain Go error. That will get
+ // turned into a probs.InternalServerError by the WFE.
+ InternalServer ErrorType = iota
+ _
+ Malformed
+ Unauthorized
+ NotFound
+ RateLimit
+ RejectedIdentifier
+ InvalidEmail
+ ConnectionFailure
+ _ // Reserved, previously WrongAuthorizationState
+ CAA
+ MissingSCTs
+ Duplicate
+ OrderNotReady
+ DNS
+ BadPublicKey
+ BadCSR
+ AlreadyRevoked
+ BadRevocationReason
+)
+
+func (ErrorType) Error() string {
+ return "urn:ietf:params:acme:error"
+}
+
+// BoulderError represents internal Boulder errors
+type BoulderError struct {
+ Type ErrorType
+ Detail string
+ SubErrors []SubBoulderError
+}
+
+// SubBoulderError represents sub-errors specific to an identifier that are
+// related to a top-level internal Boulder error.
+type SubBoulderError struct {
+ *BoulderError
+ Identifier identifier.ACMEIdentifier
+}
+
+func (be *BoulderError) Error() string {
+ return be.Detail
+}
+
+func (be *BoulderError) Unwrap() error {
+ return be.Type
+}
+
+// WithSubErrors returns a new BoulderError instance created by adding the
+// provided subErrs to the existing BoulderError.
+func (be *BoulderError) WithSubErrors(subErrs []SubBoulderError) *BoulderError {
+ return &BoulderError{
+ Type: be.Type,
+ Detail: be.Detail,
+ SubErrors: append(be.SubErrors, subErrs...),
+ }
+}
+
+// New is a convenience function for creating a new BoulderError
+func New(errType ErrorType, msg string, args ...interface{}) error {
+ return &BoulderError{
+ Type: errType,
+ Detail: fmt.Sprintf(msg, args...),
+ }
+}
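+
+// Illustrative sketch (editor's note, not upstream Boulder code): because
+// BoulderError.Unwrap returns the ErrorType, callers can classify errors
+// with the standard library's errors.Is:
+//
+//	err := New(Malformed, "bad CSR: %s", "no names")
+//	errors.Is(err, Malformed) // true (using the stdlib errors package)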
+
+func InternalServerError(msg string, args ...interface{}) error {
+ return New(InternalServer, msg, args...)
+}
+
+func MalformedError(msg string, args ...interface{}) error {
+ return New(Malformed, msg, args...)
+}
+
+func UnauthorizedError(msg string, args ...interface{}) error {
+ return New(Unauthorized, msg, args...)
+}
+
+func NotFoundError(msg string, args ...interface{}) error {
+ return New(NotFound, msg, args...)
+}
+
+func RateLimitError(msg string, args ...interface{}) error {
+ return &BoulderError{
+ Type: RateLimit,
+ Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...),
+ }
+}
+
+func DuplicateCertificateError(msg string, args ...interface{}) error {
+ return &BoulderError{
+ Type: RateLimit,
+ Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/duplicate-certificate-limit/", args...),
+ }
+}
+
+func FailedValidationError(msg string, args ...interface{}) error {
+ return &BoulderError{
+ Type: RateLimit,
+ Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/failed-validation-limit/", args...),
+ }
+}
+
+func RejectedIdentifierError(msg string, args ...interface{}) error {
+ return New(RejectedIdentifier, msg, args...)
+}
+
+func InvalidEmailError(msg string, args ...interface{}) error {
+ return New(InvalidEmail, msg, args...)
+}
+
+func ConnectionFailureError(msg string, args ...interface{}) error {
+ return New(ConnectionFailure, msg, args...)
+}
+
+func CAAError(msg string, args ...interface{}) error {
+ return New(CAA, msg, args...)
+}
+
+func MissingSCTsError(msg string, args ...interface{}) error {
+ return New(MissingSCTs, msg, args...)
+}
+
+func DuplicateError(msg string, args ...interface{}) error {
+ return New(Duplicate, msg, args...)
+}
+
+func OrderNotReadyError(msg string, args ...interface{}) error {
+ return New(OrderNotReady, msg, args...)
+}
+
+func DNSError(msg string, args ...interface{}) error {
+ return New(DNS, msg, args...)
+}
+
+func BadPublicKeyError(msg string, args ...interface{}) error {
+ return New(BadPublicKey, msg, args...)
+}
+
+func BadCSRError(msg string, args ...interface{}) error {
+ return New(BadCSR, msg, args...)
+}
+
+func AlreadyRevokedError(msg string, args ...interface{}) error {
+ return New(AlreadyRevoked, msg, args...)
+}
+
+func BadRevocationReasonError(reason int64) error {
+ return New(BadRevocationReason, "disallowed revocation reason: %d", reason)
+}
diff --git a/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go b/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go
new file mode 100644
index 00000000000..b8f42509019
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go
@@ -0,0 +1,53 @@
+// Code generated by "stringer -type=FeatureFlag"; DO NOT EDIT.
+
+package features
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[unused-0]
+ _ = x[PrecertificateRevocation-1]
+ _ = x[StripDefaultSchemePort-2]
+ _ = x[NonCFSSLSigner-3]
+ _ = x[StoreIssuerInfo-4]
+ _ = x[StreamlineOrderAndAuthzs-5]
+ _ = x[V1DisableNewValidations-6]
+ _ = x[ExpirationMailerDontLookTwice-7]
+ _ = x[CAAValidationMethods-8]
+ _ = x[CAAAccountURI-9]
+ _ = x[EnforceMultiVA-10]
+ _ = x[MultiVAFullResults-11]
+ _ = x[MandatoryPOSTAsGET-12]
+ _ = x[AllowV1Registration-13]
+ _ = x[StoreRevokerInfo-14]
+ _ = x[RestrictRSAKeySizes-15]
+ _ = x[FasterNewOrdersRateLimit-16]
+ _ = x[ECDSAForAll-17]
+ _ = x[ServeRenewalInfo-18]
+ _ = x[GetAuthzReadOnly-19]
+ _ = x[GetAuthzUseIndex-20]
+ _ = x[CheckFailedAuthorizationsFirst-21]
+ _ = x[AllowReRevocation-22]
+ _ = x[MozRevocationReasons-23]
+ _ = x[OldTLSOutbound-24]
+ _ = x[OldTLSInbound-25]
+ _ = x[SHA1CSRs-26]
+ _ = x[AllowUnrecognizedFeatures-27]
+ _ = x[RejectDuplicateCSRExtensions-28]
+ _ = x[ROCSPStage1-29]
+ _ = x[ROCSPStage2-30]
+}
+
+const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsExpirationMailerDontLookTwiceCAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasonsOldTLSOutboundOldTLSInboundSHA1CSRsAllowUnrecognizedFeaturesRejectDuplicateCSRExtensionsROCSPStage1ROCSPStage2"
+
+var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 157, 177, 190, 204, 222, 240, 259, 275, 294, 318, 329, 345, 361, 377, 407, 424, 444, 458, 471, 479, 504, 532, 543, 554}
+
+func (i FeatureFlag) String() string {
+ if i < 0 || i >= FeatureFlag(len(_FeatureFlag_index)-1) {
+ return "FeatureFlag(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _FeatureFlag_name[_FeatureFlag_index[i]:_FeatureFlag_index[i+1]]
+}
diff --git a/vendor/github.com/letsencrypt/boulder/features/features.go b/vendor/github.com/letsencrypt/boulder/features/features.go
new file mode 100644
index 00000000000..ca4be39ab59
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/features/features.go
@@ -0,0 +1,201 @@
+//go:generate stringer -type=FeatureFlag
+
+package features
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+)
+
+type FeatureFlag int
+
+const (
+ unused FeatureFlag = iota // unused is used for testing
+ // Deprecated features; these can be removed once they are stripped from production configs
+ PrecertificateRevocation
+ StripDefaultSchemePort
+ NonCFSSLSigner
+ StoreIssuerInfo
+ StreamlineOrderAndAuthzs
+ V1DisableNewValidations
+ ExpirationMailerDontLookTwice
+
+ // Currently in-use features
+ // Check CAA and respect validationmethods parameter.
+ CAAValidationMethods
+ // Check CAA and respect accounturi parameter.
+ CAAAccountURI
+ // EnforceMultiVA causes the VA to block on remote VA PerformValidation
+ // requests in order to make a valid/invalid decision with the results.
+ EnforceMultiVA
+ // MultiVAFullResults will cause the main VA to wait for all of the remote VA
+ // results, not just the threshold required to make a decision.
+ MultiVAFullResults
+ // MandatoryPOSTAsGET forbids legacy unauthenticated GET requests for ACME
+ // resources.
+ MandatoryPOSTAsGET
+ // Allow creation of new registrations in ACMEv1.
+ AllowV1Registration
+ // StoreRevokerInfo enables storage of the revoker and a bool indicating if the row
+ // was checked for extant unrevoked certificates in the blockedKeys table.
+ StoreRevokerInfo
+ // RestrictRSAKeySizes enables restriction of acceptable RSA public key moduli to
+ // the common sizes (2048, 3072, and 4096 bits).
+ RestrictRSAKeySizes
+ // FasterNewOrdersRateLimit enables use of a separate table for counting the
+ // new orders rate limit.
+ FasterNewOrdersRateLimit
+ // ECDSAForAll enables all accounts, regardless of their presence in the CA's
+ // ecdsaAllowedAccounts config value, to get issuance from ECDSA issuers.
+ ECDSAForAll
+ // ServeRenewalInfo exposes the renewalInfo endpoint in the directory and for
+ // GET requests. WARNING: This feature is a draft and highly unstable.
+ ServeRenewalInfo
+ // GetAuthzReadOnly causes the SA to use its read-only database connection
+ // (which is generally pointed at a replica rather than the primary db) when
+ // querying the authz2 table.
+ GetAuthzReadOnly
+ // GetAuthzUseIndex causes the SA to add a USE INDEX hint when it
+ // queries the authz2 table.
+ GetAuthzUseIndex
+ // Check the failed authorization limit before doing authz reuse.
+ CheckFailedAuthorizationsFirst
+ // AllowReRevocation causes the RA to allow the revocation reason of an
+ // already-revoked certificate to be updated to `keyCompromise` from any
+ // other reason if that compromise is demonstrated by making the second
+ // revocation request signed by the certificate keypair.
+ AllowReRevocation
+ // MozRevocationReasons causes the RA to enforce the following upcoming
+ // Mozilla policies regarding revocation:
+ // - A subscriber can request that their certificate be revoked with reason
+ // keyCompromise, even without demonstrating that compromise at the time.
+ // However, the cert's pubkey will not be added to the blocked keys list.
+ // - When an applicant other than the original subscriber requests that a
+ // certificate be revoked (by demonstrating control over all names in it),
+ // the cert will be revoked with reason cessationOfOperation, regardless of
+ // what revocation reason they request.
+ // - When anyone requests that a certificate be revoked by signing the request
+ // with the certificate's keypair, the cert will be revoked with reason
+ // keyCompromise, regardless of what revocation reason they request.
+ MozRevocationReasons
+ // OldTLSOutbound allows the VA to negotiate TLS 1.0 and TLS 1.1 during
+ // HTTPS redirects. When it is set to false, the VA will only connect to
+ // HTTPS servers that support TLS 1.2 or above.
+ OldTLSOutbound
+ // OldTLSInbound controls whether the WFE rejects inbound requests using
+ // TLS 1.0 and TLS 1.1. Because WFE does not terminate TLS in production,
+ // we rely on the TLS-Version header (set by our reverse proxy).
+ OldTLSInbound
+ // SHA1CSRs controls whether the /acme/finalize endpoint rejects CSRs that
+ // are self-signed using SHA1.
+ SHA1CSRs
+ // AllowUnrecognizedFeatures is internal to the features package: if true,
+ // skip error when unrecognized feature flag names are passed.
+ AllowUnrecognizedFeatures
+ // RejectDuplicateCSRExtensions enables verification that submitted CSRs do
+ // not contain duplicate extensions. This behavior will be on by default in
+ // go1.19.
+ RejectDuplicateCSRExtensions
+
+ // ROCSPStage1 enables querying Redis, live-signing response, and storing
+ // to Redis, but doesn't serve responses from Redis.
+ ROCSPStage1
+ // ROCSPStage2 enables querying Redis, live-signing a response, and storing
+ // to Redis, and does serve responses from Redis when appropriate (when
+ // they are fresh, and agree with MariaDB's status for the certificate).
+ ROCSPStage2
+)
+
+// List of features and their default value, protected by fMu
+var features = map[FeatureFlag]bool{
+ unused: false,
+ CAAValidationMethods: false,
+ CAAAccountURI: false,
+ EnforceMultiVA: false,
+ MultiVAFullResults: false,
+ MandatoryPOSTAsGET: false,
+ AllowV1Registration: true,
+ V1DisableNewValidations: false,
+ PrecertificateRevocation: false,
+ StripDefaultSchemePort: false,
+ StoreIssuerInfo: false,
+ StoreRevokerInfo: false,
+ RestrictRSAKeySizes: false,
+ FasterNewOrdersRateLimit: false,
+ NonCFSSLSigner: false,
+ ECDSAForAll: false,
+ StreamlineOrderAndAuthzs: false,
+ ServeRenewalInfo: false,
+ GetAuthzReadOnly: false,
+ GetAuthzUseIndex: false,
+ CheckFailedAuthorizationsFirst: false,
+ AllowReRevocation: false,
+ MozRevocationReasons: false,
+ OldTLSOutbound: true,
+ OldTLSInbound: true,
+ SHA1CSRs: true,
+ AllowUnrecognizedFeatures: false,
+ ExpirationMailerDontLookTwice: false,
+ RejectDuplicateCSRExtensions: false,
+ ROCSPStage1: false,
+ ROCSPStage2: false,
+}
+
+var fMu = new(sync.RWMutex)
+
+var initial = map[FeatureFlag]bool{}
+
+var nameToFeature = make(map[string]FeatureFlag, len(features))
+
+func init() {
+ for f, v := range features {
+ nameToFeature[f.String()] = f
+ initial[f] = v
+ }
+}
+
+// Set accepts a map from feature flag names to whether each should be
+// enabled or disabled. If the map contains unrecognized flag names, Set
+// returns an error unless AllowUnrecognizedFeatures is enabled.
+func Set(featureSet map[string]bool) error {
+ fMu.Lock()
+ defer fMu.Unlock()
+ var unknown []string
+ for n, v := range featureSet {
+ f, present := nameToFeature[n]
+ if present {
+ features[f] = v
+ } else {
+ unknown = append(unknown, n)
+ }
+ }
+ if len(unknown) > 0 && !features[AllowUnrecognizedFeatures] {
+ return fmt.Errorf("unrecognized feature flag names: %s",
+ strings.Join(unknown, ", "))
+ }
+ return nil
+}
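+
+// Illustrative sketch (editor's note, not upstream Boulder code): configs
+// typically supply the map after unmarshalling a JSON features section:
+//
+//	err := Set(map[string]bool{"ECDSAForAll": true}) // nil; Enabled(ECDSAForAll) is now true
+//	err = Set(map[string]bool{"NoSuchFlag": true})   // error, unless AllowUnrecognizedFeatures is set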
+
+// Enabled returns true if the feature is enabled, or false if it isn't. It
+// panics if passed a feature flag that it doesn't know.
+func Enabled(n FeatureFlag) bool {
+ fMu.RLock()
+ defer fMu.RUnlock()
+ v, present := features[n]
+ if !present {
+ panic(fmt.Sprintf("feature '%s' doesn't exist", n.String()))
+ }
+ return v
+}
+
+// Reset resets the features to their initial state
+func Reset() {
+ fMu.Lock()
+ defer fMu.Unlock()
+ for k, v := range initial {
+ features[k] = v
+ }
+}
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
new file mode 100644
index 00000000000..acaab252277
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
@@ -0,0 +1,98 @@
+package goodkey
+
+import (
+ "crypto"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "io/ioutil"
+
+ "github.com/letsencrypt/boulder/core"
+
+ yaml "gopkg.in/yaml.v3"
+)
+
+// blockedKeys is a type for maintaining a map of SHA256 hashes
+// of SubjectPublicKeyInfo's that should be considered blocked.
+// blockedKeys are created by using loadBlockedKeysList.
+type blockedKeys map[core.Sha256Digest]bool
+
+var ErrWrongDecodedSize = errors.New("not enough bytes decoded for sha256 hash")
+
+// blocked checks if the given public key is considered administratively
+// blocked based on a SHA256 hash of the SubjectPublicKeyInfo.
+// Important: blocked must only be called on a blockedKeys instance returned
+// from loadBlockedKeysList, i.e. not until after `loadBlockedKeysList` has
+// returned.
+func (b blockedKeys) blocked(key crypto.PublicKey) (bool, error) {
+ hash, err := core.KeyDigest(key)
+ if err != nil {
+ // The bool result should be ignored when err != nil, but to be on the
+ // paranoid side we return true anyway, so that a key whose digest we
+ // can't compute is always treated as blocked even if a caller foolishly
+ // discards the err result.
+ return true, err
+ }
+ return b[hash], nil
+}
+
+// loadBlockedKeysList creates a blockedKeys object that can be used to check if
+// a key is blocked. It creates a lookup map from a list of
+// SHA256 hashes of SubjectPublicKeyInfo's in the input YAML file
+// with the expected format:
+//
+// ```
+// blocked:
+// - cuwGhNNI6nfob5aqY90e7BleU6l7rfxku4X3UTJ3Z7M=
+// - Qebc1V3SkX3izkYRGNJilm9Bcuvf0oox4U2Rn+b4JOE=
+// ```
+//
+// If no hashes are found in the input YAML an error is returned.
+func loadBlockedKeysList(filename string) (*blockedKeys, error) {
+ yamlBytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var list struct {
+ BlockedHashes []string `yaml:"blocked"`
+ BlockedHashesHex []string `yaml:"blockedHashesHex"`
+ }
+ err = yaml.Unmarshal(yamlBytes, &list)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(list.BlockedHashes) == 0 && len(list.BlockedHashesHex) == 0 {
+ return nil, errors.New("no blocked hashes in YAML")
+ }
+
+ blockedKeys := make(blockedKeys, len(list.BlockedHashes)+len(list.BlockedHashesHex))
+ for _, b64Hash := range list.BlockedHashes {
+ decoded, err := base64.StdEncoding.DecodeString(b64Hash)
+ if err != nil {
+ return nil, err
+ }
+ if len(decoded) != sha256.Size {
+ return nil, ErrWrongDecodedSize
+ }
+ var sha256Digest core.Sha256Digest
+ copy(sha256Digest[:], decoded[0:sha256.Size])
+ blockedKeys[sha256Digest] = true
+ }
+ for _, hexHash := range list.BlockedHashesHex {
+ decoded, err := hex.DecodeString(hexHash)
+ if err != nil {
+ return nil, err
+ }
+ if len(decoded) != sha256.Size {
+ return nil, ErrWrongDecodedSize
+ }
+ var sha256Digest core.Sha256Digest
+ copy(sha256Digest[:], decoded[0:sha256.Size])
+ blockedKeys[sha256Digest] = true
+ }
+ return &blockedKeys, nil
+}
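+
+// Illustrative sketch (editor's note, not upstream Boulder code): an entry
+// for the "blocked" list is the standard (padded) base64 encoding of
+// SHA256(SubjectPublicKeyInfo), which core.KeyDigestB64 computes:
+//
+//	b64, _ := core.KeyDigestB64(pub) // pub is any caller-supplied crypto.PublicKey
+//	// append b64 as a "- <hash>" item under "blocked:" in the YAML file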
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
new file mode 100644
index 00000000000..b751c376cd1
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
@@ -0,0 +1,432 @@
+package goodkey
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "math/big"
+ "sync"
+
+ "github.com/letsencrypt/boulder/core"
+ berrors "github.com/letsencrypt/boulder/errors"
+ "github.com/letsencrypt/boulder/features"
+ sapb "github.com/letsencrypt/boulder/sa/proto"
+ "google.golang.org/grpc"
+
+ "github.com/titanous/rocacheck"
+)
+
+// To generate, run: primes 2 752 | tr '\n' ,
+var smallPrimeInts = []int64{
+ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
+ 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107,
+ 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167,
+ 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
+ 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
+ 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
+ 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
+ 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491,
+ 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
+ 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641,
+ 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709,
+ 719, 727, 733, 739, 743, 751,
+}
+
+// smallPrimesSingleton ensures smallPrimesProduct is computed at most once,
+// on first use (a lazily-initialized singleton)
+var (
+ smallPrimesSingleton sync.Once
+ smallPrimesProduct *big.Int
+)
+
+type Config struct {
+ // WeakKeyFile is the path to a JSON file containing truncated modulus hashes
+ // of known weak RSA keys. If this config value is empty, then RSA modulus
+ // hash checking will be disabled.
+ WeakKeyFile string
+ // BlockedKeyFile is the path to a YAML file containing base64-encoded SHA256
+ // hashes of PKIX Subject Public Keys that should be blocked. If this config
+ // value is empty, then blocked key checking will be disabled.
+ BlockedKeyFile string
+ // FermatRounds is an integer number of rounds of Fermat's factorization
+ // method that should be performed to attempt to detect keys whose modulus can
+ // be trivially factored because the two factors are very close to each other.
+ // If this config value is empty (0), no factorization will be attempted.
+ FermatRounds int
+}
+
+// ErrBadKey represents an error with a key. It is distinct from the various
+// ways in which an ACME request can have an erroneous key (BadPublicKeyError,
+// BadCSRError) because this library is used to check both JWS signing keys and
+// keys in CSRs.
+var ErrBadKey = errors.New("")
+
+func badKey(msg string, args ...interface{}) error {
+ return fmt.Errorf("%w%s", ErrBadKey, fmt.Errorf(msg, args...))
+}
+
+// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey method to KeyPolicy,
+// rather than storing a full sa.SQLStorageAuthority. This makes testing
+// significantly simpler.
+type BlockedKeyCheckFunc func(context.Context, *sapb.KeyBlockedRequest, ...grpc.CallOption) (*sapb.Exists, error)
+
+// KeyPolicy determines which types of key may be used with various boulder
+// operations.
+type KeyPolicy struct {
+ AllowRSA bool // Whether RSA keys should be allowed.
+ AllowECDSANISTP256 bool // Whether ECDSA NISTP256 keys should be allowed.
+ AllowECDSANISTP384 bool // Whether ECDSA NISTP384 keys should be allowed.
+ weakRSAList *WeakRSAKeys
+ blockedList *blockedKeys
+ fermatRounds int
+ dbCheck BlockedKeyCheckFunc
+}
+
+// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384.
+// weakKeyFile contains the path to a JSON file containing truncated modulus
+// hashes of known weak RSA keys. If this argument is empty RSA modulus hash
+// checking will be disabled. blockedKeyFile contains the path to a YAML file
+// containing Base64 encoded SHA256 hashes of pkix subject public keys that
+// should be blocked. If this argument is empty then no blocked key checking is
+// performed.
+func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
+ kp := KeyPolicy{
+ AllowRSA: true,
+ AllowECDSANISTP256: true,
+ AllowECDSANISTP384: true,
+ dbCheck: bkc,
+ }
+ if config.WeakKeyFile != "" {
+ keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile)
+ if err != nil {
+ return KeyPolicy{}, err
+ }
+ kp.weakRSAList = keyList
+ }
+ if config.BlockedKeyFile != "" {
+ blocked, err := loadBlockedKeysList(config.BlockedKeyFile)
+ if err != nil {
+ return KeyPolicy{}, err
+ }
+ kp.blockedList = blocked
+ }
+ if config.FermatRounds < 0 {
+ return KeyPolicy{}, fmt.Errorf("Fermat factorization rounds cannot be negative: %d", config.FermatRounds)
+ }
+ kp.fermatRounds = config.FermatRounds
+ return kp, nil
+}
+
+// GoodKey returns true if the key is acceptable for both TLS use and account
+// key use (our requirements are the same for either one), according to basic
+// strength and algorithm checking. GoodKey only supports pointers: *rsa.PublicKey
+// and *ecdsa.PublicKey. It will reject non-pointer types.
+// TODO: Support JSONWebKeys once go-jose migration is done.
+func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) error {
+ // Early rejection of unacceptable key types to guard subsequent checks.
+ switch t := key.(type) {
+ case *rsa.PublicKey, *ecdsa.PublicKey:
+ break
+ default:
+ return badKey("unsupported key type %T", t)
+ }
+ // If there is a blocked list configured then check if the public key is one
+ // that has been administratively blocked.
+ if policy.blockedList != nil {
+ if blocked, err := policy.blockedList.blocked(key); err != nil {
+ return berrors.InternalServerError("error checking blocklist for key: %v", key)
+ } else if blocked {
+ return badKey("public key is forbidden")
+ }
+ }
+ if policy.dbCheck != nil {
+ digest, err := core.KeyDigest(key)
+ if err != nil {
+ return badKey("%w", err)
+ }
+ exists, err := policy.dbCheck(ctx, &sapb.KeyBlockedRequest{KeyHash: digest[:]})
+ if err != nil {
+ return err
+ } else if exists.Exists {
+ return badKey("public key is forbidden")
+ }
+ }
+ switch t := key.(type) {
+ case *rsa.PublicKey:
+ return policy.goodKeyRSA(t)
+ case *ecdsa.PublicKey:
+ return policy.goodKeyECDSA(t)
+ default:
+ return badKey("unsupported key type %T", key)
+ }
+}
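+
+// Illustrative sketch (editor's note, not upstream Boulder code) of a
+// minimal caller, with no weak/blocked lists configured; ctx and pub are
+// caller-supplied:
+//
+//	kp, _ := NewKeyPolicy(&Config{}, nil)
+//	err := kp.GoodKey(ctx, pub) // nil iff pub is an acceptable *rsa or *ecdsa key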
+
+// GoodKeyECDSA determines if an ECDSA pubkey meets our requirements
+func (policy *KeyPolicy) goodKeyECDSA(key *ecdsa.PublicKey) (err error) {
+ // Check the curve.
+ //
+ // The validity of the curve is an assumption for all following tests.
+ err = policy.goodCurve(key.Curve)
+ if err != nil {
+ return err
+ }
+
+ // Key validation routine adapted from NIST SP800-56A § 5.6.2.3.2.
+ //
+ // Assuming a prime field since a) we are only allowing such curves and b)
+ // crypto/elliptic only supports prime curves. Where this assumption
+ // simplifies the code below, it is explicitly stated and explained. If ever
+ // adapting this code to support non-prime curves, refer to NIST SP800-56A §
+ // 5.6.2.3.2 and adapt this code appropriately.
+ params := key.Params()
+
+ // SP800-56A § 5.6.2.3.2 Step 1.
+ // Partial check of the public key for an invalid range in the EC group:
+ // Verify that key is not the point at infinity O.
+ // This code assumes that the point at infinity is (0,0), which is the
+ // case for all supported curves.
+ if isPointAtInfinityNISTP(key.X, key.Y) {
+ return badKey("key x, y must not be the point at infinity")
+ }
+
+ // SP800-56A § 5.6.2.3.2 Step 2.
+ // "Verify that x_Q and y_Q are integers in the interval [0,p-1] in the
+ // case that q is an odd prime p, or that x_Q and y_Q are bit strings
+ // of length m bits in the case that q = 2**m."
+ //
+ // Prove prime field: ASSUMED.
+ // Prove q != 2: ASSUMED. (Curve parameter. No supported curve has q == 2.)
+ // Prime field && q != 2 => q is an odd prime p
+ // Therefore "verify that x, y are in [0, p-1]" satisfies step 2.
+ //
+ // Therefore verify that both x and y of the public key point have the unique
+ // correct representation of an element in the underlying field by verifying
+ // that x and y are integers in [0, p-1].
+ if key.X.Sign() < 0 || key.Y.Sign() < 0 {
+ return badKey("key x, y must not be negative")
+ }
+
+ if key.X.Cmp(params.P) >= 0 || key.Y.Cmp(params.P) >= 0 {
+ return badKey("key x, y must not exceed P-1")
+ }
+
+ // SP800-56A § 5.6.2.3.2 Step 3.
+ // "If q is an odd prime p, verify that (y_Q)**2 === (x_Q)***3 + a*x_Q + b (mod p).
+ // If q = 2**m, verify that (y_Q)**2 + (x_Q)*(y_Q) == (x_Q)**3 + a*(x_Q)*2 + b in
+ // the finite field of size 2**m.
+ // (Ensures that the public key is on the correct elliptic curve.)"
+ //
+ // q is an odd prime p: proven/assumed above.
+ // a = -3 for all supported curves.
+ //
+ // Therefore step 3 is satisfied simply by showing that
+ // y**2 === x**3 - 3*x + B (mod P).
+ //
+ // This proves that the public key is on the correct elliptic curve.
+ // But in practice, this test is provided by crypto/elliptic, so use that.
+ if !key.Curve.IsOnCurve(key.X, key.Y) {
+ return badKey("key point is not on the curve")
+ }
+
+ // SP800-56A § 5.6.2.3.2 Step 4.
+ // "Verify that n*Q == Ø.
+ // (Ensures that the public key has the correct order. Along with check 1,
+ // ensures that the public key is in the correct range in the correct EC
+ // subgroup, that is, it is in the correct EC subgroup and is not the
+ // identity element.)"
+ //
+ // Ensure that public key has the correct order:
+ // verify that n*Q = Ø.
+ //
+ // n*Q = Ø iff n*Q is the point at infinity (see step 1).
+ ox, oy := key.Curve.ScalarMult(key.X, key.Y, params.N.Bytes())
+ if !isPointAtInfinityNISTP(ox, oy) {
+ return badKey("public key does not have correct order")
+ }
+
+ // End of SP800-56A § 5.6.2.3.2 Public Key Validation Routine.
+ // Key is valid.
+ return nil
+}
+
+// Returns true iff the point (x,y) on NIST P-256, NIST P-384 or NIST P-521 is
+// the point at infinity. These curves all have the same point at infinity
+// (0,0). This function must ONLY be used on points on curves verified to have
+// (0,0) as their point at infinity.
+func isPointAtInfinityNISTP(x, y *big.Int) bool {
+ return x.Sign() == 0 && y.Sign() == 0
+}
+
+// GoodCurve determines if an elliptic curve meets our requirements.
+func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) {
+ // Simply use a whitelist for now.
+ params := c.Params()
+ switch {
+ case policy.AllowECDSANISTP256 && params == elliptic.P256().Params():
+ return nil
+ case policy.AllowECDSANISTP384 && params == elliptic.P384().Params():
+ return nil
+ default:
+ return badKey("ECDSA curve %v not allowed", params.Name)
+ }
+}
+
+var acceptableRSAKeySizes = map[int]bool{
+ 2048: true,
+ 3072: true,
+ 4096: true,
+}
+
+// GoodKeyRSA determines if an RSA pubkey meets our requirements
+func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) {
+ if !policy.AllowRSA {
+ return badKey("RSA keys are not allowed")
+ }
+ if policy.weakRSAList != nil && policy.weakRSAList.Known(key) {
+ return badKey("key is on a known weak RSA key list")
+ }
+
+ // Baseline Requirements Appendix A
+ // Modulus must be >= 2048 bits and <= 4096 bits
+ modulus := key.N
+ modulusBitLen := modulus.BitLen()
+ if features.Enabled(features.RestrictRSAKeySizes) {
+ if !acceptableRSAKeySizes[modulusBitLen] {
+ return badKey("key size not supported: %d", modulusBitLen)
+ }
+ } else {
+ const maxKeySize = 4096
+ if modulusBitLen < 2048 {
+ return badKey("key too small: %d", modulusBitLen)
+ }
+ if modulusBitLen > maxKeySize {
+ return badKey("key too large: %d > %d", modulusBitLen, maxKeySize)
+ }
+ // Bit lengths that are not a multiple of 8 may cause problems on some
+ // client implementations.
+ if modulusBitLen%8 != 0 {
+ return badKey("key length wasn't a multiple of 8: %d", modulusBitLen)
+ }
+ }
+
+ // Rather than support arbitrary exponents, which significantly increases
+// the size of the key space we allow, we restrict E to the de facto standard
+ // RSA exponent 65537. There is no specific standards document that specifies
+ // 65537 as the 'best' exponent, but ITU X.509 Annex C suggests there are
+ // notable merits for using it if using a fixed exponent.
+ //
+ // The CABF Baseline Requirements state:
+ // The CA SHALL confirm that the value of the public exponent is an
+ // odd number equal to 3 or more. Additionally, the public exponent
+ // SHOULD be in the range between 2^16 + 1 and 2^256-1.
+ //
+ // By only allowing one exponent, which fits these constraints, we satisfy
+ // these requirements.
+ if key.E != 65537 {
+ return badKey("key exponent must be 65537")
+ }
+
+ // The modulus SHOULD also have the following characteristics: an odd
+ // number, not the power of a prime, and have no factors smaller than 752.
+ // TODO: We don't yet check for "power of a prime."
+ if checkSmallPrimes(modulus) {
+ return badKey("key divisible by small prime")
+ }
+ // Check for weak keys generated by Infineon hardware
+ // (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17)
+ if rocacheck.IsWeak(key) {
+ return badKey("key generated by vulnerable Infineon-based hardware")
+ }
+ // Check if the key can be easily factored via Fermat's factorization method.
+ if policy.fermatRounds > 0 {
+ err := checkPrimeFactorsTooClose(modulus, policy.fermatRounds)
+ if err != nil {
+ return badKey("key generated with factors too close together: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// Returns true iff integer i is divisible by any of the primes in smallPrimes.
+//
+// Short circuits; execution time is dependent on i. Do not use this on secret
+// values.
+//
+// Rather than checking each prime individually (invoking Mod on each),
+// multiply the primes together and let GCD do our work for us: if the GCD
+// between i and the product of the small primes is not one, we know we
+// have a bad key. This is substantially faster than checking each prime
+// individually.
+func checkSmallPrimes(i *big.Int) bool {
+ smallPrimesSingleton.Do(func() {
+ smallPrimesProduct = big.NewInt(1)
+ for _, prime := range smallPrimeInts {
+ smallPrimesProduct.Mul(smallPrimesProduct, big.NewInt(prime))
+ }
+ })
+
+ // When the GCD is 1, i and smallPrimesProduct are coprime, meaning they
+ // share no common factors. When the GCD is not one, it is the product of
+ // all common factors, meaning we've identified at least one small prime
+ // which invalidates i as a valid key.
+
+ var result big.Int
+ result.GCD(nil, nil, i, smallPrimesProduct)
+ return result.Cmp(big.NewInt(1)) != 0
+}
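+
+// Illustrative sketch (editor's note, not upstream Boulder code) on small
+// numbers: 5 and 7 both divide the product, so GCD(35, product) = 35 != 1,
+// while 757 (the first prime above 751) shares no factor with it:
+//
+//	checkSmallPrimes(big.NewInt(35))  // true: divisible by small primes
+//	checkSmallPrimes(big.NewInt(757)) // false: no factor <= 751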
+
+// Returns an error if the modulus n is able to be factored into primes p and q
+// via Fermat's factorization method. This method relies on the two primes being
+// very close together, which means that they were almost certainly not picked
+// independently from a uniform random distribution. Basically, if we can factor
+// the key this easily, so can anyone else.
+func checkPrimeFactorsTooClose(n *big.Int, rounds int) error {
+ // Pre-allocate some big numbers that we'll use a lot down below.
+ one := big.NewInt(1)
+ bb := new(big.Int)
+
+ // Any odd integer is equal to a difference of squares of integers:
+ // n = a^2 - b^2 = (a + b)(a - b)
+ // Any RSA public key modulus is equal to a product of two primes:
+ // n = pq
+ // Here we try to find values for a and b, since doing so also gives us the
+ // prime factors p = (a + b) and q = (a - b).
+
+ // We start with a close to the square root of the modulus n, to start with
+ // two candidate prime factors that are as close together as possible and
+ // work our way out from there. Specifically, we set a = ceil(sqrt(n)), the
+ // first integer greater than the square root of n. Unfortunately, big.Int's
+ // built-in square root function takes the floor, so we have to add one to get
+ // the ceil.
+ a := new(big.Int)
+ a.Sqrt(n).Add(a, one)
+
+ // We calculate b2 to see if it is a perfect square (i.e. b^2), and therefore
+ // b is an integer. Specifically, b2 = a^2 - n.
+ b2 := new(big.Int)
+ b2.Mul(a, a).Sub(b2, n)
+
+ for i := 0; i < rounds; i++ {
+ // To see if b2 is a perfect square, we take its square root, square that,
+ // and check to see if we got the same result back.
+ bb.Sqrt(b2).Mul(bb, bb)
+ if b2.Cmp(bb) == 0 {
+ // b2 is a perfect square, so we've found integer values of a and b,
+ // and can easily compute p and q as their sum and difference.
+ bb.Sqrt(bb)
+ p := new(big.Int).Add(a, bb)
+ q := new(big.Int).Sub(a, bb)
+ return fmt.Errorf("public modulus n = pq factored into p: %s; q: %s", p, q)
+ }
+
+ // Set up the next iteration by incrementing a by one and recalculating b2.
+ a.Add(a, one)
+ b2.Mul(a, a).Sub(b2, n)
+ }
+ return nil
+}
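+
+// Illustrative sketch (editor's note, not upstream Boulder code): for
+// n = 23*29 = 667, a starts at ceil(sqrt(667)) = 26, and b2 = 26*26 - 667 = 9
+// is already a perfect square, so one round recovers p = 26+3 = 29 and
+// q = 26-3 = 23:
+//
+//	err := checkPrimeFactorsTooClose(big.NewInt(667), 1)
+//	// err: "public modulus n = pq factored into p: 29; q: 23"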
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/weak.go b/vendor/github.com/letsencrypt/boulder/goodkey/weak.go
new file mode 100644
index 00000000000..4a63af09a0a
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/weak.go
@@ -0,0 +1,66 @@
+package goodkey
+
+// This file defines a basic method for testing if a given RSA public key is on one of
+// the Debian weak key lists and is therefore considered compromised. Instead of
+// directly loading the hash suffixes from the individual lists we flatten them all
+// into a single JSON list using cmd/weak-key-flatten for ease of use.
+
+import (
+ "crypto/rsa"
+ "crypto/sha1"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+)
+
+type truncatedHash [10]byte
+
+type WeakRSAKeys struct {
+ suffixes map[truncatedHash]struct{}
+}
+
+func LoadWeakRSASuffixes(path string) (*WeakRSAKeys, error) {
+ f, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var suffixList []string
+ err = json.Unmarshal(f, &suffixList)
+ if err != nil {
+ return nil, err
+ }
+
+ wk := &WeakRSAKeys{suffixes: make(map[truncatedHash]struct{})}
+ for _, suffix := range suffixList {
+ err := wk.addSuffix(suffix)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return wk, nil
+}
+
+func (wk *WeakRSAKeys) addSuffix(str string) error {
+ var suffix truncatedHash
+ decoded, err := hex.DecodeString(str)
+ if err != nil {
+ return err
+ }
+ if len(decoded) != 10 {
+ return fmt.Errorf("unexpected suffix length of %d", len(decoded))
+ }
+ copy(suffix[:], decoded)
+ wk.suffixes[suffix] = struct{}{}
+ return nil
+}
+
+func (wk *WeakRSAKeys) Known(key *rsa.PublicKey) bool {
+ // Hash input is in the format "Modulus={upper-case hex of modulus}\n"
+ hash := sha1.Sum([]byte(fmt.Sprintf("Modulus=%X\n", key.N.Bytes())))
+ var suffix truncatedHash
+ copy(suffix[:], hash[10:])
+ _, present := wk.suffixes[suffix]
+ return present
+}
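+
+// Illustrative sketch (editor's note, not upstream Boulder code): a suffix in
+// the flattened JSON list is the hex encoding of the last 10 bytes of
+// SHA1("Modulus=<upper-case hex>\n"), i.e. 20 hex characters:
+//
+//	hash := sha1.Sum([]byte(fmt.Sprintf("Modulus=%X\n", key.N.Bytes())))
+//	suffix := hex.EncodeToString(hash[10:]) // matches one addSuffix entry if weak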
diff --git a/vendor/github.com/letsencrypt/boulder/identifier/identifier.go b/vendor/github.com/letsencrypt/boulder/identifier/identifier.go
new file mode 100644
index 00000000000..cbf228f869f
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/identifier/identifier.go
@@ -0,0 +1,32 @@
+// Package identifier defines types for RFC 8555 ACME identifiers.
+package identifier
+
+// IdentifierType is a named string type for registered ACME identifier types.
+// See https://tools.ietf.org/html/rfc8555#section-9.7.7
+type IdentifierType string
+
+const (
+ // DNS is specified in RFC 8555 for DNS type identifiers.
+ DNS = IdentifierType("dns")
+)
+
+// ACMEIdentifier is a struct encoding an identifier that can be validated. The
+// protocol allows for different types of identifier to be supported (DNS
+// names, IP addresses, etc.), but currently we only support RFC 8555 DNS type
+// identifiers for domain names.
+type ACMEIdentifier struct {
+ // Type is the registered IdentifierType of the identifier.
+ Type IdentifierType `json:"type"`
+ // Value is the value of the identifier. For a DNS type identifier it is
+ // a domain name.
+ Value string `json:"value"`
+}
+
+// DNSIdentifier is a convenience function for creating an ACMEIdentifier with
+// Type DNS for a given domain name.
+func DNSIdentifier(domain string) ACMEIdentifier {
+ return ACMEIdentifier{
+ Type: DNS,
+ Value: domain,
+ }
+}
diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go
new file mode 100644
index 00000000000..3736e8d391e
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/probs/probs.go
@@ -0,0 +1,349 @@
+package probs
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/letsencrypt/boulder/identifier"
+)
+
+// Error types that can be used in ACME payloads
+const (
+ ConnectionProblem = ProblemType("connection")
+ MalformedProblem = ProblemType("malformed")
+ ServerInternalProblem = ProblemType("serverInternal")
+ TLSProblem = ProblemType("tls")
+ UnauthorizedProblem = ProblemType("unauthorized")
+ RateLimitedProblem = ProblemType("rateLimited")
+ BadNonceProblem = ProblemType("badNonce")
+ InvalidEmailProblem = ProblemType("invalidEmail")
+ RejectedIdentifierProblem = ProblemType("rejectedIdentifier")
+ AccountDoesNotExistProblem = ProblemType("accountDoesNotExist")
+ CAAProblem = ProblemType("caa")
+ DNSProblem = ProblemType("dns")
+ AlreadyRevokedProblem = ProblemType("alreadyRevoked")
+ OrderNotReadyProblem = ProblemType("orderNotReady")
+ BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm")
+ BadPublicKeyProblem = ProblemType("badPublicKey")
+ BadRevocationReasonProblem = ProblemType("badRevocationReason")
+ BadCSRProblem = ProblemType("badCSR")
+
+ V1ErrorNS = "urn:acme:error:"
+ V2ErrorNS = "urn:ietf:params:acme:error:"
+)
+
+// ProblemType defines the error types in the ACME protocol
+type ProblemType string
+
+// ProblemDetails objects represent problem documents
+// https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00
+type ProblemDetails struct {
+ Type ProblemType `json:"type,omitempty"`
+ Detail string `json:"detail,omitempty"`
+ // HTTPStatus is the HTTP status code the ProblemDetails should probably be sent
+ // as.
+ HTTPStatus int `json:"status,omitempty"`
+ // SubProblems are optional additional per-identifier problems. See
+ // RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1
+ SubProblems []SubProblemDetails `json:"subproblems,omitempty"`
+}
+
+// SubProblemDetails represents sub-problems specific to an identifier that are
+// related to a top-level ProblemDetails.
+// See RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1
+type SubProblemDetails struct {
+ ProblemDetails
+ Identifier identifier.ACMEIdentifier `json:"identifier"`
+}
+
+func (pd *ProblemDetails) Error() string {
+ return fmt.Sprintf("%s :: %s", pd.Type, pd.Detail)
+}
+
+// WithSubProblems returns a new ProblemsDetails instance created by adding the
+// provided subProbs to the existing ProblemsDetail.
+func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *ProblemDetails {
+ return &ProblemDetails{
+ Type: pd.Type,
+ Detail: pd.Detail,
+ HTTPStatus: pd.HTTPStatus,
+ SubProblems: append(pd.SubProblems, subProbs...),
+ }
+}
+
+// statusTooManyRequests is the HTTP status code meant for rate limiting
+// errors. It's defined as a local constant (equal to
+// http.StatusTooManyRequests) for use throughout this package.
+const statusTooManyRequests = 429
+
+// ProblemDetailsToStatusCode inspects the given ProblemDetails to figure out
+// what HTTP status code it should represent. It should only be used by the WFE
+// but is included in this package because of its reliance on ProblemTypes.
+func ProblemDetailsToStatusCode(prob *ProblemDetails) int {
+ if prob.HTTPStatus != 0 {
+ return prob.HTTPStatus
+ }
+ switch prob.Type {
+ case
+ ConnectionProblem,
+ MalformedProblem,
+ BadSignatureAlgorithmProblem,
+ BadPublicKeyProblem,
+ TLSProblem,
+ BadNonceProblem,
+ InvalidEmailProblem,
+ RejectedIdentifierProblem,
+ AccountDoesNotExistProblem,
+ BadRevocationReasonProblem:
+ return http.StatusBadRequest
+ case ServerInternalProblem:
+ return http.StatusInternalServerError
+ case
+ UnauthorizedProblem,
+ CAAProblem:
+ return http.StatusForbidden
+ case RateLimitedProblem:
+ return statusTooManyRequests
+ default:
+ return http.StatusInternalServerError
+ }
+}
+
+// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad
+// Request status code.
+func BadNonce(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: BadNonceProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad
+// Request status code.
+func RejectedIdentifier(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: RejectedIdentifierProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict
+// status code.
+func Conflict(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusConflict,
+ }
+}
+
+// AlreadyRevoked returns a ProblemDetails with an AlreadyRevokedProblem and a 400 Bad
+// Request status code.
+func AlreadyRevoked(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: AlreadyRevokedProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad
+// Request status code.
+func Malformed(detail string, args ...interface{}) *ProblemDetails {
+ if len(args) > 0 {
+ detail = fmt.Sprintf(detail, args...)
+ }
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request
+// Timeout status code.
+func Canceled(detail string, args ...interface{}) *ProblemDetails {
+ if len(args) > 0 {
+ detail = fmt.Sprintf(detail, args...)
+ }
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusRequestTimeout,
+ }
+}
+
+// BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem
+// and a 400 Bad Request status code.
+func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: BadSignatureAlgorithmProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad
+// Request status code.
+func BadPublicKey(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: BadPublicKeyProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found
+// status code.
+func NotFound(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusNotFound,
+ }
+}
+
+// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a
+// 500 Internal Server Error status code.
+func ServerInternal(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: ServerInternalProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusInternalServerError,
+ }
+}
+
+// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403
+// Forbidden status code.
+func Unauthorized(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: UnauthorizedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusForbidden,
+ }
+}
+
+// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP
+// method error.
+func MethodNotAllowed() *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: "Method not allowed",
+ HTTPStatus: http.StatusMethodNotAllowed,
+ }
+}
+
+// ContentLengthRequired returns a ProblemDetails representing a missing
+// Content-Length header error
+func ContentLengthRequired() *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: "missing Content-Length header",
+ HTTPStatus: http.StatusLengthRequired,
+ }
+}
+
+// InvalidContentType returns a ProblemDetails suitable for a missing or
+// incorrect Content-Type header
+func InvalidContentType(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusUnsupportedMediaType,
+ }
+}
+
+// InvalidEmail returns a ProblemDetails representing an invalid email address
+// error
+func InvalidEmail(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: InvalidEmailProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// ConnectionFailure returns a ProblemDetails representing a ConnectionProblem
+// error
+func ConnectionFailure(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: ConnectionProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// RateLimited returns a ProblemDetails representing a RateLimitedProblem error
+func RateLimited(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: RateLimitedProblem,
+ Detail: detail,
+ HTTPStatus: statusTooManyRequests,
+ }
+}
+
+// TLSError returns a ProblemDetails representing a TLSProblem error
+func TLSError(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: TLSProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// AccountDoesNotExist returns a ProblemDetails representing an
+// AccountDoesNotExistProblem error
+func AccountDoesNotExist(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: AccountDoesNotExistProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// CAA returns a ProblemDetails representing a CAAProblem
+func CAA(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: CAAProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusForbidden,
+ }
+}
+
+// DNS returns a ProblemDetails representing a DNSProblem
+func DNS(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: DNSProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// OrderNotReady returns a ProblemDetails representing an OrderNotReadyProblem
+func OrderNotReady(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: OrderNotReadyProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusForbidden,
+ }
+}
+
+// BadRevocationReason returns a ProblemDetails representing
+// a BadRevocationReasonProblem
+func BadRevocationReason(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: BadRevocationReasonProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
+
+// BadCSR returns a ProblemDetails representing a BadCSRProblem.
+func BadCSR(detail string, a ...interface{}) *ProblemDetails {
+ return &ProblemDetails{
+ Type: BadCSRProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
+ }
+}
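
These constructors pair a problem type with the HTTP status the caller should
write back. A minimal sketch of that pattern, assuming ProblemDetails carries
the usual RFC 7807 JSON tags (not shown in this hunk) and with handler/main as
hypothetical names:

	package main

	import (
		"encoding/json"
		"net/http"

		"github.com/letsencrypt/boulder/probs"
	)

	func handler(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			// The constructor already chose the right status code.
			prob := probs.MethodNotAllowed()
			w.Header().Set("Content-Type", "application/problem+json")
			w.WriteHeader(prob.HTTPStatus)
			_ = json.NewEncoder(w).Encode(prob)
			return
		}
		w.WriteHeader(http.StatusNoContent)
	}

	func main() {
		http.HandleFunc("/", handler)
		_ = http.ListenAndServe("127.0.0.1:8080", nil)
	}
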
diff --git a/vendor/github.com/letsencrypt/boulder/revocation/reasons.go b/vendor/github.com/letsencrypt/boulder/revocation/reasons.go
new file mode 100644
index 00000000000..50f556be011
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/revocation/reasons.go
@@ -0,0 +1,72 @@
+package revocation
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "golang.org/x/crypto/ocsp"
+)
+
+// Reason is used to specify a certificate revocation reason.
+type Reason int
+
+// ReasonToString provides a map from reason code to string.
+var ReasonToString = map[Reason]string{
+ ocsp.Unspecified: "unspecified",
+ ocsp.KeyCompromise: "keyCompromise",
+ ocsp.CACompromise: "cACompromise",
+ ocsp.AffiliationChanged: "affiliationChanged",
+ ocsp.Superseded: "superseded",
+ ocsp.CessationOfOperation: "cessationOfOperation",
+ ocsp.CertificateHold: "certificateHold",
+ // 7 is unused
+ ocsp.RemoveFromCRL: "removeFromCRL",
+ ocsp.PrivilegeWithdrawn: "privilegeWithdrawn",
+ ocsp.AACompromise: "aAcompromise",
+}
+
+// UserAllowedReasons contains the subset of Reasons which users are
+// allowed to use.
+var UserAllowedReasons = map[Reason]struct{}{
+ ocsp.Unspecified: {},
+ ocsp.KeyCompromise: {},
+ ocsp.Superseded: {},
+ ocsp.CessationOfOperation: {},
+}
+
+// AdminAllowedReasons contains the subset of Reasons which admins are allowed
+// to use. Reasons not found here will soon be forbidden from appearing in CRLs
+// or OCSP responses by root programs.
+var AdminAllowedReasons = map[Reason]struct{}{
+ ocsp.Unspecified: {},
+ ocsp.KeyCompromise: {},
+ ocsp.Superseded: {},
+ ocsp.CessationOfOperation: {},
+ ocsp.PrivilegeWithdrawn: {},
+}
+
+// UserAllowedReasonsMessage contains a string describing the list of
+// user-allowed revocation reasons. It is useful when a revocation is rejected
+// because its reason is not a valid user-supplied reason and the allowed
+// values must be communicated. This variable is populated during package
+// initialization.
+var UserAllowedReasonsMessage = ""
+
+func init() {
+ // Build a slice of ints from the allowed reason codes.
+	// We want a slice because map iteration order over `UserAllowedReasons` is
+	// randomized, which would make the message unpredictable and cumbersome for
+	// unit testing.
+	// We use []int instead of []Reason so we can use `sort.Ints` without fuss.
+ var allowed []int
+ for reason := range UserAllowedReasons {
+ allowed = append(allowed, int(reason))
+ }
+ sort.Ints(allowed)
+
+ var reasonStrings []string
+ for _, reason := range allowed {
+ reasonStrings = append(reasonStrings, fmt.Sprintf("%s (%d)",
+ ReasonToString[Reason(reason)], reason))
+ }
+ UserAllowedReasonsMessage = strings.Join(reasonStrings, ", ")
+}
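
With the x/crypto/ocsp reason codes (Unspecified = 0, KeyCompromise = 1,
Superseded = 4, CessationOfOperation = 5), the init above produces:

	unspecified (0), keyCompromise (1), superseded (4), cessationOfOperation (5)

A short sketch of how a caller might gate a user-supplied reason on these maps
(validateUserReason is illustrative, not part of this package):

	package main

	import (
		"fmt"

		"github.com/letsencrypt/boulder/revocation"
	)

	func validateUserReason(r revocation.Reason) error {
		if _, ok := revocation.UserAllowedReasons[r]; !ok {
			// Reject, and tell the user which reasons are acceptable.
			return fmt.Errorf("reason %d not allowed; valid reasons: %s",
				r, revocation.UserAllowedReasonsMessage)
		}
		return nil
	}

	func main() {
		fmt.Println(validateUserReason(3)) // 3 (affiliationChanged) is rejected
	}
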
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go
new file mode 100644
index 00000000000..6bb28e0f299
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go
@@ -0,0 +1,3895 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.0
+// protoc v3.20.1
+// source: sa.proto
+
+package proto
+
+import (
+ proto "github.com/letsencrypt/boulder/core/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type RegistrationID struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *RegistrationID) Reset() {
+ *x = RegistrationID{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegistrationID) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegistrationID) ProtoMessage() {}
+
+func (x *RegistrationID) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegistrationID.ProtoReflect.Descriptor instead.
+func (*RegistrationID) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *RegistrationID) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
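+// Note: every generated getter in this file is nil-safe, as the body of
+// GetId above shows; calling a getter on a nil message returns the zero
+// value instead of panicking. Illustrative sketch only, not generated code:
+//
+//	var req *RegistrationID // nil
+//	id := req.GetId()       // id == 0, no panic
+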
+type JSONWebKey struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Jwk []byte `protobuf:"bytes,1,opt,name=jwk,proto3" json:"jwk,omitempty"`
+}
+
+func (x *JSONWebKey) Reset() {
+ *x = JSONWebKey{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *JSONWebKey) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONWebKey) ProtoMessage() {}
+
+func (x *JSONWebKey) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use JSONWebKey.ProtoReflect.Descriptor instead.
+func (*JSONWebKey) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *JSONWebKey) GetJwk() []byte {
+ if x != nil {
+ return x.Jwk
+ }
+ return nil
+}
+
+type AuthorizationID struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *AuthorizationID) Reset() {
+ *x = AuthorizationID{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AuthorizationID) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AuthorizationID) ProtoMessage() {}
+
+func (x *AuthorizationID) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AuthorizationID.ProtoReflect.Descriptor instead.
+func (*AuthorizationID) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *AuthorizationID) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+type GetPendingAuthorizationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ IdentifierType string `protobuf:"bytes,2,opt,name=identifierType,proto3" json:"identifierType,omitempty"`
+ IdentifierValue string `protobuf:"bytes,3,opt,name=identifierValue,proto3" json:"identifierValue,omitempty"`
+ // Result must be valid until at least this Unix timestamp (nanos)
+ ValidUntil int64 `protobuf:"varint,4,opt,name=validUntil,proto3" json:"validUntil,omitempty"`
+	// An issued time. When not present, the SA defaults to using
+	// the current time. The orphan-finder uses this parameter to add
+	// certificates with the correct historic issued date.
+ *x = GetPendingAuthorizationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetPendingAuthorizationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetPendingAuthorizationRequest) ProtoMessage() {}
+
+func (x *GetPendingAuthorizationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetPendingAuthorizationRequest.ProtoReflect.Descriptor instead.
+func (*GetPendingAuthorizationRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *GetPendingAuthorizationRequest) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *GetPendingAuthorizationRequest) GetIdentifierType() string {
+ if x != nil {
+ return x.IdentifierType
+ }
+ return ""
+}
+
+func (x *GetPendingAuthorizationRequest) GetIdentifierValue() string {
+ if x != nil {
+ return x.IdentifierValue
+ }
+ return ""
+}
+
+func (x *GetPendingAuthorizationRequest) GetValidUntil() int64 {
+ if x != nil {
+ return x.ValidUntil
+ }
+ return 0
+}
+
+type GetValidAuthorizationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"`
+ Now int64 `protobuf:"varint,3,opt,name=now,proto3" json:"now,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *GetValidAuthorizationsRequest) Reset() {
+ *x = GetValidAuthorizationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetValidAuthorizationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetValidAuthorizationsRequest) ProtoMessage() {}
+
+func (x *GetValidAuthorizationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetValidAuthorizationsRequest.ProtoReflect.Descriptor instead.
+func (*GetValidAuthorizationsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *GetValidAuthorizationsRequest) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *GetValidAuthorizationsRequest) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+func (x *GetValidAuthorizationsRequest) GetNow() int64 {
+ if x != nil {
+ return x.Now
+ }
+ return 0
+}
+
+type ValidAuthorizations struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Valid []*ValidAuthorizations_MapElement `protobuf:"bytes,1,rep,name=valid,proto3" json:"valid,omitempty"`
+}
+
+func (x *ValidAuthorizations) Reset() {
+ *x = ValidAuthorizations{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidAuthorizations) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidAuthorizations) ProtoMessage() {}
+
+func (x *ValidAuthorizations) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidAuthorizations.ProtoReflect.Descriptor instead.
+func (*ValidAuthorizations) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ValidAuthorizations) GetValid() []*ValidAuthorizations_MapElement {
+ if x != nil {
+ return x.Valid
+ }
+ return nil
+}
+
+type Serial struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
+}
+
+func (x *Serial) Reset() {
+ *x = Serial{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Serial) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Serial) ProtoMessage() {}
+
+func (x *Serial) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Serial.ProtoReflect.Descriptor instead.
+func (*Serial) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Serial) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+type SerialMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
+ RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Created int64 `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` // Unix timestamp (nanoseconds)
+ Expires int64 `protobuf:"varint,4,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *SerialMetadata) Reset() {
+ *x = SerialMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SerialMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SerialMetadata) ProtoMessage() {}
+
+func (x *SerialMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SerialMetadata.ProtoReflect.Descriptor instead.
+func (*SerialMetadata) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *SerialMetadata) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *SerialMetadata) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *SerialMetadata) GetCreated() int64 {
+ if x != nil {
+ return x.Created
+ }
+ return 0
+}
+
+func (x *SerialMetadata) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+type Range struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Earliest int64 `protobuf:"varint,1,opt,name=earliest,proto3" json:"earliest,omitempty"` // Unix timestamp (nanoseconds)
+ Latest int64 `protobuf:"varint,2,opt,name=latest,proto3" json:"latest,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *Range) Reset() {
+ *x = Range{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Range) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Range) ProtoMessage() {}
+
+func (x *Range) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Range.ProtoReflect.Descriptor instead.
+func (*Range) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Range) GetEarliest() int64 {
+ if x != nil {
+ return x.Earliest
+ }
+ return 0
+}
+
+func (x *Range) GetLatest() int64 {
+ if x != nil {
+ return x.Latest
+ }
+ return 0
+}
+
+type Count struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (x *Count) Reset() {
+ *x = Count{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Count) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Count) ProtoMessage() {}
+
+func (x *Count) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Count.ProtoReflect.Descriptor instead.
+func (*Count) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Count) GetCount() int64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+type Timestamps struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Timestamps []int64 `protobuf:"varint,1,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *Timestamps) Reset() {
+ *x = Timestamps{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Timestamps) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Timestamps) ProtoMessage() {}
+
+func (x *Timestamps) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Timestamps.ProtoReflect.Descriptor instead.
+func (*Timestamps) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *Timestamps) GetTimestamps() []int64 {
+ if x != nil {
+ return x.Timestamps
+ }
+ return nil
+}
+
+type CountCertificatesByNamesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Range *Range `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"`
+ Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"`
+}
+
+func (x *CountCertificatesByNamesRequest) Reset() {
+ *x = CountCertificatesByNamesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountCertificatesByNamesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountCertificatesByNamesRequest) ProtoMessage() {}
+
+func (x *CountCertificatesByNamesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountCertificatesByNamesRequest.ProtoReflect.Descriptor instead.
+func (*CountCertificatesByNamesRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *CountCertificatesByNamesRequest) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+func (x *CountCertificatesByNamesRequest) GetNames() []string {
+ if x != nil {
+ return x.Names
+ }
+ return nil
+}
+
+type CountByNames struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Counts map[string]int64 `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+}
+
+func (x *CountByNames) Reset() {
+ *x = CountByNames{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountByNames) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountByNames) ProtoMessage() {}
+
+func (x *CountByNames) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountByNames.ProtoReflect.Descriptor instead.
+func (*CountByNames) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *CountByNames) GetCounts() map[string]int64 {
+ if x != nil {
+ return x.Counts
+ }
+ return nil
+}
+
+type CountRegistrationsByIPRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ip []byte `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"`
+ Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"`
+}
+
+func (x *CountRegistrationsByIPRequest) Reset() {
+ *x = CountRegistrationsByIPRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountRegistrationsByIPRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountRegistrationsByIPRequest) ProtoMessage() {}
+
+func (x *CountRegistrationsByIPRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountRegistrationsByIPRequest.ProtoReflect.Descriptor instead.
+func (*CountRegistrationsByIPRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *CountRegistrationsByIPRequest) GetIp() []byte {
+ if x != nil {
+ return x.Ip
+ }
+ return nil
+}
+
+func (x *CountRegistrationsByIPRequest) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
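+// The Ip field above holds raw address bytes. Assuming the caller has a
+// net.IP (which is itself a []byte), a direct conversion is enough.
+// Illustrative sketch only, not generated code:
+//
+//	ip := net.ParseIP("203.0.113.7")
+//	req := &CountRegistrationsByIPRequest{
+//		Ip:    []byte(ip),
+//		Range: &Range{Earliest: 0, Latest: time.Now().UnixNano()},
+//	}
+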
+type CountInvalidAuthorizationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ // Count authorizations that expire in this range.
+ Range *Range `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+}
+
+func (x *CountInvalidAuthorizationsRequest) Reset() {
+ *x = CountInvalidAuthorizationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountInvalidAuthorizationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountInvalidAuthorizationsRequest) ProtoMessage() {}
+
+func (x *CountInvalidAuthorizationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountInvalidAuthorizationsRequest.ProtoReflect.Descriptor instead.
+func (*CountInvalidAuthorizationsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *CountInvalidAuthorizationsRequest) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *CountInvalidAuthorizationsRequest) GetHostname() string {
+ if x != nil {
+ return x.Hostname
+ }
+ return ""
+}
+
+func (x *CountInvalidAuthorizationsRequest) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+type CountOrdersRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AccountID int64 `protobuf:"varint,1,opt,name=accountID,proto3" json:"accountID,omitempty"`
+ Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"`
+}
+
+func (x *CountOrdersRequest) Reset() {
+ *x = CountOrdersRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountOrdersRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountOrdersRequest) ProtoMessage() {}
+
+func (x *CountOrdersRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountOrdersRequest.ProtoReflect.Descriptor instead.
+func (*CountOrdersRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *CountOrdersRequest) GetAccountID() int64 {
+ if x != nil {
+ return x.AccountID
+ }
+ return 0
+}
+
+func (x *CountOrdersRequest) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+type CountFQDNSetsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Window int64 `protobuf:"varint,1,opt,name=window,proto3" json:"window,omitempty"`
+ Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"`
+}
+
+func (x *CountFQDNSetsRequest) Reset() {
+ *x = CountFQDNSetsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CountFQDNSetsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CountFQDNSetsRequest) ProtoMessage() {}
+
+func (x *CountFQDNSetsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CountFQDNSetsRequest.ProtoReflect.Descriptor instead.
+func (*CountFQDNSetsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *CountFQDNSetsRequest) GetWindow() int64 {
+ if x != nil {
+ return x.Window
+ }
+ return 0
+}
+
+func (x *CountFQDNSetsRequest) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+type FQDNSetExistsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Domains []string `protobuf:"bytes,1,rep,name=domains,proto3" json:"domains,omitempty"`
+}
+
+func (x *FQDNSetExistsRequest) Reset() {
+ *x = FQDNSetExistsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FQDNSetExistsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FQDNSetExistsRequest) ProtoMessage() {}
+
+func (x *FQDNSetExistsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FQDNSetExistsRequest.ProtoReflect.Descriptor instead.
+func (*FQDNSetExistsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *FQDNSetExistsRequest) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+type PreviousCertificateExistsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"`
+ RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"`
+}
+
+func (x *PreviousCertificateExistsRequest) Reset() {
+ *x = PreviousCertificateExistsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PreviousCertificateExistsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreviousCertificateExistsRequest) ProtoMessage() {}
+
+func (x *PreviousCertificateExistsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreviousCertificateExistsRequest.ProtoReflect.Descriptor instead.
+func (*PreviousCertificateExistsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *PreviousCertificateExistsRequest) GetDomain() string {
+ if x != nil {
+ return x.Domain
+ }
+ return ""
+}
+
+func (x *PreviousCertificateExistsRequest) GetRegID() int64 {
+ if x != nil {
+ return x.RegID
+ }
+ return 0
+}
+
+type Exists struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"`
+}
+
+func (x *Exists) Reset() {
+ *x = Exists{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Exists) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exists) ProtoMessage() {}
+
+func (x *Exists) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exists.ProtoReflect.Descriptor instead.
+func (*Exists) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *Exists) GetExists() bool {
+ if x != nil {
+ return x.Exists
+ }
+ return false
+}
+
+type AddSerialRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegID int64 `protobuf:"varint,1,opt,name=regID,proto3" json:"regID,omitempty"`
+ Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"`
+ Created int64 `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` // Unix timestamp (nanoseconds)
+ Expires int64 `protobuf:"varint,4,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *AddSerialRequest) Reset() {
+ *x = AddSerialRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddSerialRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddSerialRequest) ProtoMessage() {}
+
+func (x *AddSerialRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddSerialRequest.ProtoReflect.Descriptor instead.
+func (*AddSerialRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *AddSerialRequest) GetRegID() int64 {
+ if x != nil {
+ return x.RegID
+ }
+ return 0
+}
+
+func (x *AddSerialRequest) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *AddSerialRequest) GetCreated() int64 {
+ if x != nil {
+ return x.Created
+ }
+ return 0
+}
+
+func (x *AddSerialRequest) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+type AddCertificateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Der []byte `protobuf:"bytes,1,opt,name=der,proto3" json:"der,omitempty"`
+ RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"`
+ // A signed OCSP response for the certificate contained in "der".
+ // Note: The certificate status in the OCSP response is assumed to be 0 (good).
+ Ocsp []byte `protobuf:"bytes,3,opt,name=ocsp,proto3" json:"ocsp,omitempty"`
+ // An issued time. When not present the SA defaults to using
+ // the current time. The orphan-finder uses this parameter to add
+ // certificates with the correct historic issued date
+ Issued int64 `protobuf:"varint,4,opt,name=issued,proto3" json:"issued,omitempty"`
+ IssuerID int64 `protobuf:"varint,5,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
+}
+
+func (x *AddCertificateRequest) Reset() {
+ *x = AddCertificateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddCertificateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddCertificateRequest) ProtoMessage() {}
+
+func (x *AddCertificateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddCertificateRequest.ProtoReflect.Descriptor instead.
+func (*AddCertificateRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *AddCertificateRequest) GetDer() []byte {
+ if x != nil {
+ return x.Der
+ }
+ return nil
+}
+
+func (x *AddCertificateRequest) GetRegID() int64 {
+ if x != nil {
+ return x.RegID
+ }
+ return 0
+}
+
+func (x *AddCertificateRequest) GetOcsp() []byte {
+ if x != nil {
+ return x.Ocsp
+ }
+ return nil
+}
+
+func (x *AddCertificateRequest) GetIssued() int64 {
+ if x != nil {
+ return x.Issued
+ }
+ return 0
+}
+
+func (x *AddCertificateRequest) GetIssuerID() int64 {
+ if x != nil {
+ return x.IssuerID
+ }
+ return 0
+}
+
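+// Illustrative sketch only, not generated code: per the field comments above,
+// a backfill caller (like the orphan-finder mentioned there) could populate
+// the request as below, where certDER, ocspDER, and issuedAt are assumed to
+// be values it already holds:
+//
+//	req := &AddCertificateRequest{
+//		Der:    certDER,
+//		RegID:  42,
+//		Ocsp:   ocspDER,             // assumed to have status 0 (good)
+//		Issued: issuedAt.UnixNano(), // omit to default to the SA's current time
+//	}
+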
+type AddCertificateResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
+}
+
+func (x *AddCertificateResponse) Reset() {
+ *x = AddCertificateResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddCertificateResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddCertificateResponse) ProtoMessage() {}
+
+func (x *AddCertificateResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddCertificateResponse.ProtoReflect.Descriptor instead.
+func (*AddCertificateResponse) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *AddCertificateResponse) GetDigest() string {
+ if x != nil {
+ return x.Digest
+ }
+ return ""
+}
+
+type OrderRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *OrderRequest) Reset() {
+ *x = OrderRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OrderRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OrderRequest) ProtoMessage() {}
+
+func (x *OrderRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OrderRequest.ProtoReflect.Descriptor instead.
+func (*OrderRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *OrderRequest) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+type NewOrderRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Expires int64 `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"`
+ Names []string `protobuf:"bytes,3,rep,name=names,proto3" json:"names,omitempty"`
+ V2Authorizations []int64 `protobuf:"varint,4,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"`
+}
+
+func (x *NewOrderRequest) Reset() {
+ *x = NewOrderRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NewOrderRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NewOrderRequest) ProtoMessage() {}
+
+func (x *NewOrderRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NewOrderRequest.ProtoReflect.Descriptor instead.
+func (*NewOrderRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *NewOrderRequest) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *NewOrderRequest) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+func (x *NewOrderRequest) GetNames() []string {
+ if x != nil {
+ return x.Names
+ }
+ return nil
+}
+
+func (x *NewOrderRequest) GetV2Authorizations() []int64 {
+ if x != nil {
+ return x.V2Authorizations
+ }
+ return nil
+}
+
+type NewOrderAndAuthzsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NewOrder *NewOrderRequest `protobuf:"bytes,1,opt,name=newOrder,proto3" json:"newOrder,omitempty"`
+ NewAuthzs []*proto.Authorization `protobuf:"bytes,2,rep,name=newAuthzs,proto3" json:"newAuthzs,omitempty"`
+}
+
+func (x *NewOrderAndAuthzsRequest) Reset() {
+ *x = NewOrderAndAuthzsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NewOrderAndAuthzsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NewOrderAndAuthzsRequest) ProtoMessage() {}
+
+func (x *NewOrderAndAuthzsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NewOrderAndAuthzsRequest.ProtoReflect.Descriptor instead.
+func (*NewOrderAndAuthzsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *NewOrderAndAuthzsRequest) GetNewOrder() *NewOrderRequest {
+ if x != nil {
+ return x.NewOrder
+ }
+ return nil
+}
+
+func (x *NewOrderAndAuthzsRequest) GetNewAuthzs() []*proto.Authorization {
+ if x != nil {
+ return x.NewAuthzs
+ }
+ return nil
+}
+
+type SetOrderErrorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Error *proto.ProblemDetails `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *SetOrderErrorRequest) Reset() {
+ *x = SetOrderErrorRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetOrderErrorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetOrderErrorRequest) ProtoMessage() {}
+
+func (x *SetOrderErrorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetOrderErrorRequest.ProtoReflect.Descriptor instead.
+func (*SetOrderErrorRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *SetOrderErrorRequest) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *SetOrderErrorRequest) GetError() *proto.ProblemDetails {
+ if x != nil {
+ return x.Error
+ }
+ return nil
+}
+
+type GetValidOrderAuthorizationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ AcctID int64 `protobuf:"varint,2,opt,name=acctID,proto3" json:"acctID,omitempty"`
+}
+
+func (x *GetValidOrderAuthorizationsRequest) Reset() {
+ *x = GetValidOrderAuthorizationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetValidOrderAuthorizationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetValidOrderAuthorizationsRequest) ProtoMessage() {}
+
+func (x *GetValidOrderAuthorizationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetValidOrderAuthorizationsRequest.ProtoReflect.Descriptor instead.
+func (*GetValidOrderAuthorizationsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{27}
+}
+
+func (x *GetValidOrderAuthorizationsRequest) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *GetValidOrderAuthorizationsRequest) GetAcctID() int64 {
+ if x != nil {
+ return x.AcctID
+ }
+ return 0
+}
+
+type GetOrderForNamesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AcctID int64 `protobuf:"varint,1,opt,name=acctID,proto3" json:"acctID,omitempty"`
+ Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"`
+}
+
+func (x *GetOrderForNamesRequest) Reset() {
+ *x = GetOrderForNamesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetOrderForNamesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetOrderForNamesRequest) ProtoMessage() {}
+
+func (x *GetOrderForNamesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetOrderForNamesRequest.ProtoReflect.Descriptor instead.
+func (*GetOrderForNamesRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *GetOrderForNamesRequest) GetAcctID() int64 {
+ if x != nil {
+ return x.AcctID
+ }
+ return 0
+}
+
+func (x *GetOrderForNamesRequest) GetNames() []string {
+ if x != nil {
+ return x.Names
+ }
+ return nil
+}
+
+type FinalizeOrderRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ CertificateSerial string `protobuf:"bytes,2,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"`
+}
+
+func (x *FinalizeOrderRequest) Reset() {
+ *x = FinalizeOrderRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FinalizeOrderRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FinalizeOrderRequest) ProtoMessage() {}
+
+func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FinalizeOrderRequest.ProtoReflect.Descriptor instead.
+func (*FinalizeOrderRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *FinalizeOrderRequest) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *FinalizeOrderRequest) GetCertificateSerial() string {
+ if x != nil {
+ return x.CertificateSerial
+ }
+ return ""
+}
+
+type GetAuthorizationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"`
+ Now int64 `protobuf:"varint,3,opt,name=now,proto3" json:"now,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *GetAuthorizationsRequest) Reset() {
+ *x = GetAuthorizationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetAuthorizationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetAuthorizationsRequest) ProtoMessage() {}
+
+func (x *GetAuthorizationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetAuthorizationsRequest.ProtoReflect.Descriptor instead.
+func (*GetAuthorizationsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *GetAuthorizationsRequest) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *GetAuthorizationsRequest) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+func (x *GetAuthorizationsRequest) GetNow() int64 {
+ if x != nil {
+ return x.Now
+ }
+ return 0
+}
+
+type Authorizations struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Authz []*Authorizations_MapElement `protobuf:"bytes,1,rep,name=authz,proto3" json:"authz,omitempty"`
+}
+
+func (x *Authorizations) Reset() {
+ *x = Authorizations{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Authorizations) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Authorizations) ProtoMessage() {}
+
+func (x *Authorizations) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Authorizations.ProtoReflect.Descriptor instead.
+func (*Authorizations) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{31}
+}
+
+func (x *Authorizations) GetAuthz() []*Authorizations_MapElement {
+ if x != nil {
+ return x.Authz
+ }
+ return nil
+}
+
+type AddPendingAuthorizationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Authz []*proto.Authorization `protobuf:"bytes,1,rep,name=authz,proto3" json:"authz,omitempty"`
+}
+
+func (x *AddPendingAuthorizationsRequest) Reset() {
+ *x = AddPendingAuthorizationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddPendingAuthorizationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddPendingAuthorizationsRequest) ProtoMessage() {}
+
+func (x *AddPendingAuthorizationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddPendingAuthorizationsRequest.ProtoReflect.Descriptor instead.
+func (*AddPendingAuthorizationsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *AddPendingAuthorizationsRequest) GetAuthz() []*proto.Authorization {
+ if x != nil {
+ return x.Authz
+ }
+ return nil
+}
+
+type AuthorizationIDs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"`
+}
+
+func (x *AuthorizationIDs) Reset() {
+ *x = AuthorizationIDs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AuthorizationIDs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AuthorizationIDs) ProtoMessage() {}
+
+func (x *AuthorizationIDs) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AuthorizationIDs.ProtoReflect.Descriptor instead.
+func (*AuthorizationIDs) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *AuthorizationIDs) GetIds() []string {
+ if x != nil {
+ return x.Ids
+ }
+ return nil
+}
+
+type AuthorizationID2 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (x *AuthorizationID2) Reset() {
+ *x = AuthorizationID2{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AuthorizationID2) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AuthorizationID2) ProtoMessage() {}
+
+func (x *AuthorizationID2) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AuthorizationID2.ProtoReflect.Descriptor instead.
+func (*AuthorizationID2) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{34}
+}
+
+func (x *AuthorizationID2) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+type Authorization2IDs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ids []int64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"`
+}
+
+func (x *Authorization2IDs) Reset() {
+ *x = Authorization2IDs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Authorization2IDs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Authorization2IDs) ProtoMessage() {}
+
+func (x *Authorization2IDs) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Authorization2IDs.ProtoReflect.Descriptor instead.
+func (*Authorization2IDs) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *Authorization2IDs) GetIds() []int64 {
+ if x != nil {
+ return x.Ids
+ }
+ return nil
+}
+
+type RevokeCertificateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
+ Reason int64 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"`
+ Date int64 `protobuf:"varint,3,opt,name=date,proto3" json:"date,omitempty"` // Unix timestamp (nanoseconds)
+ Backdate int64 `protobuf:"varint,5,opt,name=backdate,proto3" json:"backdate,omitempty"` // Unix timestamp (nanoseconds)
+ Response []byte `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"`
+ IssuerID int64 `protobuf:"varint,6,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
+}
+
+func (x *RevokeCertificateRequest) Reset() {
+ *x = RevokeCertificateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RevokeCertificateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RevokeCertificateRequest) ProtoMessage() {}
+
+func (x *RevokeCertificateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RevokeCertificateRequest.ProtoReflect.Descriptor instead.
+func (*RevokeCertificateRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{36}
+}
+
+func (x *RevokeCertificateRequest) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *RevokeCertificateRequest) GetReason() int64 {
+ if x != nil {
+ return x.Reason
+ }
+ return 0
+}
+
+func (x *RevokeCertificateRequest) GetDate() int64 {
+ if x != nil {
+ return x.Date
+ }
+ return 0
+}
+
+func (x *RevokeCertificateRequest) GetBackdate() int64 {
+ if x != nil {
+ return x.Backdate
+ }
+ return 0
+}
+
+func (x *RevokeCertificateRequest) GetResponse() []byte {
+ if x != nil {
+ return x.Response
+ }
+ return nil
+}
+
+func (x *RevokeCertificateRequest) GetIssuerID() int64 {
+ if x != nil {
+ return x.IssuerID
+ }
+ return 0
+}
+
+type FinalizeAuthorizationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+ Expires int64 `protobuf:"varint,3,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
+ Attempted string `protobuf:"bytes,4,opt,name=attempted,proto3" json:"attempted,omitempty"`
+ ValidationRecords []*proto.ValidationRecord `protobuf:"bytes,5,rep,name=validationRecords,proto3" json:"validationRecords,omitempty"`
+ ValidationError *proto.ProblemDetails `protobuf:"bytes,6,opt,name=validationError,proto3" json:"validationError,omitempty"`
+ AttemptedAt int64 `protobuf:"varint,7,opt,name=attemptedAt,proto3" json:"attemptedAt,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *FinalizeAuthorizationRequest) Reset() {
+ *x = FinalizeAuthorizationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FinalizeAuthorizationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FinalizeAuthorizationRequest) ProtoMessage() {}
+
+func (x *FinalizeAuthorizationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FinalizeAuthorizationRequest.ProtoReflect.Descriptor instead.
+func (*FinalizeAuthorizationRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{37}
+}
+
+func (x *FinalizeAuthorizationRequest) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *FinalizeAuthorizationRequest) GetStatus() string {
+ if x != nil {
+ return x.Status
+ }
+ return ""
+}
+
+func (x *FinalizeAuthorizationRequest) GetExpires() int64 {
+ if x != nil {
+ return x.Expires
+ }
+ return 0
+}
+
+func (x *FinalizeAuthorizationRequest) GetAttempted() string {
+ if x != nil {
+ return x.Attempted
+ }
+ return ""
+}
+
+func (x *FinalizeAuthorizationRequest) GetValidationRecords() []*proto.ValidationRecord {
+ if x != nil {
+ return x.ValidationRecords
+ }
+ return nil
+}
+
+func (x *FinalizeAuthorizationRequest) GetValidationError() *proto.ProblemDetails {
+ if x != nil {
+ return x.ValidationError
+ }
+ return nil
+}
+
+func (x *FinalizeAuthorizationRequest) GetAttemptedAt() int64 {
+ if x != nil {
+ return x.AttemptedAt
+ }
+ return 0
+}
+
+type AddBlockedKeyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"`
+ Added int64 `protobuf:"varint,2,opt,name=added,proto3" json:"added,omitempty"` // Unix timestamp (nanoseconds)
+ Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
+ Comment string `protobuf:"bytes,4,opt,name=comment,proto3" json:"comment,omitempty"`
+ RevokedBy int64 `protobuf:"varint,5,opt,name=revokedBy,proto3" json:"revokedBy,omitempty"`
+}
+
+func (x *AddBlockedKeyRequest) Reset() {
+ *x = AddBlockedKeyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddBlockedKeyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddBlockedKeyRequest) ProtoMessage() {}
+
+func (x *AddBlockedKeyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddBlockedKeyRequest.ProtoReflect.Descriptor instead.
+func (*AddBlockedKeyRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{38}
+}
+
+func (x *AddBlockedKeyRequest) GetKeyHash() []byte {
+ if x != nil {
+ return x.KeyHash
+ }
+ return nil
+}
+
+func (x *AddBlockedKeyRequest) GetAdded() int64 {
+ if x != nil {
+ return x.Added
+ }
+ return 0
+}
+
+func (x *AddBlockedKeyRequest) GetSource() string {
+ if x != nil {
+ return x.Source
+ }
+ return ""
+}
+
+func (x *AddBlockedKeyRequest) GetComment() string {
+ if x != nil {
+ return x.Comment
+ }
+ return ""
+}
+
+func (x *AddBlockedKeyRequest) GetRevokedBy() int64 {
+ if x != nil {
+ return x.RevokedBy
+ }
+ return 0
+}
+
+type KeyBlockedRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"`
+}
+
+func (x *KeyBlockedRequest) Reset() {
+ *x = KeyBlockedRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeyBlockedRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyBlockedRequest) ProtoMessage() {}
+
+func (x *KeyBlockedRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[39]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyBlockedRequest.ProtoReflect.Descriptor instead.
+func (*KeyBlockedRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{39}
+}
+
+func (x *KeyBlockedRequest) GetKeyHash() []byte {
+ if x != nil {
+ return x.KeyHash
+ }
+ return nil
+}
+
+type Incident struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ SerialTable string `protobuf:"bytes,2,opt,name=serialTable,proto3" json:"serialTable,omitempty"`
+ Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"`
+ RenewBy int64 `protobuf:"varint,4,opt,name=renewBy,proto3" json:"renewBy,omitempty"` // Unix timestamp (nanoseconds)
+ Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"`
+}
+
+func (x *Incident) Reset() {
+ *x = Incident{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Incident) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Incident) ProtoMessage() {}
+
+func (x *Incident) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[40]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Incident.ProtoReflect.Descriptor instead.
+func (*Incident) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{40}
+}
+
+func (x *Incident) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Incident) GetSerialTable() string {
+ if x != nil {
+ return x.SerialTable
+ }
+ return ""
+}
+
+func (x *Incident) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *Incident) GetRenewBy() int64 {
+ if x != nil {
+ return x.RenewBy
+ }
+ return 0
+}
+
+func (x *Incident) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
+ }
+ return false
+}
+
+type SerialsForIncidentRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IncidentTable string `protobuf:"bytes,1,opt,name=incidentTable,proto3" json:"incidentTable,omitempty"`
+}
+
+func (x *SerialsForIncidentRequest) Reset() {
+ *x = SerialsForIncidentRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SerialsForIncidentRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SerialsForIncidentRequest) ProtoMessage() {}
+
+func (x *SerialsForIncidentRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[41]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SerialsForIncidentRequest.ProtoReflect.Descriptor instead.
+func (*SerialsForIncidentRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{41}
+}
+
+func (x *SerialsForIncidentRequest) GetIncidentTable() string {
+ if x != nil {
+ return x.IncidentTable
+ }
+ return ""
+}
+
+type IncidentSerial struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
+ RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
+ OrderID int64 `protobuf:"varint,3,opt,name=orderID,proto3" json:"orderID,omitempty"`
+ LastNoticeSent int64 `protobuf:"varint,4,opt,name=lastNoticeSent,proto3" json:"lastNoticeSent,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *IncidentSerial) Reset() {
+ *x = IncidentSerial{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IncidentSerial) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IncidentSerial) ProtoMessage() {}
+
+func (x *IncidentSerial) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[42]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IncidentSerial.ProtoReflect.Descriptor instead.
+func (*IncidentSerial) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{42}
+}
+
+func (x *IncidentSerial) GetSerial() string {
+ if x != nil {
+ return x.Serial
+ }
+ return ""
+}
+
+func (x *IncidentSerial) GetRegistrationID() int64 {
+ if x != nil {
+ return x.RegistrationID
+ }
+ return 0
+}
+
+func (x *IncidentSerial) GetOrderID() int64 {
+ if x != nil {
+ return x.OrderID
+ }
+ return 0
+}
+
+func (x *IncidentSerial) GetLastNoticeSent() int64 {
+ if x != nil {
+ return x.LastNoticeSent
+ }
+ return 0
+}
+
+type GetRevokedCertsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"`
+ ExpiresAfter int64 `protobuf:"varint,2,opt,name=expiresAfter,proto3" json:"expiresAfter,omitempty"` // Unix timestamp (nanoseconds), inclusive
+ ExpiresBefore int64 `protobuf:"varint,3,opt,name=expiresBefore,proto3" json:"expiresBefore,omitempty"` // Unix timestamp (nanoseconds), exclusive
+ RevokedBefore int64 `protobuf:"varint,4,opt,name=revokedBefore,proto3" json:"revokedBefore,omitempty"` // Unix timestamp (nanoseconds)
+}
+
+func (x *GetRevokedCertsRequest) Reset() {
+ *x = GetRevokedCertsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetRevokedCertsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetRevokedCertsRequest) ProtoMessage() {}
+
+func (x *GetRevokedCertsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[43]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetRevokedCertsRequest.ProtoReflect.Descriptor instead.
+func (*GetRevokedCertsRequest) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{43}
+}
+
+func (x *GetRevokedCertsRequest) GetIssuerNameID() int64 {
+ if x != nil {
+ return x.IssuerNameID
+ }
+ return 0
+}
+
+func (x *GetRevokedCertsRequest) GetExpiresAfter() int64 {
+ if x != nil {
+ return x.ExpiresAfter
+ }
+ return 0
+}
+
+func (x *GetRevokedCertsRequest) GetExpiresBefore() int64 {
+ if x != nil {
+ return x.ExpiresBefore
+ }
+ return 0
+}
+
+func (x *GetRevokedCertsRequest) GetRevokedBefore() int64 {
+ if x != nil {
+ return x.RevokedBefore
+ }
+ return 0
+}
+
+type ValidAuthorizations_MapElement struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"`
+ Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"`
+}
+
+func (x *ValidAuthorizations_MapElement) Reset() {
+ *x = ValidAuthorizations_MapElement{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidAuthorizations_MapElement) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidAuthorizations_MapElement) ProtoMessage() {}
+
+func (x *ValidAuthorizations_MapElement) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[44]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidAuthorizations_MapElement.ProtoReflect.Descriptor instead.
+func (*ValidAuthorizations_MapElement) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{5, 0}
+}
+
+func (x *ValidAuthorizations_MapElement) GetDomain() string {
+ if x != nil {
+ return x.Domain
+ }
+ return ""
+}
+
+func (x *ValidAuthorizations_MapElement) GetAuthz() *proto.Authorization {
+ if x != nil {
+ return x.Authz
+ }
+ return nil
+}
+
+type Authorizations_MapElement struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"`
+ Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"`
+}
+
+func (x *Authorizations_MapElement) Reset() {
+ *x = Authorizations_MapElement{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_sa_proto_msgTypes[46]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Authorizations_MapElement) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Authorizations_MapElement) ProtoMessage() {}
+
+func (x *Authorizations_MapElement) ProtoReflect() protoreflect.Message {
+ mi := &file_sa_proto_msgTypes[46]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Authorizations_MapElement.ProtoReflect.Descriptor instead.
+func (*Authorizations_MapElement) Descriptor() ([]byte, []int) {
+ return file_sa_proto_rawDescGZIP(), []int{31, 0}
+}
+
+func (x *Authorizations_MapElement) GetDomain() string {
+ if x != nil {
+ return x.Domain
+ }
+ return ""
+}
+
+func (x *Authorizations_MapElement) GetAuthz() *proto.Authorization {
+ if x != nil {
+ return x.Authz
+ }
+ return nil
+}
+
+var File_sa_proto protoreflect.FileDescriptor
+
+var file_sa_proto_rawDesc = []byte{
+ 0x0a, 0x08, 0x73, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x73, 0x61, 0x1a, 0x15,
+ 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0x20, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b,
+ 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6a, 0x77, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x03, 0x6a, 0x77, 0x6b, 0x22, 0x21, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0xba, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x50,
+ 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65,
+ 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x69, 0x64,
+ 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55, 0x6e, 0x74,
+ 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55,
+ 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x73, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72,
+ 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a,
+ 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07,
+ 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x1a, 0x4f, 0x0a, 0x0a, 0x4d,
+ 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d,
+ 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69,
+ 0x6e, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x20, 0x0a, 0x06,
+ 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x84,
+ 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x44, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65,
+ 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0x3b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1a,
+ 0x0a, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61,
+ 0x74, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65,
+ 0x73, 0x74, 0x22, 0x1d, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x22, 0x2c, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12,
+ 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x03, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x22,
+ 0x58, 0x0a, 0x1f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61,
+ 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x7f, 0x0a, 0x0c, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x1a,
+ 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x1d, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x70, 0x12, 0x1f, 0x0a, 0x05, 0x72,
+ 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x88, 0x01, 0x0a,
+ 0x21, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74,
+ 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f,
+ 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f,
+ 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x53, 0x0a, 0x12, 0x43, 0x6f, 0x75, 0x6e, 0x74,
+ 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a,
+ 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x1f, 0x0a, 0x05, 0x72,
+ 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x48, 0x0a, 0x14,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, 0x0a, 0x07,
+ 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64,
+ 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x30, 0x0a, 0x14, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65,
+ 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18,
+ 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x20, 0x50, 0x72, 0x65, 0x76,
+ 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x45,
+ 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06,
+ 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f,
+ 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x22, 0x20, 0x0a, 0x06, 0x45, 0x78,
+ 0x69, 0x73, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x10,
+ 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x18,
+ 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69,
+ 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72,
+ 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03,
+ 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x14,
+ 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72,
+ 0x65, 0x67, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75,
+ 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64,
+ 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0x30, 0x0a, 0x16,
+ 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x1e,
+ 0x0a, 0x0c, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e,
+ 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x95,
+ 0x01, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70,
+ 0x69, 0x72, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
+ 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x7e, 0x0a, 0x18, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x4f, 0x72,
+ 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6e, 0x65, 0x77,
+ 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x22, 0x52, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e,
+ 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a,
+ 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4c, 0x0a, 0x22, 0x47, 0x65,
+ 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64,
+ 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x22, 0x47, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x22, 0x54, 0x0a, 0x14, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x6e, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x41, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x64,
+ 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f,
+ 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0x96, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x61, 0x75,
+ 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x41,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x61,
+ 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x1a,
+ 0x4f, 0x0a, 0x0a, 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a,
+ 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64,
+ 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a,
+ 0x22, 0x4c, 0x0a, 0x1f, 0x41, 0x64, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x24,
+ 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x03, 0x69, 0x64, 0x73, 0x22, 0x22, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x25, 0x0a, 0x11, 0x41, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a,
+ 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22,
+ 0xb2, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06,
+ 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65,
+ 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
+ 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x64, 0x61, 0x74, 0x65,
+ 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08,
+ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08,
+ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75,
+ 0x65, 0x72, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75,
+ 0x65, 0x72, 0x49, 0x44, 0x22, 0xa6, 0x02, 0x0a, 0x1c, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a,
+ 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a,
+ 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
+ 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d,
+ 0x70, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x74, 0x74, 0x65,
+ 0x6d, 0x70, 0x74, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x3e, 0x0a, 0x0f, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62,
+ 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x61,
+ 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x96, 0x01,
+ 0x0a, 0x14, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73,
+ 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68,
+ 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18,
+ 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f,
+ 0x6b, 0x65, 0x64, 0x42, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x76,
+ 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x22, 0x2d, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6b,
+ 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65,
+ 0x79, 0x48, 0x61, 0x73, 0x68, 0x22, 0x82, 0x01, 0x0a, 0x08, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65,
+ 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x42,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x42, 0x79,
+ 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x41, 0x0a, 0x19, 0x53, 0x65,
+ 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x69, 0x64,
+ 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
+ 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x92, 0x01,
+ 0x0a, 0x0e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c,
+ 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44,
+ 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 0x6c, 0x61,
+ 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, 0x65, 0x53, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, 0x65, 0x53, 0x65,
+ 0x6e, 0x74, 0x22, 0xac, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65,
+ 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a,
+ 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49,
+ 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x66, 0x74, 0x65,
+ 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73,
+ 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73,
+ 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x65, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x72,
+ 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72,
+ 0x65, 0x32, 0xa7, 0x17, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x41, 0x75, 0x74,
+ 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52,
+ 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61,
+ 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0x00, 0x12, 0x35, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69,
+ 0x61, 0x6c, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43,
+ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e,
+ 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x11, 0x47,
+ 0x65, 0x74, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22,
+ 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53,
+ 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00,
+ 0x12, 0x53, 0x0a, 0x18, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x73,
+ 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61,
+ 0x6d, 0x65, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x16, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65,
+ 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x12,
+ 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12,
+ 0x4d, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21,
+ 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x32,
+ 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e,
+ 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74,
+ 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0d, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53,
+ 0x65, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51,
+ 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e,
+ 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x1a, 0x46, 0x51,
+ 0x44, 0x4e, 0x53, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x46,
+ 0x6f, 0x72, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x73, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0d, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45,
+ 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x51, 0x44, 0x4e, 0x53,
+ 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a,
+ 0x19, 0x50, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x61, 0x2e,
+ 0x50, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x40,
+ 0x0a, 0x11, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00,
+ 0x12, 0x48, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x41,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x18, 0x47, 0x65,
+ 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50,
+ 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0x00, 0x12, 0x3e, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e,
+ 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32,
+ 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22,
+ 0x00, 0x12, 0x5c, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x32, 0x12, 0x26, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12,
+ 0x51, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x25,
+ 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74,
+ 0x22, 0x00, 0x12, 0x52, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x21, 0x2e,
+ 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x65, 0x64, 0x12, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61,
+ 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x12, 0x53, 0x65, 0x72,
+ 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12,
+ 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49,
+ 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12,
+ 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x69,
+ 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76,
+ 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x73, 0x61, 0x2e, 0x47,
+ 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x52, 0x4c,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x3b, 0x0a, 0x0f, 0x4e, 0x65, 0x77,
+ 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, 0x41, 0x64,
+ 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x73,
+ 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64,
+ 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x72, 0x65, 0x63,
+ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x73, 0x61, 0x2e,
+ 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12,
+ 0x3b, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x14, 0x2e, 0x73,
+ 0x61, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x16,
+ 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70,
+ 0x74, 0x79, 0x22, 0x00, 0x12, 0x2e, 0x0a, 0x08, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72,
+ 0x12, 0x13, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x11, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72,
+ 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x4e,
+ 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x10, 0x2e, 0x73,
+ 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x53,
+ 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a,
+ 0x0d, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x18,
+ 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65,
+ 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x10,
+ 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12,
+ 0x3e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61,
+ 0x6d, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65,
+ 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12,
+ 0x4b, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65,
+ 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x18,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65,
+ 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00,
+ 0x12, 0x52, 0x0a, 0x12, 0x4e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x23, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x50,
+ 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x73, 0x61,
+ 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x49,
+ 0x44, 0x73, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x16, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x20,
+ 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x18, 0x44, 0x65,
+ 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x16, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
+ 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64,
+ 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x29, 0x5a, 0x27, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x73, 0x61,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_sa_proto_rawDescOnce sync.Once
+ file_sa_proto_rawDescData = file_sa_proto_rawDesc
+)
+
+func file_sa_proto_rawDescGZIP() []byte {
+ file_sa_proto_rawDescOnce.Do(func() {
+ file_sa_proto_rawDescData = protoimpl.X.CompressGZIP(file_sa_proto_rawDescData)
+ })
+ return file_sa_proto_rawDescData
+}
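
The accessor above compresses the raw file descriptor lazily: sync.Once guarantees the gzip pass runs exactly once, and every later caller gets the cached bytes. A minimal sketch of the same do-once memoization pattern, using illustrative names that are not part of the generated API:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var (
	rawOnce sync.Once
	rawData = []byte("raw descriptor bytes") // placeholder payload
)

// compressedData gzips rawData on the first call and returns the cached
// result afterwards; sync.Once makes this safe under concurrent callers.
func compressedData() []byte {
	rawOnce.Do(func() {
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		zw.Write(rawData) // error ignored in this sketch
		zw.Close()
		rawData = buf.Bytes()
	})
	return rawData
}

func main() {
	fmt.Println(len(compressedData()))
}

In the generated file, protoimpl.X.CompressGZIP plays the role of the gzip writer; the payoff is that the compression cost is paid only if descriptor reflection is actually requested, not at package init.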
+
+var file_sa_proto_msgTypes = make([]protoimpl.MessageInfo, 47)
+var file_sa_proto_goTypes = []interface{}{
+ (*RegistrationID)(nil), // 0: sa.RegistrationID
+ (*JSONWebKey)(nil), // 1: sa.JSONWebKey
+ (*AuthorizationID)(nil), // 2: sa.AuthorizationID
+ (*GetPendingAuthorizationRequest)(nil), // 3: sa.GetPendingAuthorizationRequest
+ (*GetValidAuthorizationsRequest)(nil), // 4: sa.GetValidAuthorizationsRequest
+ (*ValidAuthorizations)(nil), // 5: sa.ValidAuthorizations
+ (*Serial)(nil), // 6: sa.Serial
+ (*SerialMetadata)(nil), // 7: sa.SerialMetadata
+ (*Range)(nil), // 8: sa.Range
+ (*Count)(nil), // 9: sa.Count
+ (*Timestamps)(nil), // 10: sa.Timestamps
+ (*CountCertificatesByNamesRequest)(nil), // 11: sa.CountCertificatesByNamesRequest
+ (*CountByNames)(nil), // 12: sa.CountByNames
+ (*CountRegistrationsByIPRequest)(nil), // 13: sa.CountRegistrationsByIPRequest
+ (*CountInvalidAuthorizationsRequest)(nil), // 14: sa.CountInvalidAuthorizationsRequest
+ (*CountOrdersRequest)(nil), // 15: sa.CountOrdersRequest
+ (*CountFQDNSetsRequest)(nil), // 16: sa.CountFQDNSetsRequest
+ (*FQDNSetExistsRequest)(nil), // 17: sa.FQDNSetExistsRequest
+ (*PreviousCertificateExistsRequest)(nil), // 18: sa.PreviousCertificateExistsRequest
+ (*Exists)(nil), // 19: sa.Exists
+ (*AddSerialRequest)(nil), // 20: sa.AddSerialRequest
+ (*AddCertificateRequest)(nil), // 21: sa.AddCertificateRequest
+ (*AddCertificateResponse)(nil), // 22: sa.AddCertificateResponse
+ (*OrderRequest)(nil), // 23: sa.OrderRequest
+ (*NewOrderRequest)(nil), // 24: sa.NewOrderRequest
+ (*NewOrderAndAuthzsRequest)(nil), // 25: sa.NewOrderAndAuthzsRequest
+ (*SetOrderErrorRequest)(nil), // 26: sa.SetOrderErrorRequest
+ (*GetValidOrderAuthorizationsRequest)(nil), // 27: sa.GetValidOrderAuthorizationsRequest
+ (*GetOrderForNamesRequest)(nil), // 28: sa.GetOrderForNamesRequest
+ (*FinalizeOrderRequest)(nil), // 29: sa.FinalizeOrderRequest
+ (*GetAuthorizationsRequest)(nil), // 30: sa.GetAuthorizationsRequest
+ (*Authorizations)(nil), // 31: sa.Authorizations
+ (*AddPendingAuthorizationsRequest)(nil), // 32: sa.AddPendingAuthorizationsRequest
+ (*AuthorizationIDs)(nil), // 33: sa.AuthorizationIDs
+ (*AuthorizationID2)(nil), // 34: sa.AuthorizationID2
+ (*Authorization2IDs)(nil), // 35: sa.Authorization2IDs
+ (*RevokeCertificateRequest)(nil), // 36: sa.RevokeCertificateRequest
+ (*FinalizeAuthorizationRequest)(nil), // 37: sa.FinalizeAuthorizationRequest
+ (*AddBlockedKeyRequest)(nil), // 38: sa.AddBlockedKeyRequest
+ (*KeyBlockedRequest)(nil), // 39: sa.KeyBlockedRequest
+ (*Incident)(nil), // 40: sa.Incident
+ (*SerialsForIncidentRequest)(nil), // 41: sa.SerialsForIncidentRequest
+ (*IncidentSerial)(nil), // 42: sa.IncidentSerial
+ (*GetRevokedCertsRequest)(nil), // 43: sa.GetRevokedCertsRequest
+ (*ValidAuthorizations_MapElement)(nil), // 44: sa.ValidAuthorizations.MapElement
+ nil, // 45: sa.CountByNames.CountsEntry
+ (*Authorizations_MapElement)(nil), // 46: sa.Authorizations.MapElement
+ (*proto.Authorization)(nil), // 47: core.Authorization
+ (*proto.ProblemDetails)(nil), // 48: core.ProblemDetails
+ (*proto.ValidationRecord)(nil), // 49: core.ValidationRecord
+ (*proto.Registration)(nil), // 50: core.Registration
+ (*proto.Certificate)(nil), // 51: core.Certificate
+ (*proto.CertificateStatus)(nil), // 52: core.CertificateStatus
+ (*proto.CRLEntry)(nil), // 53: core.CRLEntry
+ (*emptypb.Empty)(nil), // 54: google.protobuf.Empty
+ (*proto.Order)(nil), // 55: core.Order
+}
+var file_sa_proto_depIdxs = []int32{
+ 44, // 0: sa.ValidAuthorizations.valid:type_name -> sa.ValidAuthorizations.MapElement
+ 8, // 1: sa.CountCertificatesByNamesRequest.range:type_name -> sa.Range
+ 45, // 2: sa.CountByNames.counts:type_name -> sa.CountByNames.CountsEntry
+ 8, // 3: sa.CountRegistrationsByIPRequest.range:type_name -> sa.Range
+ 8, // 4: sa.CountInvalidAuthorizationsRequest.range:type_name -> sa.Range
+ 8, // 5: sa.CountOrdersRequest.range:type_name -> sa.Range
+ 24, // 6: sa.NewOrderAndAuthzsRequest.newOrder:type_name -> sa.NewOrderRequest
+ 47, // 7: sa.NewOrderAndAuthzsRequest.newAuthzs:type_name -> core.Authorization
+ 48, // 8: sa.SetOrderErrorRequest.error:type_name -> core.ProblemDetails
+ 46, // 9: sa.Authorizations.authz:type_name -> sa.Authorizations.MapElement
+ 47, // 10: sa.AddPendingAuthorizationsRequest.authz:type_name -> core.Authorization
+ 49, // 11: sa.FinalizeAuthorizationRequest.validationRecords:type_name -> core.ValidationRecord
+ 48, // 12: sa.FinalizeAuthorizationRequest.validationError:type_name -> core.ProblemDetails
+ 47, // 13: sa.ValidAuthorizations.MapElement.authz:type_name -> core.Authorization
+ 47, // 14: sa.Authorizations.MapElement.authz:type_name -> core.Authorization
+ 0, // 15: sa.StorageAuthority.GetRegistration:input_type -> sa.RegistrationID
+ 1, // 16: sa.StorageAuthority.GetRegistrationByKey:input_type -> sa.JSONWebKey
+ 6, // 17: sa.StorageAuthority.GetSerialMetadata:input_type -> sa.Serial
+ 6, // 18: sa.StorageAuthority.GetCertificate:input_type -> sa.Serial
+ 6, // 19: sa.StorageAuthority.GetPrecertificate:input_type -> sa.Serial
+ 6, // 20: sa.StorageAuthority.GetCertificateStatus:input_type -> sa.Serial
+ 11, // 21: sa.StorageAuthority.CountCertificatesByNames:input_type -> sa.CountCertificatesByNamesRequest
+ 13, // 22: sa.StorageAuthority.CountRegistrationsByIP:input_type -> sa.CountRegistrationsByIPRequest
+ 13, // 23: sa.StorageAuthority.CountRegistrationsByIPRange:input_type -> sa.CountRegistrationsByIPRequest
+ 15, // 24: sa.StorageAuthority.CountOrders:input_type -> sa.CountOrdersRequest
+ 16, // 25: sa.StorageAuthority.CountFQDNSets:input_type -> sa.CountFQDNSetsRequest
+ 16, // 26: sa.StorageAuthority.FQDNSetTimestampsForWindow:input_type -> sa.CountFQDNSetsRequest
+ 17, // 27: sa.StorageAuthority.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest
+ 18, // 28: sa.StorageAuthority.PreviousCertificateExists:input_type -> sa.PreviousCertificateExistsRequest
+ 34, // 29: sa.StorageAuthority.GetAuthorization2:input_type -> sa.AuthorizationID2
+ 30, // 30: sa.StorageAuthority.GetAuthorizations2:input_type -> sa.GetAuthorizationsRequest
+ 3, // 31: sa.StorageAuthority.GetPendingAuthorization2:input_type -> sa.GetPendingAuthorizationRequest
+ 0, // 32: sa.StorageAuthority.CountPendingAuthorizations2:input_type -> sa.RegistrationID
+ 27, // 33: sa.StorageAuthority.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest
+ 14, // 34: sa.StorageAuthority.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest
+ 4, // 35: sa.StorageAuthority.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest
+ 39, // 36: sa.StorageAuthority.KeyBlocked:input_type -> sa.KeyBlockedRequest
+ 41, // 37: sa.StorageAuthority.SerialsForIncident:input_type -> sa.SerialsForIncidentRequest
+ 43, // 38: sa.StorageAuthority.GetRevokedCerts:input_type -> sa.GetRevokedCertsRequest
+ 50, // 39: sa.StorageAuthority.NewRegistration:input_type -> core.Registration
+ 50, // 40: sa.StorageAuthority.UpdateRegistration:input_type -> core.Registration
+ 21, // 41: sa.StorageAuthority.AddCertificate:input_type -> sa.AddCertificateRequest
+ 21, // 42: sa.StorageAuthority.AddPrecertificate:input_type -> sa.AddCertificateRequest
+ 20, // 43: sa.StorageAuthority.AddSerial:input_type -> sa.AddSerialRequest
+ 0, // 44: sa.StorageAuthority.DeactivateRegistration:input_type -> sa.RegistrationID
+ 24, // 45: sa.StorageAuthority.NewOrder:input_type -> sa.NewOrderRequest
+ 25, // 46: sa.StorageAuthority.NewOrderAndAuthzs:input_type -> sa.NewOrderAndAuthzsRequest
+ 23, // 47: sa.StorageAuthority.SetOrderProcessing:input_type -> sa.OrderRequest
+ 26, // 48: sa.StorageAuthority.SetOrderError:input_type -> sa.SetOrderErrorRequest
+ 29, // 49: sa.StorageAuthority.FinalizeOrder:input_type -> sa.FinalizeOrderRequest
+ 23, // 50: sa.StorageAuthority.GetOrder:input_type -> sa.OrderRequest
+ 28, // 51: sa.StorageAuthority.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest
+ 36, // 52: sa.StorageAuthority.RevokeCertificate:input_type -> sa.RevokeCertificateRequest
+ 36, // 53: sa.StorageAuthority.UpdateRevokedCertificate:input_type -> sa.RevokeCertificateRequest
+ 32, // 54: sa.StorageAuthority.NewAuthorizations2:input_type -> sa.AddPendingAuthorizationsRequest
+ 37, // 55: sa.StorageAuthority.FinalizeAuthorization2:input_type -> sa.FinalizeAuthorizationRequest
+ 34, // 56: sa.StorageAuthority.DeactivateAuthorization2:input_type -> sa.AuthorizationID2
+ 38, // 57: sa.StorageAuthority.AddBlockedKey:input_type -> sa.AddBlockedKeyRequest
+ 50, // 58: sa.StorageAuthority.GetRegistration:output_type -> core.Registration
+ 50, // 59: sa.StorageAuthority.GetRegistrationByKey:output_type -> core.Registration
+ 7, // 60: sa.StorageAuthority.GetSerialMetadata:output_type -> sa.SerialMetadata
+ 51, // 61: sa.StorageAuthority.GetCertificate:output_type -> core.Certificate
+ 51, // 62: sa.StorageAuthority.GetPrecertificate:output_type -> core.Certificate
+ 52, // 63: sa.StorageAuthority.GetCertificateStatus:output_type -> core.CertificateStatus
+ 12, // 64: sa.StorageAuthority.CountCertificatesByNames:output_type -> sa.CountByNames
+ 9, // 65: sa.StorageAuthority.CountRegistrationsByIP:output_type -> sa.Count
+ 9, // 66: sa.StorageAuthority.CountRegistrationsByIPRange:output_type -> sa.Count
+ 9, // 67: sa.StorageAuthority.CountOrders:output_type -> sa.Count
+ 9, // 68: sa.StorageAuthority.CountFQDNSets:output_type -> sa.Count
+ 10, // 69: sa.StorageAuthority.FQDNSetTimestampsForWindow:output_type -> sa.Timestamps
+ 19, // 70: sa.StorageAuthority.FQDNSetExists:output_type -> sa.Exists
+ 19, // 71: sa.StorageAuthority.PreviousCertificateExists:output_type -> sa.Exists
+ 47, // 72: sa.StorageAuthority.GetAuthorization2:output_type -> core.Authorization
+ 31, // 73: sa.StorageAuthority.GetAuthorizations2:output_type -> sa.Authorizations
+ 47, // 74: sa.StorageAuthority.GetPendingAuthorization2:output_type -> core.Authorization
+ 9, // 75: sa.StorageAuthority.CountPendingAuthorizations2:output_type -> sa.Count
+ 31, // 76: sa.StorageAuthority.GetValidOrderAuthorizations2:output_type -> sa.Authorizations
+ 9, // 77: sa.StorageAuthority.CountInvalidAuthorizations2:output_type -> sa.Count
+ 31, // 78: sa.StorageAuthority.GetValidAuthorizations2:output_type -> sa.Authorizations
+ 19, // 79: sa.StorageAuthority.KeyBlocked:output_type -> sa.Exists
+ 42, // 80: sa.StorageAuthority.SerialsForIncident:output_type -> sa.IncidentSerial
+ 53, // 81: sa.StorageAuthority.GetRevokedCerts:output_type -> core.CRLEntry
+ 50, // 82: sa.StorageAuthority.NewRegistration:output_type -> core.Registration
+ 54, // 83: sa.StorageAuthority.UpdateRegistration:output_type -> google.protobuf.Empty
+ 22, // 84: sa.StorageAuthority.AddCertificate:output_type -> sa.AddCertificateResponse
+ 54, // 85: sa.StorageAuthority.AddPrecertificate:output_type -> google.protobuf.Empty
+ 54, // 86: sa.StorageAuthority.AddSerial:output_type -> google.protobuf.Empty
+ 54, // 87: sa.StorageAuthority.DeactivateRegistration:output_type -> google.protobuf.Empty
+ 55, // 88: sa.StorageAuthority.NewOrder:output_type -> core.Order
+ 55, // 89: sa.StorageAuthority.NewOrderAndAuthzs:output_type -> core.Order
+ 54, // 90: sa.StorageAuthority.SetOrderProcessing:output_type -> google.protobuf.Empty
+ 54, // 91: sa.StorageAuthority.SetOrderError:output_type -> google.protobuf.Empty
+ 54, // 92: sa.StorageAuthority.FinalizeOrder:output_type -> google.protobuf.Empty
+ 55, // 93: sa.StorageAuthority.GetOrder:output_type -> core.Order
+ 55, // 94: sa.StorageAuthority.GetOrderForNames:output_type -> core.Order
+ 54, // 95: sa.StorageAuthority.RevokeCertificate:output_type -> google.protobuf.Empty
+ 54, // 96: sa.StorageAuthority.UpdateRevokedCertificate:output_type -> google.protobuf.Empty
+ 35, // 97: sa.StorageAuthority.NewAuthorizations2:output_type -> sa.Authorization2IDs
+ 54, // 98: sa.StorageAuthority.FinalizeAuthorization2:output_type -> google.protobuf.Empty
+ 54, // 99: sa.StorageAuthority.DeactivateAuthorization2:output_type -> google.protobuf.Empty
+ 54, // 100: sa.StorageAuthority.AddBlockedKey:output_type -> google.protobuf.Empty
+ 58, // [58:101] is the sub-list for method output_type
+ 15, // [15:58] is the sub-list for method input_type
+ 15, // [15:15] is the sub-list for extension type_name
+ 15, // [15:15] is the sub-list for extension extendee
+ 0, // [0:15] is the sub-list for field type_name
+}
+
+func init() { file_sa_proto_init() }
+func file_sa_proto_init() {
+ if File_sa_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_sa_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegistrationID); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*JSONWebKey); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AuthorizationID); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetPendingAuthorizationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetValidAuthorizationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidAuthorizations); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Serial); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SerialMetadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Range); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Count); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Timestamps); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountCertificatesByNamesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountByNames); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountRegistrationsByIPRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountInvalidAuthorizationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountOrdersRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CountFQDNSetsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FQDNSetExistsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PreviousCertificateExistsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exists); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddSerialRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddCertificateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddCertificateResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OrderRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NewOrderRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NewOrderAndAuthzsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetOrderErrorRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetValidOrderAuthorizationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetOrderForNamesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FinalizeOrderRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetAuthorizationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Authorizations); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddPendingAuthorizationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AuthorizationIDs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AuthorizationID2); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Authorization2IDs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RevokeCertificateRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FinalizeAuthorizationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddBlockedKeyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeyBlockedRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Incident); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SerialsForIncidentRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IncidentSerial); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetRevokedCertsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidAuthorizations_MapElement); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_sa_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Authorizations_MapElement); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_sa_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 47,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_sa_proto_goTypes,
+ DependencyIndexes: file_sa_proto_depIdxs,
+ MessageInfos: file_sa_proto_msgTypes,
+ }.Build()
+ File_sa_proto = out.File
+ file_sa_proto_rawDesc = nil
+ file_sa_proto_goTypes = nil
+ file_sa_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto
new file mode 100644
index 00000000000..6eafefbe432
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto
@@ -0,0 +1,306 @@
+syntax = "proto3";
+
+package sa;
+option go_package = "github.com/letsencrypt/boulder/sa/proto";
+
+import "core/proto/core.proto";
+import "google/protobuf/empty.proto";
+
+service StorageAuthority {
+ // Getters
+ rpc GetRegistration(RegistrationID) returns (core.Registration) {}
+ rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {}
+ rpc GetSerialMetadata(Serial) returns (SerialMetadata) {}
+ rpc GetCertificate(Serial) returns (core.Certificate) {}
+ rpc GetPrecertificate(Serial) returns (core.Certificate) {}
+ rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {}
+ rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {}
+ rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {}
+ rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {}
+ rpc CountOrders(CountOrdersRequest) returns (Count) {}
+ // Returns a count of certificates issued for the given set of domains
+ // (FQDN set) within the given time window.
+ rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {}
+ rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {}
+ rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {}
+ rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {}
+ rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {}
+ rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {}
+ rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {}
+ rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {}
+ rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {}
+ rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {}
+ rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {}
+ rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {}
+ rpc SerialsForIncident(SerialsForIncidentRequest) returns (stream IncidentSerial) {}
+ rpc GetRevokedCerts(GetRevokedCertsRequest) returns (stream core.CRLEntry) {}
+ // Adders
+ rpc NewRegistration(core.Registration) returns (core.Registration) {}
+ rpc UpdateRegistration(core.Registration) returns (google.protobuf.Empty) {}
+ rpc AddCertificate(AddCertificateRequest) returns (AddCertificateResponse) {}
+ rpc AddPrecertificate(AddCertificateRequest) returns (google.protobuf.Empty) {}
+ rpc AddSerial(AddSerialRequest) returns (google.protobuf.Empty) {}
+ rpc DeactivateRegistration(RegistrationID) returns (google.protobuf.Empty) {}
+ rpc NewOrder(NewOrderRequest) returns (core.Order) {}
+ rpc NewOrderAndAuthzs(NewOrderAndAuthzsRequest) returns (core.Order) {}
+ rpc SetOrderProcessing(OrderRequest) returns (google.protobuf.Empty) {}
+ rpc SetOrderError(SetOrderErrorRequest) returns (google.protobuf.Empty) {}
+ rpc FinalizeOrder(FinalizeOrderRequest) returns (google.protobuf.Empty) {}
+ rpc GetOrder(OrderRequest) returns (core.Order) {}
+ rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {}
+ rpc RevokeCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {}
+ rpc UpdateRevokedCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {}
+ rpc NewAuthorizations2(AddPendingAuthorizationsRequest) returns (Authorization2IDs) {}
+ rpc FinalizeAuthorization2(FinalizeAuthorizationRequest) returns (google.protobuf.Empty) {}
+ rpc DeactivateAuthorization2(AuthorizationID2) returns (google.protobuf.Empty) {}
+ rpc AddBlockedKey(AddBlockedKeyRequest) returns (google.protobuf.Empty) {}
+}
+
+message RegistrationID {
+ int64 id = 1;
+}
+
+message JSONWebKey {
+ bytes jwk = 1;
+}
+
+message AuthorizationID {
+ string id = 1;
+}
+
+message GetPendingAuthorizationRequest {
+ int64 registrationID = 1;
+ string identifierType = 2;
+ string identifierValue = 3;
+ // Result must be valid until at least this Unix timestamp (nanoseconds)
+ int64 validUntil = 4;
+}
+
+message GetValidAuthorizationsRequest {
+ int64 registrationID = 1;
+ repeated string domains = 2;
+ int64 now = 3; // Unix timestamp (nanoseconds)
+}
+
+message ValidAuthorizations {
+ message MapElement {
+ string domain = 1;
+ core.Authorization authz = 2;
+ }
+ repeated MapElement valid = 1;
+}
+
+message Serial {
+ string serial = 1;
+}
+
+message SerialMetadata {
+ string serial = 1;
+ int64 registrationID = 2;
+ int64 created = 3; // Unix timestamp (nanoseconds)
+ int64 expires = 4; // Unix timestamp (nanoseconds)
+}
+
+message Range {
+ int64 earliest = 1; // Unix timestamp (nanoseconds)
+ int64 latest = 2; // Unix timestamp (nanoseconds)
+}
+
+message Count {
+ int64 count = 1;
+}
+
+message Timestamps {
+ repeated int64 timestamps = 1; // Unix timestamp (nanoseconds)
+}
+
+message CountCertificatesByNamesRequest {
+ Range range = 1;
+ repeated string names = 2;
+}
+
+message CountByNames {
+ map<string, int64> counts = 1;
+}
+
+message CountRegistrationsByIPRequest {
+ bytes ip = 1;
+ Range range = 2;
+}
+
+message CountInvalidAuthorizationsRequest {
+ int64 registrationID = 1;
+ string hostname = 2;
+ // Count authorizations that expire in this range.
+ Range range = 3;
+}
+
+message CountOrdersRequest {
+ int64 accountID = 1;
+ Range range = 2;
+}
+
+message CountFQDNSetsRequest {
+ int64 window = 1;
+ repeated string domains = 2;
+}
+
+message FQDNSetExistsRequest {
+ repeated string domains = 1;
+}
+
+message PreviousCertificateExistsRequest {
+ string domain = 1;
+ int64 regID = 2;
+}
+
+message Exists {
+ bool exists = 1;
+}
+
+message AddSerialRequest {
+ int64 regID = 1;
+ string serial = 2;
+ int64 created = 3; // Unix timestamp (nanoseconds)
+ int64 expires = 4; // Unix timestamp (nanoseconds)
+}
+
+message AddCertificateRequest {
+ bytes der = 1;
+ int64 regID = 2;
+ // A signed OCSP response for the certificate contained in "der".
+ // Note: The certificate status in the OCSP response is assumed to be 0 (good).
+ bytes ocsp = 3;
+ // The issuance time. When not present, the SA defaults to using
+ // the current time. The orphan-finder uses this parameter to add
+ // certificates with the correct historical issuance date.
+ int64 issued = 4;
+ int64 issuerID = 5;
+}
+
+message AddCertificateResponse {
+ string digest = 1;
+}
+
+message OrderRequest {
+ int64 id = 1;
+}
+
+message NewOrderRequest {
+ int64 registrationID = 1;
+ int64 expires = 2;
+ repeated string names = 3;
+ repeated int64 v2Authorizations = 4;
+}
+
+message NewOrderAndAuthzsRequest {
+ NewOrderRequest newOrder = 1;
+ repeated core.Authorization newAuthzs = 2;
+}
+
+message SetOrderErrorRequest {
+ int64 id = 1;
+ core.ProblemDetails error = 2;
+}
+
+message GetValidOrderAuthorizationsRequest {
+ int64 id = 1;
+ int64 acctID = 2;
+}
+
+message GetOrderForNamesRequest {
+ int64 acctID = 1;
+ repeated string names = 2;
+}
+
+message FinalizeOrderRequest {
+ int64 id = 1;
+ string certificateSerial = 2;
+}
+
+message GetAuthorizationsRequest {
+ int64 registrationID = 1;
+ repeated string domains = 2;
+ int64 now = 3; // Unix timestamp (nanoseconds)
+}
+
+message Authorizations {
+ message MapElement {
+ string domain = 1;
+ core.Authorization authz = 2;
+ }
+ repeated MapElement authz = 1;
+}
+
+message AddPendingAuthorizationsRequest {
+ repeated core.Authorization authz = 1;
+}
+
+message AuthorizationIDs {
+ repeated string ids = 1;
+}
+
+message AuthorizationID2 {
+ int64 id = 1;
+}
+
+message Authorization2IDs {
+ repeated int64 ids = 1;
+}
+
+message RevokeCertificateRequest {
+ string serial = 1;
+ int64 reason = 2;
+ int64 date = 3; // Unix timestamp (nanoseconds)
+ int64 backdate = 5; // Unix timestamp (nanoseconds)
+ bytes response = 4;
+ int64 issuerID = 6;
+}
+
+message FinalizeAuthorizationRequest {
+ int64 id = 1;
+ string status = 2;
+ int64 expires = 3; // Unix timestamp (nanoseconds)
+ string attempted = 4;
+ repeated core.ValidationRecord validationRecords = 5;
+ core.ProblemDetails validationError = 6;
+ int64 attemptedAt = 7; // Unix timestamp (nanoseconds)
+}
+
+message AddBlockedKeyRequest {
+ bytes keyHash = 1;
+ int64 added = 2; // Unix timestamp (nanoseconds)
+ string source = 3;
+ string comment = 4;
+ int64 revokedBy = 5;
+}
+
+message KeyBlockedRequest {
+ bytes keyHash = 1;
+}
+
+message Incident {
+ int64 id = 1;
+ string serialTable = 2;
+ string url = 3;
+ int64 renewBy = 4; // Unix timestamp (nanoseconds)
+ bool enabled = 5;
+}
+
+message SerialsForIncidentRequest {
+ string incidentTable = 1;
+}
+
+message IncidentSerial {
+ string serial = 1;
+ int64 registrationID = 2;
+ int64 orderID = 3;
+ int64 lastNoticeSent = 4; // Unix timestamp (nanoseconds)
+}
+
+message GetRevokedCertsRequest {
+ int64 issuerNameID = 1;
+ int64 expiresAfter = 2; // Unix timestamp (nanoseconds), inclusive
+ int64 expiresBefore = 3; // Unix timestamp (nanoseconds), exclusive
+ int64 revokedBefore = 4; // Unix timestamp (nanoseconds)
+}
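
One convention runs through this whole schema: every time field is an int64 Unix timestamp in nanoseconds rather than a google.protobuf.Timestamp. A hedged sketch of populating one of the messages above from Go under that convention — the registration ID, serial, and 90-day validity window are made-up values:

package main

import (
	"fmt"
	"time"

	sapb "github.com/letsencrypt/boulder/sa/proto"
)

func main() {
	now := time.Now()
	req := &sapb.AddSerialRequest{
		RegID:   1234,         // hypothetical registration ID
		Serial:  "00deadbeef", // hypothetical certificate serial
		Created: now.UnixNano(),
		// Unix timestamp (nanoseconds), per the schema comments above.
		Expires: now.Add(90 * 24 * time.Hour).UnixNano(),
	}
	fmt.Println(req.String())
}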
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go b/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go
new file mode 100644
index 00000000000..7534016eb26
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go
@@ -0,0 +1,1682 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.20.1
+// source: sa.proto
+
+package proto
+
+import (
+ context "context"
+ proto "github.com/letsencrypt/boulder/core/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// StorageAuthorityClient is the client API for StorageAuthority service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type StorageAuthorityClient interface {
+ // Getters
+ GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error)
+ GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error)
+ GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error)
+ GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+ GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+ GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error)
+ CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error)
+ CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
+ CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
+ CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error)
+ // Returns a count of certificates issued for the given set of domains
+ // (FQDN set) within the given time window.
+ CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error)
+ FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error)
+ FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error)
+ PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error)
+ GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error)
+ GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error)
+ CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error)
+ GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error)
+ GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error)
+ SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthority_SerialsForIncidentClient, error)
+ GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthority_GetRevokedCertsClient, error)
+ // Adders
+ NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error)
+ UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error)
+ AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
+ NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error)
+ SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
+ GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error)
+ RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ NewAuthorizations2(ctx context.Context, in *AddPendingAuthorizationsRequest, opts ...grpc.CallOption) (*Authorization2IDs, error)
+ FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+}
+
+type storageAuthorityClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewStorageAuthorityClient(cc grpc.ClientConnInterface) StorageAuthorityClient {
+ return &storageAuthorityClient{cc}
+}
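
A minimal sketch of constructing the client against a live connection and issuing a unary call; the address and registration ID are placeholders, and a real deployment would configure TLS credentials instead of the insecure transport used here:

package main

import (
	"context"
	"log"

	sapb "github.com/letsencrypt/boulder/sa/proto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial a hypothetical storage-authority endpoint.
	conn, err := grpc.Dial("sa.example:9095",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := sapb.NewStorageAuthorityClient(conn)
	reg, err := client.GetRegistration(context.Background(), &sapb.RegistrationID{Id: 1234})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got registration %d", reg.Id)
}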
+
+func (c *storageAuthorityClient) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) {
+ out := new(proto.Registration)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRegistration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) {
+ out := new(proto.Registration)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRegistrationByKey", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) {
+ out := new(SerialMetadata)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetSerialMetadata", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) {
+ out := new(proto.Certificate)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetCertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) {
+ out := new(proto.Certificate)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetPrecertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) {
+ out := new(proto.CertificateStatus)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetCertificateStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) {
+ out := new(CountByNames)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountCertificatesByNames", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountRegistrationsByIP", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountRegistrationsByIPRange", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountOrders", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountFQDNSets", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) {
+ out := new(Timestamps)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FQDNSetTimestampsForWindow", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) {
+ out := new(Exists)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FQDNSetExists", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) {
+ out := new(Exists)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/PreviousCertificateExists", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) {
+ out := new(proto.Authorization)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetAuthorization2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
+ out := new(Authorizations)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) {
+ out := new(proto.Authorization)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetPendingAuthorization2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountPendingAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
+ out := new(Authorizations)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetValidOrderAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) {
+ out := new(Count)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountInvalidAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
+ out := new(Authorizations)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetValidAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) {
+ out := new(Exists)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/KeyBlocked", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthority_SerialsForIncidentClient, error) {
+ stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[0], "/sa.StorageAuthority/SerialsForIncident", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &storageAuthoritySerialsForIncidentClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type StorageAuthority_SerialsForIncidentClient interface {
+ Recv() (*IncidentSerial, error)
+ grpc.ClientStream
+}
+
+type storageAuthoritySerialsForIncidentClient struct {
+ grpc.ClientStream
+}
+
+func (x *storageAuthoritySerialsForIncidentClient) Recv() (*IncidentSerial, error) {
+ m := new(IncidentSerial)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
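
SerialsForIncident is one of the two server-streaming methods in this service, so the generated client hands back a stream whose Recv loop ends with io.EOF rather than a single response. A sketch of draining it, given an established connection — the incident table name is hypothetical:

package sketch

import (
	"context"
	"errors"
	"io"
	"log"

	sapb "github.com/letsencrypt/boulder/sa/proto"
	"google.golang.org/grpc"
)

// drainSerials reads every IncidentSerial the server sends for the given
// (hypothetical) incident table, returning nil once the stream is done.
func drainSerials(ctx context.Context, conn *grpc.ClientConn) error {
	client := sapb.NewStorageAuthorityClient(conn)
	stream, err := client.SerialsForIncident(ctx,
		&sapb.SerialsForIncidentRequest{IncidentTable: "incident_example"})
	if err != nil {
		return err
	}
	for {
		serial, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // server finished the stream
		}
		if err != nil {
			return err
		}
		log.Printf("incident serial %s (registration %d)", serial.Serial, serial.RegistrationID)
	}
}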
+
+func (c *storageAuthorityClient) GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthority_GetRevokedCertsClient, error) {
+ stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[1], "/sa.StorageAuthority/GetRevokedCerts", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &storageAuthorityGetRevokedCertsClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type StorageAuthority_GetRevokedCertsClient interface {
+ Recv() (*proto.CRLEntry, error)
+ grpc.ClientStream
+}
+
+type storageAuthorityGetRevokedCertsClient struct {
+ grpc.ClientStream
+}
+
+func (x *storageAuthorityGetRevokedCertsClient) Recv() (*proto.CRLEntry, error) {
+ m := new(proto.CRLEntry)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *storageAuthorityClient) NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) {
+ out := new(proto.Registration)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewRegistration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/UpdateRegistration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error) {
+ out := new(AddCertificateResponse)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddCertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddPrecertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddSerial", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/DeactivateRegistration", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) {
+ out := new(proto.Order)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewOrder", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error) {
+ out := new(proto.Order)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewOrderAndAuthzs", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/SetOrderProcessing", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/SetOrderError", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FinalizeOrder", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) {
+ out := new(proto.Order)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetOrder", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) {
+ out := new(proto.Order)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetOrderForNames", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/RevokeCertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/UpdateRevokedCertificate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) NewAuthorizations2(ctx context.Context, in *AddPendingAuthorizationsRequest, opts ...grpc.CallOption) (*Authorization2IDs, error) {
+ out := new(Authorization2IDs)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewAuthorizations2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FinalizeAuthorization2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/DeactivateAuthorization2", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *storageAuthorityClient) AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddBlockedKey", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
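
The unary methods above all share the same generated shape: allocate the typed response, Invoke with the full method name, and return. Mutating RPCs with nothing to report answer with *emptypb.Empty, so callers simply discard the first return value. A hedged call-site sketch, again written as if inside this package and with request fields elided:

    func blockKey(ctx context.Context, sac StorageAuthorityClient) error {
        // Request fields elided; see sa.proto for AddBlockedKeyRequest.
        _, err := sac.AddBlockedKey(ctx, &AddBlockedKeyRequest{})
        return err
    }
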
+
+// StorageAuthorityServer is the server API for StorageAuthority service.
+// All implementations must embed UnimplementedStorageAuthorityServer
+// for forward compatibility
+type StorageAuthorityServer interface {
+ // Getters
+ GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error)
+ GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error)
+ GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error)
+ GetCertificate(context.Context, *Serial) (*proto.Certificate, error)
+ GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error)
+ GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error)
+ CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error)
+ CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error)
+ CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error)
+ CountOrders(context.Context, *CountOrdersRequest) (*Count, error)
+	CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error)
+ FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error)
+ FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error)
+ PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error)
+ GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error)
+ GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error)
+ GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error)
+ CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error)
+ GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error)
+	// Return a count of authorizations with status "invalid" that belong to
+	// a given registration ID and expire in the given time range.
+	CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error)
+ GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error)
+ KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error)
+ SerialsForIncident(*SerialsForIncidentRequest, StorageAuthority_SerialsForIncidentServer) error
+ GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthority_GetRevokedCertsServer) error
+ // Adders
+ NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error)
+ UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error)
+ AddCertificate(context.Context, *AddCertificateRequest) (*AddCertificateResponse, error)
+ AddPrecertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error)
+ AddSerial(context.Context, *AddSerialRequest) (*emptypb.Empty, error)
+ DeactivateRegistration(context.Context, *RegistrationID) (*emptypb.Empty, error)
+ NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error)
+ NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error)
+ SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error)
+ SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error)
+ FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error)
+ GetOrder(context.Context, *OrderRequest) (*proto.Order, error)
+ GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error)
+ RevokeCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error)
+ UpdateRevokedCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error)
+ NewAuthorizations2(context.Context, *AddPendingAuthorizationsRequest) (*Authorization2IDs, error)
+ FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error)
+ DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error)
+ AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error)
+ mustEmbedUnimplementedStorageAuthorityServer()
+}
+
+// UnimplementedStorageAuthorityServer must be embedded to have forward-compatible implementations.
+type UnimplementedStorageAuthorityServer struct {
+}
+
+func (UnimplementedStorageAuthorityServer) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetRegistration not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetRegistrationByKey not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetSerialMetadata not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetCertificate(context.Context, *Serial) (*proto.Certificate, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetCertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetPrecertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetCertificateStatus not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountCertificatesByNames not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIP not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIPRange not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountOrders(context.Context, *CountOrdersRequest) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountOrders not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountFQDNSets not implemented")
+}
+func (UnimplementedStorageAuthorityServer) FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FQDNSetTimestampsForWindow not implemented")
+}
+func (UnimplementedStorageAuthorityServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented")
+}
+func (UnimplementedStorageAuthorityServer) PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PreviousCertificateExists not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetPendingAuthorization2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountPendingAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetValidOrderAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CountInvalidAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetValidAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method KeyBlocked not implemented")
+}
+func (UnimplementedStorageAuthorityServer) SerialsForIncident(*SerialsForIncidentRequest, StorageAuthority_SerialsForIncidentServer) error {
+ return status.Errorf(codes.Unimplemented, "method SerialsForIncident not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthority_GetRevokedCertsServer) error {
+ return status.Errorf(codes.Unimplemented, "method GetRevokedCerts not implemented")
+}
+func (UnimplementedStorageAuthorityServer) NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NewRegistration not implemented")
+}
+func (UnimplementedStorageAuthorityServer) UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateRegistration not implemented")
+}
+func (UnimplementedStorageAuthorityServer) AddCertificate(context.Context, *AddCertificateRequest) (*AddCertificateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddCertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) AddPrecertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddPrecertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) AddSerial(context.Context, *AddSerialRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddSerial not implemented")
+}
+func (UnimplementedStorageAuthorityServer) DeactivateRegistration(context.Context, *RegistrationID) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeactivateRegistration not implemented")
+}
+func (UnimplementedStorageAuthorityServer) NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NewOrder not implemented")
+}
+func (UnimplementedStorageAuthorityServer) NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NewOrderAndAuthzs not implemented")
+}
+func (UnimplementedStorageAuthorityServer) SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SetOrderProcessing not implemented")
+}
+func (UnimplementedStorageAuthorityServer) SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SetOrderError not implemented")
+}
+func (UnimplementedStorageAuthorityServer) FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FinalizeOrder not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetOrder(context.Context, *OrderRequest) (*proto.Order, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetOrder not implemented")
+}
+func (UnimplementedStorageAuthorityServer) GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetOrderForNames not implemented")
+}
+func (UnimplementedStorageAuthorityServer) RevokeCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) UpdateRevokedCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateRevokedCertificate not implemented")
+}
+func (UnimplementedStorageAuthorityServer) NewAuthorizations2(context.Context, *AddPendingAuthorizationsRequest) (*Authorization2IDs, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NewAuthorizations2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FinalizeAuthorization2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeactivateAuthorization2 not implemented")
+}
+func (UnimplementedStorageAuthorityServer) AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddBlockedKey not implemented")
+}
+func (UnimplementedStorageAuthorityServer) mustEmbedUnimplementedStorageAuthorityServer() {}
+
+// UnsafeStorageAuthorityServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as methods added to StorageAuthorityServer will
+// result in compilation errors.
+type UnsafeStorageAuthorityServer interface {
+ mustEmbedUnimplementedStorageAuthorityServer()
+}
+
+func RegisterStorageAuthorityServer(s grpc.ServiceRegistrar, srv StorageAuthorityServer) {
+ s.RegisterService(&StorageAuthority_ServiceDesc, srv)
+}
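
Embedding UnimplementedStorageAuthorityServer is what satisfies mustEmbedUnimplementedStorageAuthorityServer, and it turns any RPC added to the service later into a runtime codes.Unimplemented error instead of a build break. A hypothetical skeleton server, assuming the usual grpc and net imports; the type name, address, and stub body are invented for illustration:

    type exampleSA struct {
        UnimplementedStorageAuthorityServer // stubs for every method not overridden
    }

    func (s *exampleSA) GetOrder(ctx context.Context, req *OrderRequest) (*proto.Order, error) {
        return &proto.Order{}, nil // real database lookup elided
    }

    func serve() error {
        lis, err := net.Listen("tcp", "localhost:9095") // hypothetical address
        if err != nil {
            return err
        }
        gs := grpc.NewServer()
        RegisterStorageAuthorityServer(gs, &exampleSA{})
        return gs.Serve(lis)
    }
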
+
+func _StorageAuthority_GetRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RegistrationID)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetRegistration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetRegistration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetRegistration(ctx, req.(*RegistrationID))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetRegistrationByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(JSONWebKey)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetRegistrationByKey",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, req.(*JSONWebKey))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetSerialMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Serial)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetSerialMetadata",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, req.(*Serial))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Serial)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetCertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetCertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetCertificate(ctx, req.(*Serial))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Serial)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetPrecertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetPrecertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetPrecertificate(ctx, req.(*Serial))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Serial)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetCertificateStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetCertificateStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetCertificateStatus(ctx, req.(*Serial))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountCertificatesByNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountCertificatesByNamesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountCertificatesByNames(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountCertificatesByNames",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountCertificatesByNames(ctx, req.(*CountCertificatesByNamesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountRegistrationsByIP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountRegistrationsByIPRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountRegistrationsByIP(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountRegistrationsByIP",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountRegistrationsByIP(ctx, req.(*CountRegistrationsByIPRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountRegistrationsByIPRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountRegistrationsByIPRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountRegistrationsByIPRange(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountRegistrationsByIPRange",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountRegistrationsByIPRange(ctx, req.(*CountRegistrationsByIPRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountOrders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountOrdersRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountOrders(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountOrders",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountOrders(ctx, req.(*CountOrdersRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountFQDNSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountFQDNSetsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountFQDNSets(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountFQDNSets",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountFQDNSets(ctx, req.(*CountFQDNSetsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_FQDNSetTimestampsForWindow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountFQDNSetsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).FQDNSetTimestampsForWindow(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/FQDNSetTimestampsForWindow",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).FQDNSetTimestampsForWindow(ctx, req.(*CountFQDNSetsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_FQDNSetExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FQDNSetExistsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).FQDNSetExists(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/FQDNSetExists",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).FQDNSetExists(ctx, req.(*FQDNSetExistsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_PreviousCertificateExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PreviousCertificateExistsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).PreviousCertificateExists(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/PreviousCertificateExists",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).PreviousCertificateExists(ctx, req.(*PreviousCertificateExistsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthorizationID2)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetAuthorization2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetAuthorization2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetAuthorization2(ctx, req.(*AuthorizationID2))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetAuthorizations2(ctx, req.(*GetAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetPendingAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetPendingAuthorizationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetPendingAuthorization2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetPendingAuthorization2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetPendingAuthorization2(ctx, req.(*GetPendingAuthorizationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountPendingAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RegistrationID)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountPendingAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, req.(*RegistrationID))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetValidOrderAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetValidOrderAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetValidOrderAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, req.(*GetValidOrderAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_CountInvalidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountInvalidAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/CountInvalidAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, req.(*CountInvalidAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetValidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetValidAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetValidAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, req.(*GetValidAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_KeyBlocked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(KeyBlockedRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).KeyBlocked(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/KeyBlocked",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).KeyBlocked(ctx, req.(*KeyBlockedRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_SerialsForIncident_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SerialsForIncidentRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(StorageAuthorityServer).SerialsForIncident(m, &storageAuthoritySerialsForIncidentServer{stream})
+}
+
+type StorageAuthority_SerialsForIncidentServer interface {
+ Send(*IncidentSerial) error
+ grpc.ServerStream
+}
+
+type storageAuthoritySerialsForIncidentServer struct {
+ grpc.ServerStream
+}
+
+func (x *storageAuthoritySerialsForIncidentServer) Send(m *IncidentSerial) error {
+ return x.ServerStream.SendMsg(m)
+}
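
The server half of the stream exposes only Send: the handler wrapper above receives the single request, hands the typed stream to the implementation, and returning from the method ends the stream with an OK status. A hypothetical implementation continuing the exampleSA sketch above; the serial list and the IncidentSerial field name are assumptions standing in for a real query:

    func (s *exampleSA) SerialsForIncident(req *SerialsForIncidentRequest, stream StorageAuthority_SerialsForIncidentServer) error {
        serials := []string{"04:8e", "04:8f"} // stand-in for a database scan
        for _, serial := range serials {
            // Serial field name assumed from the IncidentSerial message.
            if err := stream.Send(&IncidentSerial{Serial: serial}); err != nil {
                return err // the client went away or the stream broke
            }
        }
        return nil // ends the stream with codes.OK
    }
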
+
+func _StorageAuthority_GetRevokedCerts_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(GetRevokedCertsRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(StorageAuthorityServer).GetRevokedCerts(m, &storageAuthorityGetRevokedCertsServer{stream})
+}
+
+type StorageAuthority_GetRevokedCertsServer interface {
+ Send(*proto.CRLEntry) error
+ grpc.ServerStream
+}
+
+type storageAuthorityGetRevokedCertsServer struct {
+ grpc.ServerStream
+}
+
+func (x *storageAuthorityGetRevokedCertsServer) Send(m *proto.CRLEntry) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _StorageAuthority_NewRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(proto.Registration)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).NewRegistration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/NewRegistration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).NewRegistration(ctx, req.(*proto.Registration))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_UpdateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(proto.Registration)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).UpdateRegistration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/UpdateRegistration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).UpdateRegistration(ctx, req.(*proto.Registration))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_AddCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddCertificateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).AddCertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/AddCertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).AddCertificate(ctx, req.(*AddCertificateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_AddPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddCertificateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).AddPrecertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/AddPrecertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).AddPrecertificate(ctx, req.(*AddCertificateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_AddSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddSerialRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).AddSerial(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/AddSerial",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).AddSerial(ctx, req.(*AddSerialRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_DeactivateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RegistrationID)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/DeactivateRegistration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, req.(*RegistrationID))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_NewOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(NewOrderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).NewOrder(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/NewOrder",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).NewOrder(ctx, req.(*NewOrderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_NewOrderAndAuthzs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(NewOrderAndAuthzsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/NewOrderAndAuthzs",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, req.(*NewOrderAndAuthzsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_SetOrderProcessing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(OrderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/SetOrderProcessing",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, req.(*OrderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_SetOrderError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetOrderErrorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).SetOrderError(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/SetOrderError",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).SetOrderError(ctx, req.(*SetOrderErrorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_FinalizeOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FinalizeOrderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).FinalizeOrder(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/FinalizeOrder",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).FinalizeOrder(ctx, req.(*FinalizeOrderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(OrderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetOrder(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetOrder",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetOrder(ctx, req.(*OrderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_GetOrderForNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetOrderForNamesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).GetOrderForNames(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/GetOrderForNames",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).GetOrderForNames(ctx, req.(*GetOrderForNamesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RevokeCertificateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).RevokeCertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/RevokeCertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).RevokeCertificate(ctx, req.(*RevokeCertificateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_UpdateRevokedCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RevokeCertificateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/UpdateRevokedCertificate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, req.(*RevokeCertificateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_NewAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddPendingAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).NewAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/NewAuthorizations2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).NewAuthorizations2(ctx, req.(*AddPendingAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_FinalizeAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FinalizeAuthorizationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/FinalizeAuthorization2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, req.(*FinalizeAuthorizationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_DeactivateAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthorizationID2)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/DeactivateAuthorization2",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, req.(*AuthorizationID2))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthority_AddBlockedKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddBlockedKeyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityServer).AddBlockedKey(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/sa.StorageAuthority/AddBlockedKey",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityServer).AddBlockedKey(ctx, req.(*AddBlockedKeyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// StorageAuthority_ServiceDesc is the grpc.ServiceDesc for the StorageAuthority service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy).
+var StorageAuthority_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "sa.StorageAuthority",
+ HandlerType: (*StorageAuthorityServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "GetRegistration",
+ Handler: _StorageAuthority_GetRegistration_Handler,
+ },
+ {
+ MethodName: "GetRegistrationByKey",
+ Handler: _StorageAuthority_GetRegistrationByKey_Handler,
+ },
+ {
+ MethodName: "GetSerialMetadata",
+ Handler: _StorageAuthority_GetSerialMetadata_Handler,
+ },
+ {
+ MethodName: "GetCertificate",
+ Handler: _StorageAuthority_GetCertificate_Handler,
+ },
+ {
+ MethodName: "GetPrecertificate",
+ Handler: _StorageAuthority_GetPrecertificate_Handler,
+ },
+ {
+ MethodName: "GetCertificateStatus",
+ Handler: _StorageAuthority_GetCertificateStatus_Handler,
+ },
+ {
+ MethodName: "CountCertificatesByNames",
+ Handler: _StorageAuthority_CountCertificatesByNames_Handler,
+ },
+ {
+ MethodName: "CountRegistrationsByIP",
+ Handler: _StorageAuthority_CountRegistrationsByIP_Handler,
+ },
+ {
+ MethodName: "CountRegistrationsByIPRange",
+ Handler: _StorageAuthority_CountRegistrationsByIPRange_Handler,
+ },
+ {
+ MethodName: "CountOrders",
+ Handler: _StorageAuthority_CountOrders_Handler,
+ },
+ {
+ MethodName: "CountFQDNSets",
+ Handler: _StorageAuthority_CountFQDNSets_Handler,
+ },
+ {
+ MethodName: "FQDNSetTimestampsForWindow",
+ Handler: _StorageAuthority_FQDNSetTimestampsForWindow_Handler,
+ },
+ {
+ MethodName: "FQDNSetExists",
+ Handler: _StorageAuthority_FQDNSetExists_Handler,
+ },
+ {
+ MethodName: "PreviousCertificateExists",
+ Handler: _StorageAuthority_PreviousCertificateExists_Handler,
+ },
+ {
+ MethodName: "GetAuthorization2",
+ Handler: _StorageAuthority_GetAuthorization2_Handler,
+ },
+ {
+ MethodName: "GetAuthorizations2",
+ Handler: _StorageAuthority_GetAuthorizations2_Handler,
+ },
+ {
+ MethodName: "GetPendingAuthorization2",
+ Handler: _StorageAuthority_GetPendingAuthorization2_Handler,
+ },
+ {
+ MethodName: "CountPendingAuthorizations2",
+ Handler: _StorageAuthority_CountPendingAuthorizations2_Handler,
+ },
+ {
+ MethodName: "GetValidOrderAuthorizations2",
+ Handler: _StorageAuthority_GetValidOrderAuthorizations2_Handler,
+ },
+ {
+ MethodName: "CountInvalidAuthorizations2",
+ Handler: _StorageAuthority_CountInvalidAuthorizations2_Handler,
+ },
+ {
+ MethodName: "GetValidAuthorizations2",
+ Handler: _StorageAuthority_GetValidAuthorizations2_Handler,
+ },
+ {
+ MethodName: "KeyBlocked",
+ Handler: _StorageAuthority_KeyBlocked_Handler,
+ },
+ {
+ MethodName: "NewRegistration",
+ Handler: _StorageAuthority_NewRegistration_Handler,
+ },
+ {
+ MethodName: "UpdateRegistration",
+ Handler: _StorageAuthority_UpdateRegistration_Handler,
+ },
+ {
+ MethodName: "AddCertificate",
+ Handler: _StorageAuthority_AddCertificate_Handler,
+ },
+ {
+ MethodName: "AddPrecertificate",
+ Handler: _StorageAuthority_AddPrecertificate_Handler,
+ },
+ {
+ MethodName: "AddSerial",
+ Handler: _StorageAuthority_AddSerial_Handler,
+ },
+ {
+ MethodName: "DeactivateRegistration",
+ Handler: _StorageAuthority_DeactivateRegistration_Handler,
+ },
+ {
+ MethodName: "NewOrder",
+ Handler: _StorageAuthority_NewOrder_Handler,
+ },
+ {
+ MethodName: "NewOrderAndAuthzs",
+ Handler: _StorageAuthority_NewOrderAndAuthzs_Handler,
+ },
+ {
+ MethodName: "SetOrderProcessing",
+ Handler: _StorageAuthority_SetOrderProcessing_Handler,
+ },
+ {
+ MethodName: "SetOrderError",
+ Handler: _StorageAuthority_SetOrderError_Handler,
+ },
+ {
+ MethodName: "FinalizeOrder",
+ Handler: _StorageAuthority_FinalizeOrder_Handler,
+ },
+ {
+ MethodName: "GetOrder",
+ Handler: _StorageAuthority_GetOrder_Handler,
+ },
+ {
+ MethodName: "GetOrderForNames",
+ Handler: _StorageAuthority_GetOrderForNames_Handler,
+ },
+ {
+ MethodName: "RevokeCertificate",
+ Handler: _StorageAuthority_RevokeCertificate_Handler,
+ },
+ {
+ MethodName: "UpdateRevokedCertificate",
+ Handler: _StorageAuthority_UpdateRevokedCertificate_Handler,
+ },
+ {
+ MethodName: "NewAuthorizations2",
+ Handler: _StorageAuthority_NewAuthorizations2_Handler,
+ },
+ {
+ MethodName: "FinalizeAuthorization2",
+ Handler: _StorageAuthority_FinalizeAuthorization2_Handler,
+ },
+ {
+ MethodName: "DeactivateAuthorization2",
+ Handler: _StorageAuthority_DeactivateAuthorization2_Handler,
+ },
+ {
+ MethodName: "AddBlockedKey",
+ Handler: _StorageAuthority_AddBlockedKey_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "SerialsForIncident",
+ Handler: _StorageAuthority_SerialsForIncident_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "GetRevokedCerts",
+ Handler: _StorageAuthority_GetRevokedCerts_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "sa.proto",
+}
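
One non-obvious coupling in this descriptor: the client constructors reach into StorageAuthority_ServiceDesc.Streams by position (GetRevokedCerts uses Streams[1] above, and SerialsForIncident presumably Streams[0]), so the order of the Streams slice is load-bearing. A hypothetical guard, assuming the standard testing package, that would catch an accidental reorder:

    func TestStreamDescOrder(t *testing.T) {
        want := []string{"SerialsForIncident", "GetRevokedCerts"}
        for i, name := range want {
            if got := StorageAuthority_ServiceDesc.Streams[i].StreamName; got != name {
                t.Fatalf("Streams[%d] = %q, want %q", i, got, name)
            }
        }
    }
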
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go b/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go
new file mode 100644
index 00000000000..fcf52279dae
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go
@@ -0,0 +1,46 @@
+// Copied from the auto-generated sa_grpc.pb.go
+
+package proto
+
+import (
+ context "context"
+
+ proto "github.com/letsencrypt/boulder/core/proto"
+ grpc "google.golang.org/grpc"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// StorageAuthorityGetterClient is a read-only subset of the sapb.StorageAuthorityClient interface.
+type StorageAuthorityGetterClient interface {
+ GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error)
+ GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error)
+ GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+ GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+ GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error)
+ CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error)
+ CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
+ CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
+ CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error)
+ CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error)
+ FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error)
+ PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error)
+ GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error)
+ GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error)
+ CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error)
+ GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error)
+ GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
+ KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error)
+ GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
+ GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error)
+}
+
+// StorageAuthorityCertificateClient is a subset of the sapb.StorageAuthorityClient interface,
+// limited to reading and writing certificates.
+type StorageAuthorityCertificateClient interface {
+ AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+ AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error)
+ GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
+}
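
Because Go interface satisfaction is implicit, the generated
*StorageAuthorityClient implements both of these subsets with no adapter code;
a consumer declares the narrowest interface it needs, so read-only callers
cannot accidentally gain write access. A hypothetical helper (illustration
only, not part of the vendored code), written as if inside this package so
that Serial and proto.Certificate refer to the declarations used above:

    // lookupSerial depends only on the read-only subset; any full
    // StorageAuthorityClient can be passed in for sa.
    func lookupSerial(ctx context.Context, sa StorageAuthorityGetterClient, serial string) (*proto.Certificate, error) {
        return sa.GetCertificate(ctx, &Serial{Serial: serial})
    }
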
diff --git a/vendor/github.com/lunixbochs/vtclean/.travis.yml b/vendor/github.com/lunixbochs/vtclean/.travis.yml
deleted file mode 100644
index fc0a543252f..00000000000
--- a/vendor/github.com/lunixbochs/vtclean/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-sudo: false
-
-script: go test -v
-
-go:
- - 1.5
- - 1.6
- - 1.7
diff --git a/vendor/github.com/lunixbochs/vtclean/LICENSE b/vendor/github.com/lunixbochs/vtclean/LICENSE
deleted file mode 100644
index 42e82633f19..00000000000
--- a/vendor/github.com/lunixbochs/vtclean/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2015 Ryan Hileman
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/lunixbochs/vtclean/README.md b/vendor/github.com/lunixbochs/vtclean/README.md
deleted file mode 100644
index 99910a4606d..00000000000
--- a/vendor/github.com/lunixbochs/vtclean/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-[![Build Status](https://travis-ci.org/lunixbochs/vtclean.svg?branch=master)](https://travis-ci.org/lunixbochs/vtclean)
-
-vtclean
-----
-
-Clean up raw terminal output by stripping escape sequences, optionally preserving color.
-
-Get it: `go get github.com/lunixbochs/vtclean/vtclean`
-
-API:
-
- import "github.com/lunixbochs/vtclean"
- vtclean.Clean(line string, color bool) string
-
-Command line example:
-
- $ echo -e '\x1b[1;32mcolor example
- color forced to stop at end of line
- backspace is ba\b\bgood
- no beeps!\x07\x07' | ./vtclean -color
-
- color example
- color forced to stop at end of line
- backspace is good
- no beeps!
-
-Go example:
-
- package main
-
- import (
- "fmt"
- "github.com/lunixbochs/vtclean"
- )
-
- func main() {
- line := vtclean.Clean(
- "\033[1;32mcolor, " +
- "curs\033[Aor, " +
- "backspace\b\b\b\b\b\b\b\b\b\b\b\033[K", false)
- fmt.Println(line)
- }
-
-Output:
-
- color, cursor
diff --git a/vendor/github.com/lunixbochs/vtclean/io.go b/vendor/github.com/lunixbochs/vtclean/io.go
deleted file mode 100644
index 31be0076a3e..00000000000
--- a/vendor/github.com/lunixbochs/vtclean/io.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package vtclean
-
-import (
- "bufio"
- "bytes"
- "io"
-)
-
-type reader struct {
- io.Reader
- scanner *bufio.Scanner
- buf []byte
-
- color bool
-}
-
-func NewReader(r io.Reader, color bool) io.Reader {
- return &reader{Reader: r, color: color}
-}
-
-func (r *reader) scan() bool {
- if r.scanner == nil {
- r.scanner = bufio.NewScanner(r.Reader)
- }
- if len(r.buf) > 0 {
- return true
- }
- if r.scanner.Scan() {
- r.buf = []byte(Clean(r.scanner.Text(), r.color) + "\n")
- return true
- }
- return false
-}
-
-func (r *reader) fill(p []byte) int {
- n := len(r.buf)
- copy(p, r.buf)
- if len(p) < len(r.buf) {
- r.buf = r.buf[len(p):]
- n = len(p)
- } else {
- r.buf = nil
- }
- return n
-}
-
-func (r *reader) Read(p []byte) (int, error) {
- n := r.fill(p)
- if n < len(p) {
- if !r.scan() {
- if n == 0 {
- return 0, io.EOF
- }
- return n, nil
- }
- n += r.fill(p[n:])
- }
- return n, nil
-}
-
-type writer struct {
- io.Writer
- buf []byte
- color bool
-}
-
-func NewWriter(w io.Writer, color bool) io.WriteCloser {
- return &writer{Writer: w, color: color}
-}
-
-func (w *writer) Write(p []byte) (int, error) {
- buf := append(w.buf, p...)
- lines := bytes.Split(buf, []byte("\n"))
- if len(lines) > 0 {
- last := len(lines) - 1
- w.buf = lines[last]
- count := 0
- for _, line := range lines[:last] {
- n, err := w.Writer.Write([]byte(Clean(string(line), w.color) + "\n"))
- count += n
- if err != nil {
- return count, err
- }
- }
- }
- return len(p), nil
-}
-
-func (w *writer) Close() error {
- cl := Clean(string(w.buf), w.color)
- _, err := w.Writer.Write([]byte(cl))
- return err
-}
diff --git a/vendor/github.com/lunixbochs/vtclean/line.go b/vendor/github.com/lunixbochs/vtclean/line.go
deleted file mode 100644
index 66ee990be6f..00000000000
--- a/vendor/github.com/lunixbochs/vtclean/line.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package vtclean
-
-type char struct {
- char byte
- vt100 []byte
-}
-
-func chars(p []byte) []char {
- tmp := make([]char, len(p))
- for i, v := range p {
- tmp[i].char = v
- }
- return tmp
-}
-
-type lineEdit struct {
- buf []char
- pos, size int
- vt100 []byte
-}
-
-func newLineEdit(length int) *lineEdit {
- return &lineEdit{buf: make([]char, length)}
-}
-
-func (l *lineEdit) Vt100(p []byte) {
- l.vt100 = p
-}
-
-func (l *lineEdit) Move(x int) {
- if x < 0 && l.pos <= -x {
- l.pos = 0
- } else if x > 0 && l.pos+x > l.size {
- l.pos = l.size
- } else {
- l.pos += x
- }
-}
-
-func (l *lineEdit) MoveAbs(x int) {
- if x < l.size {
- l.pos = x
- }
-}
-
-func (l *lineEdit) Write(p []byte) {
- c := chars(p)
- if len(c) > 0 {
- c[0].vt100 = l.vt100
- l.vt100 = nil
- }
- if len(l.buf)-l.pos < len(c) {
- l.buf = append(l.buf[:l.pos], c...)
- } else {
- copy(l.buf[l.pos:], c)
- }
- l.pos += len(c)
- if l.pos > l.size {
- l.size = l.pos
- }
-}
-
-func (l *lineEdit) Insert(p []byte) {
- c := chars(p)
- if len(c) > 0 {
- c[0].vt100 = l.vt100
- l.vt100 = nil
- }
- l.size += len(c)
- c = append(c, l.buf[l.pos:]...)
- l.buf = append(l.buf[:l.pos], c...)
-}
-
-func (l *lineEdit) Delete(n int) {
- most := l.size - l.pos
- if n > most {
- n = most
- }
- copy(l.buf[l.pos:], l.buf[l.pos+n:])
- l.size -= n
-}
-
-func (l *lineEdit) Clear() {
- for i := 0; i < len(l.buf); i++ {
- l.buf[i].char = ' '
- }
-}
-func (l *lineEdit) ClearLeft() {
- for i := 0; i < l.pos+1; i++ {
- l.buf[i].char = ' '
- }
-}
-func (l *lineEdit) ClearRight() {
- l.size = l.pos
-}
-
-func (l *lineEdit) Bytes() []byte {
- length := 0
- buf := l.buf[:l.size]
- for _, v := range buf {
- length += 1 + len(v.vt100)
- }
- tmp := make([]byte, 0, length)
- for _, v := range buf {
- tmp = append(tmp, v.vt100...)
- tmp = append(tmp, v.char)
- }
- return tmp
-}
-
-func (l *lineEdit) String() string {
- return string(l.Bytes())
-}
diff --git a/vendor/github.com/lunixbochs/vtclean/vtclean.go b/vendor/github.com/lunixbochs/vtclean/vtclean.go
deleted file mode 100644
index 64fe01fdb58..00000000000
--- a/vendor/github.com/lunixbochs/vtclean/vtclean.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package vtclean
-
-import (
- "bytes"
- "regexp"
- "strconv"
-)
-
-// regex based on ECMA-48:
-// 1. optional:
-// one of [ or ]
-// any amount of 0x30-0x3f
-// any amount of 0x20-0x2f
-// 3. exactly one 0x40-0x7e
-var vt100re = regexp.MustCompile(`^\033([\[\]]([0-9:;<=>\?]*)([!"#$%&'()*+,\-./]*))?([@A-Z\[\]^_\x60a-z{|}~])`)
-var vt100exc = regexp.MustCompile(`^\033(\[[^a-zA-Z0-9@\?]+|[\(\)]).`)
-
-// this is to handle the RGB escape generated by `tput initc 1 500 500 500`
-var vt100long = regexp.MustCompile(`^\033](\d+);([^\033]+)\033\\`)
-
-func Clean(line string, color bool) string {
- var edit = newLineEdit(len(line))
- lineb := []byte(line)
-
- hadColor := false
- for i := 0; i < len(lineb); {
- c := lineb[i]
- switch c {
- case '\r':
- edit.MoveAbs(0)
- case '\b':
- edit.Move(-1)
- case '\033':
- // set terminal title
- if bytes.HasPrefix(lineb[i:], []byte("\x1b]0;")) {
- pos := bytes.Index(lineb[i:], []byte("\a"))
- if pos != -1 {
- i += pos + 1
- continue
- }
- }
- if m := vt100long.Find(lineb[i:]); m != nil {
- i += len(m)
- } else if m := vt100exc.Find(lineb[i:]); m != nil {
- i += len(m)
- } else if m := vt100re.FindSubmatch(lineb[i:]); m != nil {
- i += len(m[0])
- num := string(m[2])
- n, err := strconv.Atoi(num)
- if err != nil || n > 10000 {
- n = 1
- }
- switch m[4][0] {
- case 'm':
- if color {
- hadColor = true
- edit.Vt100(m[0])
- }
- case '@':
- edit.Insert(bytes.Repeat([]byte{' '}, n))
- case 'G':
- edit.MoveAbs(n)
- case 'C':
- edit.Move(n)
- case 'D':
- edit.Move(-n)
- case 'P':
- edit.Delete(n)
- case 'K':
- switch num {
- case "", "0":
- edit.ClearRight()
- case "1":
- edit.ClearLeft()
- case "2":
- edit.Clear()
- }
- }
- } else {
- i += 1
- }
- continue
- default:
- if c == '\n' || c == '\t' || c >= ' ' {
- edit.Write([]byte{c})
- }
- }
- i += 1
- }
- out := edit.Bytes()
- if hadColor {
- out = append(out, []byte("\033[0m")...)
- }
- return string(out)
-}
diff --git a/vendor/github.com/manifoldco/promptui/CHANGELOG.md b/vendor/github.com/manifoldco/promptui/CHANGELOG.md
index 563e9d00a0b..ff30afdf08e 100644
--- a/vendor/github.com/manifoldco/promptui/CHANGELOG.md
+++ b/vendor/github.com/manifoldco/promptui/CHANGELOG.md
@@ -7,6 +7,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
## Unreleased
+## [0.9.0] - 2021-10-30
+
+### Fixed
+
+- Resolve license incompatibility in tabwriter
+
+
## [0.8.0] - 2020-09-28
### Added
diff --git a/vendor/github.com/manifoldco/promptui/go.mod b/vendor/github.com/manifoldco/promptui/go.mod
index 760a44deb12..a69023954f7 100644
--- a/vendor/github.com/manifoldco/promptui/go.mod
+++ b/vendor/github.com/manifoldco/promptui/go.mod
@@ -6,11 +6,5 @@ require (
github.com/chzyer/logex v1.1.10 // indirect
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect
- github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a
- github.com/kr/pretty v0.1.0 // indirect
- github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a // indirect
- github.com/mattn/go-colorable v0.0.9 // indirect
- github.com/mattn/go-isatty v0.0.4 // indirect
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b // indirect
- gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
)
diff --git a/vendor/github.com/manifoldco/promptui/go.sum b/vendor/github.com/manifoldco/promptui/go.sum
index be5f990252b..fcb24c8545e 100644
--- a/vendor/github.com/manifoldco/promptui/go.sum
+++ b/vendor/github.com/manifoldco/promptui/go.sum
@@ -4,20 +4,5 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU=
-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw=
-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
-github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b h1:MQE+LT/ABUuuvEZ+YQAMSXindAdUh7slEmAkup74op4=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/github.com/manifoldco/promptui/select.go b/vendor/github.com/manifoldco/promptui/select.go
index 19b9e0c2e0c..b58ed9734ca 100644
--- a/vendor/github.com/manifoldco/promptui/select.go
+++ b/vendor/github.com/manifoldco/promptui/select.go
@@ -5,10 +5,10 @@ import (
"fmt"
"io"
"os"
+ "text/tabwriter"
"text/template"
"github.com/chzyer/readline"
- "github.com/juju/ansiterm"
"github.com/manifoldco/promptui/list"
"github.com/manifoldco/promptui/screenbuf"
)
@@ -587,7 +587,8 @@ func (s *Select) renderDetails(item interface{}) [][]byte {
}
var buf bytes.Buffer
- w := ansiterm.NewTabWriter(&buf, 0, 0, 8, ' ', 0)
+
+ w := tabwriter.NewWriter(&buf, 0, 0, 8, ' ', 0)
err := s.Templates.details.Execute(w, item)
if err != nil {
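
Consistent with the 0.9.0 changelog entry above, this swaps the juju/ansiterm
tab writer for the standard library's text/tabwriter, keeping the same
parameters: minwidth 0, tabwidth 0, padding 8, a space as the pad character,
and no flags. A self-contained sketch of the stdlib call (illustration only,
outside promptui; the sample rows are invented):

    package main

    import (
        "bytes"
        "fmt"
        "text/tabwriter"
    )

    func main() {
        var buf bytes.Buffer
        // Same parameters as the select.go change above.
        w := tabwriter.NewWriter(&buf, 0, 0, 8, ' ', 0)
        fmt.Fprintln(w, "Name:\tpromptui")
        fmt.Fprintln(w, "Version:\t0.9.0")
        w.Flush() // flush buffered cells before reading the result
        fmt.Print(buf.String())
    }
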
diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml
deleted file mode 100644
index 98db8f060bd..00000000000
--- a/vendor/github.com/mattn/go-colorable/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-go:
- - tip
-
-before_install:
- - go get github.com/mattn/goveralls
- - go get golang.org/x/tools/cmd/cover
-script:
- - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md
deleted file mode 100644
index 56729a92ca6..00000000000
--- a/vendor/github.com/mattn/go-colorable/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-# go-colorable
-
-[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
-[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable)
-[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master)
-[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
-
-Colorable writer for windows.
-
-For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
-This package is possible to handle escape sequence for ansi color on windows.
-
-## Too Bad!
-
-![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
-
-
-## So Good!
-
-![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
-
-## Usage
-
-```go
-logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
-logrus.SetOutput(colorable.NewColorableStdout())
-
-logrus.Info("succeeded")
-logrus.Warn("not correct")
-logrus.Error("something error")
-logrus.Fatal("panic")
-```
-
-You can compile above code on non-windows OSs.
-
-## Installation
-
-```
-$ go get github.com/mattn/go-colorable
-```
-
-# License
-
-MIT
-
-# Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
deleted file mode 100644
index 1f28d773d74..00000000000
--- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build appengine
-
-package colorable
-
-import (
- "io"
- "os"
-
- _ "github.com/mattn/go-isatty"
-)
-
-// NewColorable return new instance of Writer which handle escape sequence.
-func NewColorable(file *os.File) io.Writer {
- if file == nil {
- panic("nil passed instead of *os.File to NewColorable()")
- }
-
- return file
-}
-
-// NewColorableStdout return new instance of Writer which handle escape sequence for stdout.
-func NewColorableStdout() io.Writer {
- return os.Stdout
-}
-
-// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
-func NewColorableStderr() io.Writer {
- return os.Stderr
-}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go
deleted file mode 100644
index 887f203dc7f..00000000000
--- a/vendor/github.com/mattn/go-colorable/colorable_others.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build !windows
-// +build !appengine
-
-package colorable
-
-import (
- "io"
- "os"
-
- _ "github.com/mattn/go-isatty"
-)
-
-// NewColorable return new instance of Writer which handle escape sequence.
-func NewColorable(file *os.File) io.Writer {
- if file == nil {
- panic("nil passed instead of *os.File to NewColorable()")
- }
-
- return file
-}
-
-// NewColorableStdout return new instance of Writer which handle escape sequence for stdout.
-func NewColorableStdout() io.Writer {
- return os.Stdout
-}
-
-// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
-func NewColorableStderr() io.Writer {
- return os.Stderr
-}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go
deleted file mode 100644
index e17a5474e98..00000000000
--- a/vendor/github.com/mattn/go-colorable/colorable_windows.go
+++ /dev/null
@@ -1,884 +0,0 @@
-// +build windows
-// +build !appengine
-
-package colorable
-
-import (
- "bytes"
- "io"
- "math"
- "os"
- "strconv"
- "strings"
- "syscall"
- "unsafe"
-
- "github.com/mattn/go-isatty"
-)
-
-const (
- foregroundBlue = 0x1
- foregroundGreen = 0x2
- foregroundRed = 0x4
- foregroundIntensity = 0x8
- foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
- backgroundBlue = 0x10
- backgroundGreen = 0x20
- backgroundRed = 0x40
- backgroundIntensity = 0x80
- backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
-)
-
-type wchar uint16
-type short int16
-type dword uint32
-type word uint16
-
-type coord struct {
- x short
- y short
-}
-
-type smallRect struct {
- left short
- top short
- right short
- bottom short
-}
-
-type consoleScreenBufferInfo struct {
- size coord
- cursorPosition coord
- attributes word
- window smallRect
- maximumWindowSize coord
-}
-
-type consoleCursorInfo struct {
- size dword
- visible int32
-}
-
-var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
- procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
- procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
- procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
- procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
- procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo")
- procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo")
- procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW")
-)
-
-// Writer provide colorable Writer to the console
-type Writer struct {
- out io.Writer
- handle syscall.Handle
- oldattr word
- oldpos coord
-}
-
-// NewColorable return new instance of Writer which handle escape sequence from File.
-func NewColorable(file *os.File) io.Writer {
- if file == nil {
- panic("nil passed instead of *os.File to NewColorable()")
- }
-
- if isatty.IsTerminal(file.Fd()) {
- var csbi consoleScreenBufferInfo
- handle := syscall.Handle(file.Fd())
- procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
- return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
- }
- return file
-}
-
-// NewColorableStdout return new instance of Writer which handle escape sequence for stdout.
-func NewColorableStdout() io.Writer {
- return NewColorable(os.Stdout)
-}
-
-// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
-func NewColorableStderr() io.Writer {
- return NewColorable(os.Stderr)
-}
-
-var color256 = map[int]int{
- 0: 0x000000,
- 1: 0x800000,
- 2: 0x008000,
- 3: 0x808000,
- 4: 0x000080,
- 5: 0x800080,
- 6: 0x008080,
- 7: 0xc0c0c0,
- 8: 0x808080,
- 9: 0xff0000,
- 10: 0x00ff00,
- 11: 0xffff00,
- 12: 0x0000ff,
- 13: 0xff00ff,
- 14: 0x00ffff,
- 15: 0xffffff,
- 16: 0x000000,
- 17: 0x00005f,
- 18: 0x000087,
- 19: 0x0000af,
- 20: 0x0000d7,
- 21: 0x0000ff,
- 22: 0x005f00,
- 23: 0x005f5f,
- 24: 0x005f87,
- 25: 0x005faf,
- 26: 0x005fd7,
- 27: 0x005fff,
- 28: 0x008700,
- 29: 0x00875f,
- 30: 0x008787,
- 31: 0x0087af,
- 32: 0x0087d7,
- 33: 0x0087ff,
- 34: 0x00af00,
- 35: 0x00af5f,
- 36: 0x00af87,
- 37: 0x00afaf,
- 38: 0x00afd7,
- 39: 0x00afff,
- 40: 0x00d700,
- 41: 0x00d75f,
- 42: 0x00d787,
- 43: 0x00d7af,
- 44: 0x00d7d7,
- 45: 0x00d7ff,
- 46: 0x00ff00,
- 47: 0x00ff5f,
- 48: 0x00ff87,
- 49: 0x00ffaf,
- 50: 0x00ffd7,
- 51: 0x00ffff,
- 52: 0x5f0000,
- 53: 0x5f005f,
- 54: 0x5f0087,
- 55: 0x5f00af,
- 56: 0x5f00d7,
- 57: 0x5f00ff,
- 58: 0x5f5f00,
- 59: 0x5f5f5f,
- 60: 0x5f5f87,
- 61: 0x5f5faf,
- 62: 0x5f5fd7,
- 63: 0x5f5fff,
- 64: 0x5f8700,
- 65: 0x5f875f,
- 66: 0x5f8787,
- 67: 0x5f87af,
- 68: 0x5f87d7,
- 69: 0x5f87ff,
- 70: 0x5faf00,
- 71: 0x5faf5f,
- 72: 0x5faf87,
- 73: 0x5fafaf,
- 74: 0x5fafd7,
- 75: 0x5fafff,
- 76: 0x5fd700,
- 77: 0x5fd75f,
- 78: 0x5fd787,
- 79: 0x5fd7af,
- 80: 0x5fd7d7,
- 81: 0x5fd7ff,
- 82: 0x5fff00,
- 83: 0x5fff5f,
- 84: 0x5fff87,
- 85: 0x5fffaf,
- 86: 0x5fffd7,
- 87: 0x5fffff,
- 88: 0x870000,
- 89: 0x87005f,
- 90: 0x870087,
- 91: 0x8700af,
- 92: 0x8700d7,
- 93: 0x8700ff,
- 94: 0x875f00,
- 95: 0x875f5f,
- 96: 0x875f87,
- 97: 0x875faf,
- 98: 0x875fd7,
- 99: 0x875fff,
- 100: 0x878700,
- 101: 0x87875f,
- 102: 0x878787,
- 103: 0x8787af,
- 104: 0x8787d7,
- 105: 0x8787ff,
- 106: 0x87af00,
- 107: 0x87af5f,
- 108: 0x87af87,
- 109: 0x87afaf,
- 110: 0x87afd7,
- 111: 0x87afff,
- 112: 0x87d700,
- 113: 0x87d75f,
- 114: 0x87d787,
- 115: 0x87d7af,
- 116: 0x87d7d7,
- 117: 0x87d7ff,
- 118: 0x87ff00,
- 119: 0x87ff5f,
- 120: 0x87ff87,
- 121: 0x87ffaf,
- 122: 0x87ffd7,
- 123: 0x87ffff,
- 124: 0xaf0000,
- 125: 0xaf005f,
- 126: 0xaf0087,
- 127: 0xaf00af,
- 128: 0xaf00d7,
- 129: 0xaf00ff,
- 130: 0xaf5f00,
- 131: 0xaf5f5f,
- 132: 0xaf5f87,
- 133: 0xaf5faf,
- 134: 0xaf5fd7,
- 135: 0xaf5fff,
- 136: 0xaf8700,
- 137: 0xaf875f,
- 138: 0xaf8787,
- 139: 0xaf87af,
- 140: 0xaf87d7,
- 141: 0xaf87ff,
- 142: 0xafaf00,
- 143: 0xafaf5f,
- 144: 0xafaf87,
- 145: 0xafafaf,
- 146: 0xafafd7,
- 147: 0xafafff,
- 148: 0xafd700,
- 149: 0xafd75f,
- 150: 0xafd787,
- 151: 0xafd7af,
- 152: 0xafd7d7,
- 153: 0xafd7ff,
- 154: 0xafff00,
- 155: 0xafff5f,
- 156: 0xafff87,
- 157: 0xafffaf,
- 158: 0xafffd7,
- 159: 0xafffff,
- 160: 0xd70000,
- 161: 0xd7005f,
- 162: 0xd70087,
- 163: 0xd700af,
- 164: 0xd700d7,
- 165: 0xd700ff,
- 166: 0xd75f00,
- 167: 0xd75f5f,
- 168: 0xd75f87,
- 169: 0xd75faf,
- 170: 0xd75fd7,
- 171: 0xd75fff,
- 172: 0xd78700,
- 173: 0xd7875f,
- 174: 0xd78787,
- 175: 0xd787af,
- 176: 0xd787d7,
- 177: 0xd787ff,
- 178: 0xd7af00,
- 179: 0xd7af5f,
- 180: 0xd7af87,
- 181: 0xd7afaf,
- 182: 0xd7afd7,
- 183: 0xd7afff,
- 184: 0xd7d700,
- 185: 0xd7d75f,
- 186: 0xd7d787,
- 187: 0xd7d7af,
- 188: 0xd7d7d7,
- 189: 0xd7d7ff,
- 190: 0xd7ff00,
- 191: 0xd7ff5f,
- 192: 0xd7ff87,
- 193: 0xd7ffaf,
- 194: 0xd7ffd7,
- 195: 0xd7ffff,
- 196: 0xff0000,
- 197: 0xff005f,
- 198: 0xff0087,
- 199: 0xff00af,
- 200: 0xff00d7,
- 201: 0xff00ff,
- 202: 0xff5f00,
- 203: 0xff5f5f,
- 204: 0xff5f87,
- 205: 0xff5faf,
- 206: 0xff5fd7,
- 207: 0xff5fff,
- 208: 0xff8700,
- 209: 0xff875f,
- 210: 0xff8787,
- 211: 0xff87af,
- 212: 0xff87d7,
- 213: 0xff87ff,
- 214: 0xffaf00,
- 215: 0xffaf5f,
- 216: 0xffaf87,
- 217: 0xffafaf,
- 218: 0xffafd7,
- 219: 0xffafff,
- 220: 0xffd700,
- 221: 0xffd75f,
- 222: 0xffd787,
- 223: 0xffd7af,
- 224: 0xffd7d7,
- 225: 0xffd7ff,
- 226: 0xffff00,
- 227: 0xffff5f,
- 228: 0xffff87,
- 229: 0xffffaf,
- 230: 0xffffd7,
- 231: 0xffffff,
- 232: 0x080808,
- 233: 0x121212,
- 234: 0x1c1c1c,
- 235: 0x262626,
- 236: 0x303030,
- 237: 0x3a3a3a,
- 238: 0x444444,
- 239: 0x4e4e4e,
- 240: 0x585858,
- 241: 0x626262,
- 242: 0x6c6c6c,
- 243: 0x767676,
- 244: 0x808080,
- 245: 0x8a8a8a,
- 246: 0x949494,
- 247: 0x9e9e9e,
- 248: 0xa8a8a8,
- 249: 0xb2b2b2,
- 250: 0xbcbcbc,
- 251: 0xc6c6c6,
- 252: 0xd0d0d0,
- 253: 0xdadada,
- 254: 0xe4e4e4,
- 255: 0xeeeeee,
-}
-
-// `\033]0;TITLESTR\007`
-func doTitleSequence(er *bytes.Reader) error {
- var c byte
- var err error
-
- c, err = er.ReadByte()
- if err != nil {
- return err
- }
- if c != '0' && c != '2' {
- return nil
- }
- c, err = er.ReadByte()
- if err != nil {
- return err
- }
- if c != ';' {
- return nil
- }
- title := make([]byte, 0, 80)
- for {
- c, err = er.ReadByte()
- if err != nil {
- return err
- }
- if c == 0x07 || c == '\n' {
- break
- }
- title = append(title, c)
- }
- if len(title) > 0 {
- title8, err := syscall.UTF16PtrFromString(string(title))
- if err == nil {
- procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8)))
- }
- }
- return nil
-}
-
-// Write write data on console
-func (w *Writer) Write(data []byte) (n int, err error) {
- var csbi consoleScreenBufferInfo
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
-
- er := bytes.NewReader(data)
- var bw [1]byte
-loop:
- for {
- c1, err := er.ReadByte()
- if err != nil {
- break loop
- }
- if c1 != 0x1b {
- bw[0] = c1
- w.out.Write(bw[:])
- continue
- }
- c2, err := er.ReadByte()
- if err != nil {
- break loop
- }
-
- if c2 == ']' {
- if err := doTitleSequence(er); err != nil {
- break loop
- }
- continue
- }
- if c2 != 0x5b {
- continue
- }
-
- var buf bytes.Buffer
- var m byte
- for {
- c, err := er.ReadByte()
- if err != nil {
- break loop
- }
- if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
- m = c
- break
- }
- buf.Write([]byte(string(c)))
- }
-
- switch m {
- case 'A':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.y -= short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'B':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.y += short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'C':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.x += short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'D':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.x -= short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'E':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.x = 0
- csbi.cursorPosition.y += short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'F':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.x = 0
- csbi.cursorPosition.y -= short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'G':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.x = short(n - 1)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'H', 'f':
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- if buf.Len() > 0 {
- token := strings.Split(buf.String(), ";")
- switch len(token) {
- case 1:
- n1, err := strconv.Atoi(token[0])
- if err != nil {
- continue
- }
- csbi.cursorPosition.y = short(n1 - 1)
- case 2:
- n1, err := strconv.Atoi(token[0])
- if err != nil {
- continue
- }
- n2, err := strconv.Atoi(token[1])
- if err != nil {
- continue
- }
- csbi.cursorPosition.x = short(n2 - 1)
- csbi.cursorPosition.y = short(n1 - 1)
- }
- } else {
- csbi.cursorPosition.y = 0
- }
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'J':
- n := 0
- if buf.Len() > 0 {
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- }
- var count, written dword
- var cursor coord
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- switch n {
- case 0:
- cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
- count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
- case 1:
- cursor = coord{x: csbi.window.left, y: csbi.window.top}
- count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.window.top-csbi.cursorPosition.y)*csbi.size.x)
- case 2:
- cursor = coord{x: csbi.window.left, y: csbi.window.top}
- count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
- }
- procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
- procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
- case 'K':
- n := 0
- if buf.Len() > 0 {
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- var cursor coord
- var count, written dword
- switch n {
- case 0:
- cursor = coord{x: csbi.cursorPosition.x + 1, y: csbi.cursorPosition.y}
- count = dword(csbi.size.x - csbi.cursorPosition.x - 1)
- case 1:
- cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
- count = dword(csbi.size.x - csbi.cursorPosition.x)
- case 2:
- cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
- count = dword(csbi.size.x)
- }
- procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
- procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
- case 'm':
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- attr := csbi.attributes
- cs := buf.String()
- if cs == "" {
- procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
- continue
- }
- token := strings.Split(cs, ";")
- for i := 0; i < len(token); i++ {
- ns := token[i]
- if n, err = strconv.Atoi(ns); err == nil {
- switch {
- case n == 0 || n == 100:
- attr = w.oldattr
- case 1 <= n && n <= 5:
- attr |= foregroundIntensity
- case n == 7:
- attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
- case n == 22 || n == 25:
- attr |= foregroundIntensity
- case n == 27:
- attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
- case 30 <= n && n <= 37:
- attr &= backgroundMask
- if (n-30)&1 != 0 {
- attr |= foregroundRed
- }
- if (n-30)&2 != 0 {
- attr |= foregroundGreen
- }
- if (n-30)&4 != 0 {
- attr |= foregroundBlue
- }
- case n == 38: // set foreground color.
- if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
- if n256, err := strconv.Atoi(token[i+2]); err == nil {
- if n256foreAttr == nil {
- n256setup()
- }
- attr &= backgroundMask
- attr |= n256foreAttr[n256]
- i += 2
- }
- } else {
- attr = attr & (w.oldattr & backgroundMask)
- }
- case n == 39: // reset foreground color.
- attr &= backgroundMask
- attr |= w.oldattr & foregroundMask
- case 40 <= n && n <= 47:
- attr &= foregroundMask
- if (n-40)&1 != 0 {
- attr |= backgroundRed
- }
- if (n-40)&2 != 0 {
- attr |= backgroundGreen
- }
- if (n-40)&4 != 0 {
- attr |= backgroundBlue
- }
- case n == 48: // set background color.
- if i < len(token)-2 && token[i+1] == "5" {
- if n256, err := strconv.Atoi(token[i+2]); err == nil {
- if n256backAttr == nil {
- n256setup()
- }
- attr &= foregroundMask
- attr |= n256backAttr[n256]
- i += 2
- }
- } else {
- attr = attr & (w.oldattr & foregroundMask)
- }
- case n == 49: // reset foreground color.
- attr &= foregroundMask
- attr |= w.oldattr & backgroundMask
- case 90 <= n && n <= 97:
- attr = (attr & backgroundMask)
- attr |= foregroundIntensity
- if (n-90)&1 != 0 {
- attr |= foregroundRed
- }
- if (n-90)&2 != 0 {
- attr |= foregroundGreen
- }
- if (n-90)&4 != 0 {
- attr |= foregroundBlue
- }
- case 100 <= n && n <= 107:
- attr = (attr & foregroundMask)
- attr |= backgroundIntensity
- if (n-100)&1 != 0 {
- attr |= backgroundRed
- }
- if (n-100)&2 != 0 {
- attr |= backgroundGreen
- }
- if (n-100)&4 != 0 {
- attr |= backgroundBlue
- }
- }
- procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
- }
- }
- case 'h':
- var ci consoleCursorInfo
- cs := buf.String()
- if cs == "5>" {
- procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
- ci.visible = 0
- procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
- } else if cs == "?25" {
- procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
- ci.visible = 1
- procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
- }
- case 'l':
- var ci consoleCursorInfo
- cs := buf.String()
- if cs == "5>" {
- procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
- ci.visible = 1
- procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
- } else if cs == "?25" {
- procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
- ci.visible = 0
- procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
- }
- case 's':
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- w.oldpos = csbi.cursorPosition
- case 'u':
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos)))
- }
- }
-
- return len(data), nil
-}
-
-type consoleColor struct {
- rgb int
- red bool
- green bool
- blue bool
- intensity bool
-}
-
-func (c consoleColor) foregroundAttr() (attr word) {
- if c.red {
- attr |= foregroundRed
- }
- if c.green {
- attr |= foregroundGreen
- }
- if c.blue {
- attr |= foregroundBlue
- }
- if c.intensity {
- attr |= foregroundIntensity
- }
- return
-}
-
-func (c consoleColor) backgroundAttr() (attr word) {
- if c.red {
- attr |= backgroundRed
- }
- if c.green {
- attr |= backgroundGreen
- }
- if c.blue {
- attr |= backgroundBlue
- }
- if c.intensity {
- attr |= backgroundIntensity
- }
- return
-}
-
-var color16 = []consoleColor{
- {0x000000, false, false, false, false},
- {0x000080, false, false, true, false},
- {0x008000, false, true, false, false},
- {0x008080, false, true, true, false},
- {0x800000, true, false, false, false},
- {0x800080, true, false, true, false},
- {0x808000, true, true, false, false},
- {0xc0c0c0, true, true, true, false},
- {0x808080, false, false, false, true},
- {0x0000ff, false, false, true, true},
- {0x00ff00, false, true, false, true},
- {0x00ffff, false, true, true, true},
- {0xff0000, true, false, false, true},
- {0xff00ff, true, false, true, true},
- {0xffff00, true, true, false, true},
- {0xffffff, true, true, true, true},
-}
-
-type hsv struct {
- h, s, v float32
-}
-
-func (a hsv) dist(b hsv) float32 {
- dh := a.h - b.h
- switch {
- case dh > 0.5:
- dh = 1 - dh
- case dh < -0.5:
- dh = -1 - dh
- }
- ds := a.s - b.s
- dv := a.v - b.v
- return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
-}
-
-func toHSV(rgb int) hsv {
- r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
- float32((rgb&0x00FF00)>>8)/256.0,
- float32(rgb&0x0000FF)/256.0
- min, max := minmax3f(r, g, b)
- h := max - min
- if h > 0 {
- if max == r {
- h = (g - b) / h
- if h < 0 {
- h += 6
- }
- } else if max == g {
- h = 2 + (b-r)/h
- } else {
- h = 4 + (r-g)/h
- }
- }
- h /= 6.0
- s := max - min
- if max != 0 {
- s /= max
- }
- v := max
- return hsv{h: h, s: s, v: v}
-}
-
-type hsvTable []hsv
-
-func toHSVTable(rgbTable []consoleColor) hsvTable {
- t := make(hsvTable, len(rgbTable))
- for i, c := range rgbTable {
- t[i] = toHSV(c.rgb)
- }
- return t
-}
-
-func (t hsvTable) find(rgb int) consoleColor {
- hsv := toHSV(rgb)
- n := 7
- l := float32(5.0)
- for i, p := range t {
- d := hsv.dist(p)
- if d < l {
- l, n = d, i
- }
- }
- return color16[n]
-}
-
-func minmax3f(a, b, c float32) (min, max float32) {
- if a < b {
- if b < c {
- return a, c
- } else if a < c {
- return a, b
- } else {
- return c, b
- }
- } else {
- if a < c {
- return b, c
- } else if b < c {
- return b, a
- } else {
- return c, a
- }
- }
-}
-
-var n256foreAttr []word
-var n256backAttr []word
-
-func n256setup() {
- n256foreAttr = make([]word, 256)
- n256backAttr = make([]word, 256)
- t := toHSVTable(color16)
- for i, rgb := range color256 {
- c := t.find(rgb)
- n256foreAttr[i] = c.foregroundAttr()
- n256backAttr[i] = c.backgroundAttr()
- }
-}
diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go
deleted file mode 100644
index 9721e16f4bf..00000000000
--- a/vendor/github.com/mattn/go-colorable/noncolorable.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package colorable
-
-import (
- "bytes"
- "io"
-)
-
-// NonColorable hold writer but remove escape sequence.
-type NonColorable struct {
- out io.Writer
-}
-
-// NewNonColorable return new instance of Writer which remove escape sequence from Writer.
-func NewNonColorable(w io.Writer) io.Writer {
- return &NonColorable{out: w}
-}
-
-// Write write data on console
-func (w *NonColorable) Write(data []byte) (n int, err error) {
- er := bytes.NewReader(data)
- var bw [1]byte
-loop:
- for {
- c1, err := er.ReadByte()
- if err != nil {
- break loop
- }
- if c1 != 0x1b {
- bw[0] = c1
- w.out.Write(bw[:])
- continue
- }
- c2, err := er.ReadByte()
- if err != nil {
- break loop
- }
- if c2 != 0x5b {
- continue
- }
-
- var buf bytes.Buffer
- for {
- c, err := er.ReadByte()
- if err != nil {
- break loop
- }
- if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
- break
- }
- buf.Write([]byte(string(c)))
- }
- }
-
- return len(data), nil
-}
diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml
deleted file mode 100644
index 5597e026ddf..00000000000
--- a/vendor/github.com/mattn/go-isatty/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-go:
- - tip
-
-os:
- - linux
- - osx
-
-before_install:
- - go get github.com/mattn/goveralls
- - go get golang.org/x/tools/cmd/cover
-script:
- - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5
diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE
deleted file mode 100644
index 65dc692b6b1..00000000000
--- a/vendor/github.com/mattn/go-isatty/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-Copyright (c) Yasuhiro MATSUMOTO
-
-MIT License (Expat)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
deleted file mode 100644
index 1e69004bb03..00000000000
--- a/vendor/github.com/mattn/go-isatty/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# go-isatty
-
-[![GoDoc](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
-[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
-[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
-[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
-
-isatty for golang
-
-## Usage
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/mattn/go-isatty"
- "os"
-)
-
-func main() {
- if isatty.IsTerminal(os.Stdout.Fd()) {
- fmt.Println("Is Terminal")
- } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
- fmt.Println("Is Cygwin/MSYS2 Terminal")
- } else {
- fmt.Println("Is Not Terminal")
- }
-}
-```
-
-## Installation
-
-```
-$ go get github.com/mattn/go-isatty
-```
-
-## License
-
-MIT
-
-## Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
-
-## Thanks
-
-* k-takata: base idea for IsCygwinTerminal
-
- https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
deleted file mode 100644
index 17d4f90ebcc..00000000000
--- a/vendor/github.com/mattn/go-isatty/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package isatty implements interface to isatty
-package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
deleted file mode 100644
index 9584a98842e..00000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_appengine.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build appengine
-
-package isatty
-
-// IsTerminal returns true if the file descriptor is terminal which
-// is always false on on appengine classic which is a sandboxed PaaS.
-func IsTerminal(fd uintptr) bool {
- return false
-}
-
-// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
deleted file mode 100644
index 42f2514d133..00000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go
deleted file mode 100644
index 7384cf99167..00000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_linux.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build linux
-// +build !appengine,!ppc64,!ppc64le
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TCGETS
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
deleted file mode 100644
index 44e5d213021..00000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build linux
-// +build ppc64 ppc64le
-
-package isatty
-
-import (
- "unsafe"
-
- syscall "golang.org/x/sys/unix"
-)
-
-const ioctlReadTermios = syscall.TCGETS
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
deleted file mode 100644
index 9d8b4a59961..00000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_others.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !windows
-// +build !appengine
-
-package isatty
-
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
deleted file mode 100644
index 1f0c6bf53dc..00000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build solaris
-// +build !appengine
-
-package isatty
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
-func IsTerminal(fd uintptr) bool {
- var termio unix.Termio
- err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
- return err == nil
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
deleted file mode 100644
index af51cbcaa48..00000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_windows.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// +build windows
-// +build !appengine
-
-package isatty
-
-import (
- "strings"
- "syscall"
- "unicode/utf16"
- "unsafe"
-)
-
-const (
- fileNameInfo uintptr = 2
- fileTypePipe = 3
-)
-
-var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
- procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
- procGetFileType = kernel32.NewProc("GetFileType")
-)
-
-func init() {
- // Check if GetFileInformationByHandleEx is available.
- if procGetFileInformationByHandleEx.Find() != nil {
- procGetFileInformationByHandleEx = nil
- }
-}
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
-
-// Check pipe name is used for cygwin/msys2 pty.
-// Cygwin/MSYS2 PTY has a name like:
-// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
-func isCygwinPipeName(name string) bool {
- token := strings.Split(name, "-")
- if len(token) < 5 {
- return false
- }
-
- if token[0] != `\msys` && token[0] != `\cygwin` {
- return false
- }
-
- if token[1] == "" {
- return false
- }
-
- if !strings.HasPrefix(token[2], "pty") {
- return false
- }
-
- if token[3] != `from` && token[3] != `to` {
- return false
- }
-
- if token[4] != "master" {
- return false
- }
-
- return true
-}
-
-// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
-// terminal.
-func IsCygwinTerminal(fd uintptr) bool {
- if procGetFileInformationByHandleEx == nil {
- return false
- }
-
- // Cygwin/msys's pty is a pipe.
- ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
- if ft != fileTypePipe || e != 0 {
- return false
- }
-
- var buf [2 + syscall.MAX_PATH]uint16
- r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
- 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
- uintptr(len(buf)*2), 0, 0)
- if r == 0 || e != 0 {
- return false
- }
-
- l := *(*uint32)(unsafe.Pointer(&buf))
- return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
-}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
deleted file mode 100644
index 8dada3edaf5..00000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
deleted file mode 100644
index 5d8cb5b72e7..00000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
+++ /dev/null
@@ -1 +0,0 @@
-Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
deleted file mode 100644
index e16fb946bb9..00000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-cover.dat
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
deleted file mode 100644
index 81be214370d..00000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all:
-
-cover:
- go test -cover -v -coverprofile=cover.dat ./...
- go tool cover -func cover.dat
-
-.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
deleted file mode 100644
index 258c0636aac..00000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
- "encoding/binary"
- "errors"
- "io"
-
- "github.com/golang/protobuf/proto"
-)
-
-var errInvalidVarint = errors.New("invalid varint32 encountered")
-
-// ReadDelimited decodes a message from the provided length-delimited stream,
-// where the length is encoded as 32-bit varint prefix to the message body.
-// It returns the total number of bytes read and any applicable error. This is
-// roughly equivalent to the companion Java API's
-// MessageLite#parseDelimitedFrom. As per the reader contract, this function
-// calls r.Read repeatedly as required until exactly one message including its
-// prefix is read and decoded (or an error has occurred). The function never
-// reads more bytes from the stream than required. The function never returns
-// an error if a message has been read and decoded correctly, even if the end
-// of the stream has been reached in doing so. In that case, any subsequent
-// calls return (0, io.EOF).
-func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
- // Per AbstractParser#parsePartialDelimitedFrom with
- // CodedInputStream#readRawVarint32.
- var headerBuf [binary.MaxVarintLen32]byte
- var bytesRead, varIntBytes int
- var messageLength uint64
- for varIntBytes == 0 { // i.e. no varint has been decoded yet.
- if bytesRead >= len(headerBuf) {
- return bytesRead, errInvalidVarint
- }
- // We have to read byte by byte here to avoid reading more bytes
- // than required. Each read byte is appended to what we have
- // read before.
- newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
- if newBytesRead == 0 {
- if err != nil {
- return bytesRead, err
- }
- // A Reader should not return (0, nil), but if it does,
- // it should be treated as no-op (according to the
- // Reader contract). So let's go on...
- continue
- }
- bytesRead += newBytesRead
- // Now present everything read so far to the varint decoder and
- // see if a varint can be decoded already.
- messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
- }
-
- messageBuf := make([]byte, messageLength)
- newBytesRead, err := io.ReadFull(r, messageBuf)
- bytesRead += newBytesRead
- if err != nil {
- return bytesRead, err
- }
-
- return bytesRead, proto.Unmarshal(messageBuf, m)
-}
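
Note: the removed `ReadDelimited` reads the varint header one byte at a time so it never consumes bytes past the current record, which matters when several delimited messages share one stream. A minimal stdlib-only sketch of that technique (the protobuf decoding step is omitted; the names here are illustrative, not part of any real API):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readPrefixed reads a uvarint length prefix byte by byte, then exactly
// that many payload bytes, never over-reading from r.
func readPrefixed(r io.Reader) ([]byte, error) {
	var header [binary.MaxVarintLen32]byte
	n := 0
	for {
		if n >= len(header) {
			return nil, fmt.Errorf("invalid varint32")
		}
		if _, err := io.ReadFull(r, header[n:n+1]); err != nil {
			return nil, err
		}
		n++
		// size > 0 means a complete varint has been decoded.
		if length, size := binary.Uvarint(header[:n]); size > 0 {
			body := make([]byte, length)
			_, err := io.ReadFull(r, body)
			return body, err
		}
	}
}

func main() {
	var buf bytes.Buffer
	var header [binary.MaxVarintLen32]byte
	payload := []byte("hello")
	buf.Write(header[:binary.PutUvarint(header[:], uint64(len(payload)))])
	buf.Write(payload)
	body, err := readPrefixed(&buf)
	fmt.Println(string(body), err) // hello <nil>
}
```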
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
deleted file mode 100644
index 8fb59ad226f..00000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
- "encoding/binary"
- "io"
-
- "github.com/golang/protobuf/proto"
-)
-
-// WriteDelimited encodes and dumps a message to the provided writer prefixed
-// with a 32-bit varint indicating the length of the encoded message, producing
-// a length-delimited record stream, which can be used to chain together
-// encoded messages of the same type together in a file. It returns the total
-// number of bytes written and any applicable error. This is roughly
-// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
- buffer, err := proto.Marshal(m)
- if err != nil {
- return 0, err
- }
-
- var buf [binary.MaxVarintLen32]byte
- encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
-
- sync, err := w.Write(buf[:encodedLength])
- if err != nil {
- return sync, err
- }
-
- n, err = w.Write(buffer)
- return n + sync, err
-}
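
Note: `WriteDelimited`'s framing (uvarint length, then payload) is easy to reproduce with the standard library. A hedged sketch of chaining several records in one stream; unlike the byte-at-a-time reader above, `bufio` may read ahead, which is fine when the consumer owns the whole stream:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeDelimited writes a uvarint length prefix followed by rec.
func writeDelimited(w io.Writer, rec []byte) error {
	var header [binary.MaxVarintLen32]byte
	n := binary.PutUvarint(header[:], uint64(len(rec)))
	if _, err := w.Write(header[:n]); err != nil {
		return err
	}
	_, err := w.Write(rec)
	return err
}

func main() {
	var stream bytes.Buffer
	for _, rec := range []string{"one", "two", "three"} {
		if err := writeDelimited(&stream, []byte(rec)); err != nil {
			panic(err)
		}
	}
	br := bufio.NewReader(&stream)
	for {
		length, err := binary.ReadUvarint(br)
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		rec := make([]byte, length)
		if _, err := io.ReadFull(br, rec); err != nil {
			panic(err)
		}
		fmt.Println(string(rec))
	}
}
```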
diff --git a/vendor/github.com/miekg/pkcs11/.travis.yml b/vendor/github.com/miekg/pkcs11/.travis.yml
deleted file mode 100644
index 687044d830a..00000000000
--- a/vendor/github.com/miekg/pkcs11/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-sudo: required
-dist: trusty
-
-go:
- - 1.9
- - tip
-
-script:
- - go test -v ./...
-
-before_script:
- - sudo apt-get update
- - sudo apt-get -y install libsofthsm
diff --git a/vendor/github.com/miekg/pkcs11/README.md b/vendor/github.com/miekg/pkcs11/README.md
index 0a5c1b7b6e7..18a361a9903 100644
--- a/vendor/github.com/miekg/pkcs11/README.md
+++ b/vendor/github.com/miekg/pkcs11/README.md
@@ -1,6 +1,6 @@
-# PKCS#11 [Build Status](https://travis-ci.org/miekg/pkcs11) [GoDoc](http://godoc.org/github.com/miekg/pkcs11)
+# PKCS#11
-This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom were
+This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom where
it makes sense. It has been tested with SoftHSM.
## SoftHSM
@@ -13,10 +13,10 @@ it makes sense. It has been tested with SoftHSM.
softhsm --init-token --slot 0 --label test --pin 1234
~~~
- * Then use `libsofthsm.so` as the pkcs11 module:
+ * Then use `libsofthsm2.so` as the pkcs11 module:
~~~ go
- p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so")
+ p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
~~~
## Examples
@@ -24,7 +24,7 @@ it makes sense. It has been tested with SoftHSM.
A skeleton program would look somewhat like this (yes, pkcs#11 is verbose):
~~~ go
-p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so")
+p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
err := p.Initialize()
if err != nil {
panic(err)
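
Note: the README's skeleton continues beyond this hunk. For context, a fuller sketch of the same flow against the renamed `libsofthsm2.so` module; slot 0 and PIN "1234" assume the SoftHSM token initialized above, and error handling is kept to panics as in the README:

```go
package main

import (
	"fmt"

	"github.com/miekg/pkcs11"
)

func main() {
	p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
	if err := p.Initialize(); err != nil {
		panic(err)
	}
	defer p.Destroy()
	defer p.Finalize()

	slots, err := p.GetSlotList(true)
	if err != nil {
		panic(err)
	}
	session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
	if err != nil {
		panic(err)
	}
	defer p.CloseSession(session)

	if err := p.Login(session, pkcs11.CKU_USER, "1234"); err != nil {
		panic(err)
	}
	defer p.Logout(session)

	// Digest some data on the token.
	if err := p.DigestInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA_1, nil)}); err != nil {
		panic(err)
	}
	hash, err := p.Digest(session, []byte("this is a string"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", hash)
}
```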
diff --git a/vendor/github.com/miekg/pkcs11/pkcs11.go b/vendor/github.com/miekg/pkcs11/pkcs11.go
index e21d23b73e6..e1b5824ec89 100644
--- a/vendor/github.com/miekg/pkcs11/pkcs11.go
+++ b/vendor/github.com/miekg/pkcs11/pkcs11.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:generate go run const_generate.go
+
// Package pkcs11 is a wrapper around the PKCS#11 cryptographic library.
package pkcs11
@@ -14,7 +16,7 @@ package pkcs11
#cgo windows CFLAGS: -DPACKED_STRUCTURES
#cgo linux LDFLAGS: -ldl
#cgo darwin LDFLAGS: -ldl
-#cgo openbsd LDFLAGS: -ldl
+#cgo openbsd LDFLAGS:
#cgo freebsd LDFLAGS: -ldl
#include <stdlib.h>
@@ -770,9 +772,10 @@ static inline CK_VOID_PTR getAttributePval(CK_ATTRIBUTE_PTR a)
*/
import "C"
-import "strings"
-
-import "unsafe"
+import (
+ "strings"
+ "unsafe"
+)
// Ctx contains the current pkcs11 context.
type Ctx struct {
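
Note: the new `//go:generate` directive means the constants file (`zconst.go`, below) is produced by a generator rather than maintained by hand; `const_generate.go` itself is not vendored. Purely as an illustration of the pattern, a hypothetical generator emitting the `Code generated ... DO NOT EDIT.` header that Go tooling and linters recognize:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.Create("zconst.go")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// The real generator parses pkcs11t.h; this sketch writes one constant.
	fmt.Fprintln(f, `// Code generated by "go run const_generate.go"; DO NOT EDIT.`)
	fmt.Fprintln(f)
	fmt.Fprintln(f, "package pkcs11")
	fmt.Fprintln(f)
	fmt.Fprintln(f, "const (")
	fmt.Fprintln(f, "\tCK_TRUE = 1")
	fmt.Fprintln(f, ")")
}
```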
diff --git a/vendor/github.com/miekg/pkcs11/release.go b/vendor/github.com/miekg/pkcs11/release.go
index 4380f374d28..d8b99f147ea 100644
--- a/vendor/github.com/miekg/pkcs11/release.go
+++ b/vendor/github.com/miekg/pkcs11/release.go
@@ -1,3 +1,4 @@
+//go:build release
// +build release
package pkcs11
@@ -5,7 +6,7 @@ package pkcs11
import "fmt"
// Release is current version of the pkcs11 library.
-var Release = R{1, 0, 3}
+var Release = R{1, 1, 1}
// R holds the version of this library.
type R struct {
diff --git a/vendor/github.com/miekg/pkcs11/types.go b/vendor/github.com/miekg/pkcs11/types.go
index 970db9061b6..60eadcb71bb 100644
--- a/vendor/github.com/miekg/pkcs11/types.go
+++ b/vendor/github.com/miekg/pkcs11/types.go
@@ -182,8 +182,20 @@ func NewAttribute(typ uint, x interface{}) *Attribute {
}
case int:
a.Value = uintToBytes(uint64(v))
+ case int16:
+ a.Value = uintToBytes(uint64(v))
+ case int32:
+ a.Value = uintToBytes(uint64(v))
+ case int64:
+ a.Value = uintToBytes(uint64(v))
case uint:
a.Value = uintToBytes(uint64(v))
+ case uint16:
+ a.Value = uintToBytes(uint64(v))
+ case uint32:
+ a.Value = uintToBytes(uint64(v))
+ case uint64:
+ a.Value = uintToBytes(uint64(v))
case string:
a.Value = []byte(v)
case []byte:
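
Note: with the added cases, fixed-width integers are accepted by `NewAttribute` directly instead of requiring a manual conversion to `int` or `uint` first. A small sketch (the attribute choices are illustrative):

```go
package main

import "github.com/miekg/pkcs11"

func main() {
	template := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_SECRET_KEY),
		pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_AES),
		pkcs11.NewAttribute(pkcs11.CKA_VALUE_LEN, uint32(32)), // previously needed uint(32)
		pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
	}
	_ = template
}
```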
diff --git a/vendor/github.com/miekg/pkcs11/const.go b/vendor/github.com/miekg/pkcs11/zconst.go
similarity index 85%
rename from vendor/github.com/miekg/pkcs11/const.go
rename to vendor/github.com/miekg/pkcs11/zconst.go
index 40885614616..41df5cfcf0c 100644
--- a/vendor/github.com/miekg/pkcs11/const.go
+++ b/vendor/github.com/miekg/pkcs11/zconst.go
@@ -2,48 +2,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package pkcs11
-
-const (
- CKU_SO uint = 0
- CKU_USER uint = 1
- CKU_CONTEXT_SPECIFIC uint = 2
-)
-
-const (
- CKO_DATA uint = 0x00000000
- CKO_CERTIFICATE uint = 0x00000001
- CKO_PUBLIC_KEY uint = 0x00000002
- CKO_PRIVATE_KEY uint = 0x00000003
- CKO_SECRET_KEY uint = 0x00000004
- CKO_HW_FEATURE uint = 0x00000005
- CKO_DOMAIN_PARAMETERS uint = 0x00000006
- CKO_MECHANISM uint = 0x00000007
- CKO_OTP_KEY uint = 0x00000008
- CKO_VENDOR_DEFINED uint = 0x80000000
-)
-
-const (
- CKG_MGF1_SHA1 uint = 0x00000001
- CKG_MGF1_SHA224 uint = 0x00000005
- CKG_MGF1_SHA256 uint = 0x00000002
- CKG_MGF1_SHA384 uint = 0x00000003
- CKG_MGF1_SHA512 uint = 0x00000004
- CKG_MGF1_SHA3_224 uint = 0x00000006
- CKG_MGF1_SHA3_256 uint = 0x00000007
- CKG_MGF1_SHA3_384 uint = 0x00000008
- CKG_MGF1_SHA3_512 uint = 0x00000009
-)
-
-const (
- CKZ_DATA_SPECIFIED uint = 0x00000001
-)
+// Code generated by "go run const_generate.go"; DO NOT EDIT.
-// Generated with: awk '/#define CK[AFKMRC]/{ print $2 " = " $3 }' pkcs11t.h | sed -e 's/UL$//g' -e 's/UL)$/)/g'
+package pkcs11
-// All the flag (CKF_), attribute (CKA_), error code (CKR_), key type (CKK_), certificate type (CKC_) and
-// mechanism (CKM_) constants as defined in PKCS#11.
const (
+ CK_TRUE = 1
+ CK_FALSE = 0
+ CK_UNAVAILABLE_INFORMATION = ^uint(0)
+ CK_EFFECTIVELY_INFINITE = 0
+ CK_INVALID_HANDLE = 0
+ CKN_SURRENDER = 0
+ CKN_OTP_CHANGED = 1
CKF_TOKEN_PRESENT = 0x00000001
CKF_REMOVABLE_DEVICE = 0x00000002
CKF_HW_SLOT = 0x00000004
@@ -66,12 +36,34 @@ const (
CKF_SO_PIN_LOCKED = 0x00400000
CKF_SO_PIN_TO_BE_CHANGED = 0x00800000
CKF_ERROR_STATE = 0x01000000
+ CKU_SO = 0
+ CKU_USER = 1
+ CKU_CONTEXT_SPECIFIC = 2
+ CKS_RO_PUBLIC_SESSION = 0
+ CKS_RO_USER_FUNCTIONS = 1
+ CKS_RW_PUBLIC_SESSION = 2
+ CKS_RW_USER_FUNCTIONS = 3
+ CKS_RW_SO_FUNCTIONS = 4
CKF_RW_SESSION = 0x00000002
CKF_SERIAL_SESSION = 0x00000004
+ CKO_DATA = 0x00000000
+ CKO_CERTIFICATE = 0x00000001
+ CKO_PUBLIC_KEY = 0x00000002
+ CKO_PRIVATE_KEY = 0x00000003
+ CKO_SECRET_KEY = 0x00000004
+ CKO_HW_FEATURE = 0x00000005
+ CKO_DOMAIN_PARAMETERS = 0x00000006
+ CKO_MECHANISM = 0x00000007
+ CKO_OTP_KEY = 0x00000008
+ CKO_VENDOR_DEFINED = 0x80000000
+ CKH_MONOTONIC_COUNTER = 0x00000001
+ CKH_CLOCK = 0x00000002
+ CKH_USER_INTERFACE = 0x00000003
+ CKH_VENDOR_DEFINED = 0x80000000
CKK_RSA = 0x00000000
CKK_DSA = 0x00000001
CKK_DH = 0x00000002
- CKK_ECDSA = 0x00000003
+ CKK_ECDSA = 0x00000003 // Deprecated
CKK_EC = 0x00000003
CKK_X9_42_DH = 0x00000004
CKK_KEA = 0x00000005
@@ -83,7 +75,7 @@ const (
CKK_DES3 = 0x00000015
CKK_CAST = 0x00000016
CKK_CAST3 = 0x00000017
- CKK_CAST5 = 0x00000018
+ CKK_CAST5 = 0x00000018 // Deprecated
CKK_CAST128 = 0x00000018
CKK_RC5 = 0x00000019
CKK_IDEA = 0x0000001A
@@ -99,14 +91,14 @@ const (
CKK_ACTI = 0x00000024
CKK_CAMELLIA = 0x00000025
CKK_ARIA = 0x00000026
- CKK_SHA512_224_HMAC = 0x00000027
- CKK_SHA512_256_HMAC = 0x00000028
- CKK_SHA512_T_HMAC = 0x00000029
+ CKK_MD5_HMAC = 0x00000027
CKK_SHA_1_HMAC = 0x00000028
- CKK_SHA224_HMAC = 0x0000002E
+ CKK_RIPEMD128_HMAC = 0x00000029
+ CKK_RIPEMD160_HMAC = 0x0000002A
CKK_SHA256_HMAC = 0x0000002B
CKK_SHA384_HMAC = 0x0000002C
CKK_SHA512_HMAC = 0x0000002D
+ CKK_SHA224_HMAC = 0x0000002E
CKK_SEED = 0x0000002F
CKK_GOSTR3410 = 0x00000030
CKK_GOSTR3411 = 0x00000031
@@ -116,11 +108,26 @@ const (
CKK_SHA3_384_HMAC = 0x00000035
CKK_SHA3_512_HMAC = 0x00000036
CKK_VENDOR_DEFINED = 0x80000000
+ CK_CERTIFICATE_CATEGORY_UNSPECIFIED = 0
+ CK_CERTIFICATE_CATEGORY_TOKEN_USER = 1
+ CK_CERTIFICATE_CATEGORY_AUTHORITY = 2
+ CK_CERTIFICATE_CATEGORY_OTHER_ENTITY = 3
+ CK_SECURITY_DOMAIN_UNSPECIFIED = 0
+ CK_SECURITY_DOMAIN_MANUFACTURER = 1
+ CK_SECURITY_DOMAIN_OPERATOR = 2
+ CK_SECURITY_DOMAIN_THIRD_PARTY = 3
CKC_X_509 = 0x00000000
CKC_X_509_ATTR_CERT = 0x00000001
CKC_WTLS = 0x00000002
CKC_VENDOR_DEFINED = 0x80000000
CKF_ARRAY_ATTRIBUTE = 0x40000000
+ CK_OTP_FORMAT_DECIMAL = 0
+ CK_OTP_FORMAT_HEXADECIMAL = 1
+ CK_OTP_FORMAT_ALPHANUMERIC = 2
+ CK_OTP_FORMAT_BINARY = 3
+ CK_OTP_PARAM_IGNORED = 0
+ CK_OTP_PARAM_OPTIONAL = 1
+ CK_OTP_PARAM_MANDATORY = 2
CKA_CLASS = 0x00000000
CKA_TOKEN = 0x00000001
CKA_PRIVATE = 0x00000002
@@ -183,15 +190,16 @@ const (
CKA_MODIFIABLE = 0x00000170
CKA_COPYABLE = 0x00000171
CKA_DESTROYABLE = 0x00000172
- CKA_ECDSA_PARAMS = 0x00000180
+ CKA_ECDSA_PARAMS = 0x00000180 // Deprecated
CKA_EC_PARAMS = 0x00000180
CKA_EC_POINT = 0x00000181
- CKA_SECONDARY_AUTH = 0x00000200
- CKA_AUTH_PIN_FLAGS = 0x00000201
+ CKA_SECONDARY_AUTH = 0x00000200 // Deprecated
+ CKA_AUTH_PIN_FLAGS = 0x00000201 // Deprecated
CKA_ALWAYS_AUTHENTICATE = 0x00000202
CKA_WRAP_WITH_TRUSTED = 0x00000210
- CKA_WRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000211
- CKA_UNWRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000212
+ CKA_WRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000211)
+ CKA_UNWRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000212)
+ CKA_DERIVE_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000213)
CKA_OTP_FORMAT = 0x00000220
CKA_OTP_LENGTH = 0x00000221
CKA_OTP_TIME_INTERVAL = 0x00000222
@@ -226,7 +234,7 @@ const (
CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501
CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502
CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503
- CKA_ALLOWED_MECHANISMS = CKF_ARRAY_ATTRIBUTE | 0x00000600
+ CKA_ALLOWED_MECHANISMS = (CKF_ARRAY_ATTRIBUTE | 0x00000600)
CKA_VENDOR_DEFINED = 0x80000000
CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000
CKM_RSA_PKCS = 0x00000001
@@ -246,11 +254,10 @@ const (
CKM_DSA_KEY_PAIR_GEN = 0x00000010
CKM_DSA = 0x00000011
CKM_DSA_SHA1 = 0x00000012
- CKM_DSA_FIPS_G_GEN = 0x00000013
- CKM_DSA_SHA224 = 0x00000014
- CKM_DSA_SHA256 = 0x00000015
- CKM_DSA_SHA384 = 0x00000016
- CKM_DSA_SHA512 = 0x00000017
+ CKM_DSA_SHA224 = 0x00000013
+ CKM_DSA_SHA256 = 0x00000014
+ CKM_DSA_SHA384 = 0x00000015
+ CKM_DSA_SHA512 = 0x00000016
CKM_DSA_SHA3_224 = 0x00000018
CKM_DSA_SHA3_256 = 0x00000019
CKM_DSA_SHA3_384 = 0x0000001A
@@ -387,13 +394,13 @@ const (
CKM_CAST128_KEY_GEN = 0x00000320
CKM_CAST5_ECB = 0x00000321
CKM_CAST128_ECB = 0x00000321
- CKM_CAST5_CBC = 0x00000322
+ CKM_CAST5_CBC = 0x00000322 // Deprecated
CKM_CAST128_CBC = 0x00000322
- CKM_CAST5_MAC = 0x00000323
+ CKM_CAST5_MAC = 0x00000323 // Deprecated
CKM_CAST128_MAC = 0x00000323
- CKM_CAST5_MAC_GENERAL = 0x00000324
+ CKM_CAST5_MAC_GENERAL = 0x00000324 // Deprecated
CKM_CAST128_MAC_GENERAL = 0x00000324
- CKM_CAST5_CBC_PAD = 0x00000325
+ CKM_CAST5_CBC_PAD = 0x00000325 // Deprecated
CKM_CAST128_CBC_PAD = 0x00000325
CKM_RC5_KEY_GEN = 0x00000330
CKM_RC5_ECB = 0x00000331
@@ -441,9 +448,9 @@ const (
CKM_PBE_MD5_DES_CBC = 0x000003A1
CKM_PBE_MD5_CAST_CBC = 0x000003A2
CKM_PBE_MD5_CAST3_CBC = 0x000003A3
- CKM_PBE_MD5_CAST5_CBC = 0x000003A4
+ CKM_PBE_MD5_CAST5_CBC = 0x000003A4 // Deprecated
CKM_PBE_MD5_CAST128_CBC = 0x000003A4
- CKM_PBE_SHA1_CAST5_CBC = 0x000003A5
+ CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 // Deprecated
CKM_PBE_SHA1_CAST128_CBC = 0x000003A5
CKM_PBE_SHA1_RC4_128 = 0x000003A6
CKM_PBE_SHA1_RC4_40 = 0x000003A7
@@ -522,7 +529,7 @@ const (
CKM_BATON_COUNTER = 0x00001034
CKM_BATON_SHUFFLE = 0x00001035
CKM_BATON_WRAP = 0x00001036
- CKM_ECDSA_KEY_PAIR_GEN = 0x00001040
+ CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 // Deprecated
CKM_EC_KEY_PAIR_GEN = 0x00001040
CKM_ECDSA = 0x00001041
CKM_ECDSA_SHA1 = 0x00001042
@@ -551,9 +558,9 @@ const (
CKM_AES_CTR = 0x00001086
CKM_AES_GCM = 0x00001087
CKM_AES_CCM = 0x00001088
- CKM_AES_CMAC_GENERAL = 0x00001089
+ CKM_AES_CTS = 0x00001089
CKM_AES_CMAC = 0x0000108A
- CKM_AES_CTS = 0x0000108B
+ CKM_AES_CMAC_GENERAL = 0x0000108B
CKM_AES_XCBC_MAC = 0x0000108C
CKM_AES_XCBC_MAC_96 = 0x0000108D
CKM_AES_GMAC = 0x0000108E
@@ -704,33 +711,56 @@ const (
CKR_MUTEX_NOT_LOCKED = 0x000001A1
CKR_NEW_PIN_MODE = 0x000001B0
CKR_NEXT_OTP = 0x000001B1
- CKR_EXCEEDED_MAX_ITERATIONS = 0x000001C0
- CKR_FIPS_SELF_TEST_FAILED = 0x000001C1
- CKR_LIBRARY_LOAD_FAILED = 0x000001C2
- CKR_PIN_TOO_WEAK = 0x000001C3
- CKR_PUBLIC_KEY_INVALID = 0x000001C4
+ CKR_EXCEEDED_MAX_ITERATIONS = 0x000001B5
+ CKR_FIPS_SELF_TEST_FAILED = 0x000001B6
+ CKR_LIBRARY_LOAD_FAILED = 0x000001B7
+ CKR_PIN_TOO_WEAK = 0x000001B8
+ CKR_PUBLIC_KEY_INVALID = 0x000001B9
CKR_FUNCTION_REJECTED = 0x00000200
CKR_VENDOR_DEFINED = 0x80000000
CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001
CKF_OS_LOCKING_OK = 0x00000002
CKF_DONT_BLOCK = 1
+ CKG_MGF1_SHA1 = 0x00000001
+ CKG_MGF1_SHA256 = 0x00000002
+ CKG_MGF1_SHA384 = 0x00000003
+ CKG_MGF1_SHA512 = 0x00000004
+ CKG_MGF1_SHA224 = 0x00000005
+ CKZ_DATA_SPECIFIED = 0x00000001
+ CKD_NULL = 0x00000001
+ CKD_SHA1_KDF = 0x00000002
+ CKD_SHA1_KDF_ASN1 = 0x00000003
+ CKD_SHA1_KDF_CONCATENATE = 0x00000004
+ CKD_SHA224_KDF = 0x00000005
+ CKD_SHA256_KDF = 0x00000006
+ CKD_SHA384_KDF = 0x00000007
+ CKD_SHA512_KDF = 0x00000008
+ CKD_CPDIVERSIFY_KDF = 0x00000009
+ CKD_SHA3_224_KDF = 0x0000000A
+ CKD_SHA3_256_KDF = 0x0000000B
+ CKD_SHA3_384_KDF = 0x0000000C
+ CKD_SHA3_512_KDF = 0x0000000D
+ CKP_PKCS5_PBKD2_HMAC_SHA1 = 0x00000001
+ CKP_PKCS5_PBKD2_HMAC_GOSTR3411 = 0x00000002
+ CKP_PKCS5_PBKD2_HMAC_SHA224 = 0x00000003
+ CKP_PKCS5_PBKD2_HMAC_SHA256 = 0x00000004
+ CKP_PKCS5_PBKD2_HMAC_SHA384 = 0x00000005
+ CKP_PKCS5_PBKD2_HMAC_SHA512 = 0x00000006
+ CKP_PKCS5_PBKD2_HMAC_SHA512_224 = 0x00000007
+ CKP_PKCS5_PBKD2_HMAC_SHA512_256 = 0x00000008
+ CKZ_SALT_SPECIFIED = 0x00000001
+ CK_OTP_VALUE = 0
+ CK_OTP_PIN = 1
+ CK_OTP_CHALLENGE = 2
+ CK_OTP_TIME = 3
+ CK_OTP_COUNTER = 4
+ CK_OTP_FLAGS = 5
+ CK_OTP_OUTPUT_LENGTH = 6
+ CK_OTP_OUTPUT_FORMAT = 7
CKF_NEXT_OTP = 0x00000001
CKF_EXCLUDE_TIME = 0x00000002
CKF_EXCLUDE_COUNTER = 0x00000004
CKF_EXCLUDE_CHALLENGE = 0x00000008
CKF_EXCLUDE_PIN = 0x00000010
CKF_USER_FRIENDLY_OTP = 0x00000020
- CKD_NULL = 0x00000001
- CKD_SHA1_KDF = 0x00000002
-)
-
-// Special return values defined in PKCS#11 v2.40 section 3.2.
-const (
- // CK_EFFECTIVELY_INFINITE may be returned in the CK_TOKEN_INFO fields ulMaxSessionCount and ulMaxRwSessionCount.
- // It indicates there is no practical limit on the number of sessions.
- CK_EFFECTIVELY_INFINITE = 0
-
- // CK_UNAVAILABLE_INFORMATION may be returned for several fields within CK_TOKEN_INFO. It indicates
- // the token is unable or unwilling to provide the requested information.
- CK_UNAVAILABLE_INFORMATION = ^uint(0)
)
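
Note: the relocated sentinels keep the PKCS#11 v2.40 section 3.2 meaning that the removed comments described. Continuing the session skeleton sketched earlier (and assuming this package's `TokenInfo` field names, `MaxSessionCount` here), a hedged example of checking them:

```go
info, err := p.GetTokenInfo(slots[0])
if err != nil {
	panic(err)
}
switch info.MaxSessionCount {
case pkcs11.CK_EFFECTIVELY_INFINITE:
	fmt.Println("no practical limit on open sessions")
case pkcs11.CK_UNAVAILABLE_INFORMATION:
	fmt.Println("token does not report a session limit")
default:
	fmt.Println("session limit:", info.MaxSessionCount)
}
```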
diff --git a/vendor/github.com/mistifyio/go-zfs/.gitignore b/vendor/github.com/mistifyio/go-zfs/.gitignore
deleted file mode 100644
index 8000dd9db47..00000000000
--- a/vendor/github.com/mistifyio/go-zfs/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.vagrant
diff --git a/vendor/github.com/mistifyio/go-zfs/.travis.yml b/vendor/github.com/mistifyio/go-zfs/.travis.yml
deleted file mode 100644
index acbd39cefe9..00000000000
--- a/vendor/github.com/mistifyio/go-zfs/.travis.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-language: go
-dist: trusty
-sudo: required
-cache:
- directories:
- - $HOME/.ccache
- - $HOME/zfs
-
-branches:
- only:
- - master
-
-env:
- - rel=0.6.5.11
- - rel=0.7.6
-
-go:
- - "1.10.x"
- - master
-
-before_install:
- - export MAKEFLAGS=-j$(($(grep -c '^processor' /proc/cpuinfo) * 2 + 1))
- - export PATH=/usr/lib/ccache:$PATH
- - go get github.com/alecthomas/gometalinter
- - gometalinter --install --update
- - sudo apt-get update -y && sudo apt-get install -y libattr1-dev libblkid-dev linux-headers-$(uname -r) tree uuid-dev
- - mkdir -p $HOME/zfs
- - cd $HOME/zfs
- - [[ -d spl-$rel.tar.gz ]] || curl -L https://github.com/zfsonlinux/zfs/releases/download/zfs-$rel/spl-$rel.tar.gz | tar xz
- - [[ -d zfs-$rel.tar.gz ]] || curl -L https://github.com/zfsonlinux/zfs/releases/download/zfs-$rel/zfs-$rel.tar.gz | tar xz
- - (cd spl-$rel && ./configure --prefix=/usr && make && sudo make install)
- - (cd zfs-$rel && ./configure --prefix=/usr && make && sudo make install)
- - sudo modprobe zfs
- - cd $TRAVIS_BUILD_DIR
-
-script:
- - sudo -E $(which go) test -v ./...
- - gometalinter --vendor --vendored-linters ./... || true
- - gometalinter --errors --vendor --vendored-linters ./...
-
-notifications:
- email: false
- irc: "chat.freenode.net#cerana"
diff --git a/vendor/github.com/mistifyio/go-zfs/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/Vagrantfile
deleted file mode 100644
index 3bd6e120bc4..00000000000
--- a/vendor/github.com/mistifyio/go-zfs/Vagrantfile
+++ /dev/null
@@ -1,34 +0,0 @@
-
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- config.vm.box = "ubuntu/trusty64"
- config.ssh.forward_agent = true
-
- config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true
-
-config.vm.provision "shell", inline: <<EOF
-cat << END > /etc/profile.d/go.sh
-export GOPATH=\\$HOME/go
-export PATH=\\$GOPATH/bin:/usr/local/go/bin:\\$PATH
-END
-
-chown -R vagrant /home/vagrant/go
-
-apt-get update
-apt-get install -y software-properties-common curl
-apt-add-repository --yes ppa:zfs-native/stable
-apt-get update
-apt-get install -y ubuntu-zfs
-
-cd /home/vagrant
-curl -z go1.3.3.linux-amd64.tar.gz -L -O https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz
-tar -C /usr/local -zxf /home/vagrant/go1.3.3.linux-amd64.tar.gz
-
-cat << END > /etc/sudoers.d/go
-Defaults env_keep += "GOPATH"
-END
-
-EOF
-
-end
diff --git a/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go
deleted file mode 100644
index a46f73060d1..00000000000
--- a/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !solaris
-
-package zfs
-
-import (
- "strings"
-)
-
-// List of ZFS properties to retrieve from zfs list command on a non-Solaris platform
-var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced", "written", "logicalused", "usedbydataset"}
-
-var dsPropListOptions = strings.Join(dsPropList, ",")
-
-// List of Zpool properties to retrieve from zpool list command on a non-Solaris platform
-var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"}
-var zpoolPropListOptions = strings.Join(zpoolPropList, ",")
-var zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
diff --git a/vendor/github.com/mistifyio/go-zfs/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/utils_solaris.go
deleted file mode 100644
index 0a7e90f2227..00000000000
--- a/vendor/github.com/mistifyio/go-zfs/utils_solaris.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build solaris
-
-package zfs
-
-import (
- "strings"
-)
-
-// List of ZFS properties to retrieve from zfs list command on a Solaris platform
-var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced"}
-
-var dsPropListOptions = strings.Join(dsPropList, ",")
-
-// List of Zpool properties to retrieve from zpool list command on a non-Solaris platform
-var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"}
-var zpoolPropListOptions = strings.Join(zpoolPropList, ",")
-var zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.envrc b/vendor/github.com/mistifyio/go-zfs/v3/.envrc
new file mode 100644
index 00000000000..f310aea6659
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/.envrc
@@ -0,0 +1,4 @@
+has nix && use nix
+dotenv_if_exists
+PATH_add bin
+path_add GOBIN bin
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.gitignore b/vendor/github.com/mistifyio/go-zfs/v3/.gitignore
new file mode 100644
index 00000000000..0867490ad59
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/.gitignore
@@ -0,0 +1,6 @@
+bin
+go-zfs.test
+.vagrant
+
+# added by lint-install
+out/
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml b/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml
new file mode 100644
index 00000000000..499c3eca166
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml
@@ -0,0 +1,207 @@
+run:
+ # The default runtime timeout is 1m, which doesn't work well on Github Actions.
+ timeout: 4m
+
+# NOTE: This file is populated by the lint-install tool. Local adjustments may be overwritten.
+linters-settings:
+ cyclop:
+ # NOTE: This is a very high transitional threshold
+ max-complexity: 37
+ package-average: 34.0
+ skip-tests: true
+
+ gocognit:
+ # NOTE: This is a very high transitional threshold
+ min-complexity: 98
+
+ dupl:
+ threshold: 200
+
+ goconst:
+ min-len: 4
+ min-occurrences: 5
+ ignore-tests: true
+
+ gosec:
+ excludes:
+ - G107 # Potential HTTP request made with variable url
+ - G204 # Subprocess launched with function call as argument or cmd arguments
+ - G404 # Use of weak random number generator (math/rand instead of crypto/rand)
+
+ errorlint:
+ # these are still common in Go: for instance, exit errors.
+ asserts: false
+
+ exhaustive:
+ default-signifies-exhaustive: true
+
+ nestif:
+ min-complexity: 8
+
+ nolintlint:
+ require-explanation: true
+ allow-unused: false
+ require-specific: true
+
+ revive:
+ ignore-generated-header: true
+ severity: warning
+ rules:
+ - name: atomic
+ - name: blank-imports
+ - name: bool-literal-in-expr
+ - name: confusing-naming
+ - name: constant-logical-expr
+ - name: context-as-argument
+ - name: context-keys-type
+ - name: deep-exit
+ - name: defer
+ - name: range-val-in-closure
+ - name: range-val-address
+ - name: dot-imports
+ - name: error-naming
+ - name: error-return
+ - name: error-strings
+ - name: errorf
+ - name: exported
+ - name: identical-branches
+ - name: if-return
+ - name: import-shadowing
+ - name: increment-decrement
+ - name: indent-error-flow
+ - name: indent-error-flow
+ - name: package-comments
+ - name: range
+ - name: receiver-naming
+ - name: redefines-builtin-id
+ - name: superfluous-else
+ - name: struct-tag
+ - name: time-naming
+ - name: unexported-naming
+ - name: unexported-return
+ - name: unnecessary-stmt
+ - name: unreachable-code
+ - name: unused-parameter
+ - name: var-declaration
+ - name: var-naming
+ - name: unconditional-recursion
+ - name: waitgroup-by-value
+
+ staticcheck:
+ go: "1.16"
+
+ unused:
+ go: "1.16"
+
+output:
+ sort-results: true
+
+linters:
+ disable-all: true
+ enable:
+ - asciicheck
+ - bodyclose
+ - cyclop
+ - deadcode
+ - dogsled
+ - dupl
+ - durationcheck
+ - errcheck
+ - errname
+ - errorlint
+ - exhaustive
+ - exportloopref
+ - forcetypeassert
+ - gocognit
+ - goconst
+ - gocritic
+ - godot
+ - gofmt
+ - gofumpt
+ - gosec
+ - goheader
+ - goimports
+ - goprintffuncname
+ - gosimple
+ - govet
+ - ifshort
+ - importas
+ - ineffassign
+ - makezero
+ - misspell
+ - nakedret
+ - nestif
+ - nilerr
+ - noctx
+ - nolintlint
+ - predeclared
+ # disabling for the initial iteration of the linting tool
+ # - promlinter
+ - revive
+ - rowserrcheck
+ - sqlclosecheck
+ - staticcheck
+ - structcheck
+ - stylecheck
+ - thelper
+ - tparallel
+ - typecheck
+ - unconvert
+ - unparam
+ - unused
+ - varcheck
+ - wastedassign
+ - whitespace
+
+ # Disabled linters, due to being misaligned with Go practices
+ # - exhaustivestruct
+ # - gochecknoglobals
+ # - gochecknoinits
+ # - goconst
+ # - godox
+ # - goerr113
+ # - gomnd
+ # - lll
+ # - nlreturn
+ # - testpackage
+ # - wsl
+ # Disabled linters, due to not being relevant to our code base:
+ # - maligned
+ # - prealloc "For most programs usage of prealloc will be a premature optimization."
+ # Disabled linters due to bad error messages or bugs
+ # - tagliatelle
+
+issues:
+ # Excluding configuration per-path, per-linter, per-text and per-source
+ exclude-rules:
+ - path: _test\.go
+ linters:
+ - dupl
+ - errcheck
+ - forcetypeassert
+ - gocyclo
+ - gosec
+ - noctx
+
+ - path: .*cmd.*
+ linters:
+ - noctx
+
+ - path: main\.go
+ linters:
+ - noctx
+
+ - path: .*cmd.*
+ text: "deep-exit"
+
+ - path: main\.go
+ text: "deep-exit"
+
+ # This check is of questionable value
+ - linters:
+ - tparallel
+ text: "call t.Parallel on the top level as well as its subtests"
+
+ # Don't hide lint issues just because there are many of them
+ max-same-issues: 0
+ max-issues-per-linter: 0
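
Note: given `require-explanation` and `require-specific` above, a bare `//nolint` comment now fails lint; a compliant suppression names the linter and justifies it. A minimal sketch:

```go
package zfs

import "os"

func cleanup(path string) {
	os.Remove(path) //nolint:errcheck // best-effort cleanup; nothing to do on failure
}
```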
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.yamllint b/vendor/github.com/mistifyio/go-zfs/v3/.yamllint
new file mode 100644
index 00000000000..9a08ad1765b
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/.yamllint
@@ -0,0 +1,16 @@
+---
+extends: default
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ brackets:
+ max-spaces-inside: 1
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ line-length:
+ level: warning
+ max: 160
+ allow-non-breakable-inline-mappings: true
+ truthy: disable
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md b/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md
new file mode 100644
index 00000000000..349245d0392
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md
@@ -0,0 +1,250 @@
+# Change Log
+
+All notable changes to this project will be documented in this file.
+This project adheres to [Semantic Versioning](http://semver.org/).
+This change log follows the advice of [Keep a CHANGELOG](https://github.com/olivierlacan/keep-a-changelog).
+
+## [Unreleased]
+
+## [3.0.0] - 2022-03-30
+
+### Added
+
+- Rename, Mount and Unmount methods
+- Parse more fields into Zpool type:
+ - dedupratio
+ - fragmentation
+ - freeing
+ - leaked
+ - readonly
+- Parse more fields into Dataset type:
+ - referenced
+- Incremental Send
+- Parse numbers in exact format
+- Support for Solaris (non-blocking, best-effort status)
+- Debug logging for command invocation
+- Use GitHub Actions for CI
+- Nix shell for dev env reproducibility
+- Direnv file for ease of dev
+- Formatting/lint checks (enforced by CI)
+- Go Module
+- FreeBSD based vagrant machine
+
+### Changed
+
+- Temporarily adjust TestDiff expected strings depending on ZFS version
+- Use one `zfs list`/`zpool list` call instead of many `zfs get`/`zpool get`
+- ZFS docs links now point to OpenZFS pages
+- Ubuntu vagrant box changed to generic/ubuntu2004
+
+### Fixed
+
+- `GetProperty` returning `VALUE` instead of the actual value
+
+### Shortlog
+
+ Amit Krishnan (1):
+ Issue #39 and Issue #40 - Enable Solaris support for go-zfs Switch from zfs/zpool get to zfs/zpool list for better performance Signed-off-by: Amit Krishnan
+
+ Anand Patil (3):
+ Added Rename
+ Small fix to rename.
+ Added mount and umount methods
+
+ Brian Akins (1):
+ Add 'referenced' to zfs properties
+
+ Brian Bickerton (3):
+ Add debug logging before and after running external zfs command
+ Don't export the default no-op logger
+ Update uuid package repo url
+
+ Dmitry Teselkin (1):
+ Issue #52 - fix parseLine for fragmentation field
+
+ Edward Betts (1):
+ correct spelling mistake
+
+ Justin Cormack (1):
+ Switch to google/uuid which is the maintained version of pborman/uuid
+
+ Manuel Mendez (40):
+ rename Umount -> Unmount to follow zfs command name
+ add missing Unmount/Mount docs
+ always allocate largest Mount slice
+ add travis config
+ travis: update to go 1.7
+ travis: get go deps first
+ test: add nok helper to verify an error occurred
+ test: add test for Dataset.GetProperty
+ ci: swap #cerana on freenode for slack
+ ci: install new deps for 0.7 releases
+ ci: bump zol versions
+ ci: bump go versions
+ ci: use better gometalinter invocations
+ ci: add ccache
+ ci: set env earlier in before_install
+ fix test nok error printing
+ test: restructure TestDiff to deal with different order of changes
+ test: better unicode path handling in TestDiff
+ travis: bump zfs and go versions
+ cache zfs artifacts
+ Add nix-shell and direnv goodness
+ prettierify all the files
+ Add go based tools
+ Add Makefile and rules.mk files
+ gofumptize the code base
+ Use tinkerbell/lint-install to setup linters
+ make golangci-lint happy
+ Update CONTRIBUTING.md with make based approach
+ Add GitHub Actions
+ Drop Travis CI
+ One sentence per line
+ Update documentation links to openzfs-docs pages
+ Format Vagrantfile using rufo
+ Add go-zfs.test to .gitignore
+ test: Avoid repetitive/duplicate error logging and quitting
+ test: Use t.Logf instead of fmt.Printf
+ test: Better cleanup and error handling in zpoolTest
+ test: Do not mark TestDatasets as a t.Helper.
+ test: Change zpoolTest to a pure helper that returns a clean up function
+ test: Move helpers to a different file
+ vagrant: Add set -euxo pipefail to provision script
+ vagrant: Update to generic/ubuntu2004
+ vagrant: Minor fixes to Vagrantfile
+ vagrant: Update to go 1.17.8
+ vagrant: Run go tests as part of provision script
+ vagrant: Indent heredoc script
+ vagrant: Add freebsd machine
+
+ Matt Layher (1):
+ Parse more fields into Zpool type
+
+ Michael Crosby (1):
+ Add incremental send
+
+ Rikard Gynnerstedt (1):
+ remove command name from joined args
+
+ Sebastiaan van Stijn (1):
+ Add go.mod and rename to github.com/mistifyio/go-zfs/v3 (v3.0.0)
+
+ mikudeko (1):
+ Fix GetProperty always returning 'VALUE'
+
+## [2.1.1] - 2015-05-29
+
+### Fixed
+
+- Ignoring first pool listed
+- Incorrect `zfs get` argument ordering
+
+### Shortlog
+
+ Alexey Guskov (1):
+ zfs command uses different order of arguments on freebsd
+
+ Brian Akins (4):
+ test that ListZpools returns expected zpool
+ test error first
+ test error first
+ fix test to check correct return value
+
+ James Cunningham (1):
+ Fix Truncating First Zpool
+
+ Pat Norton (2):
+ Added Use of Go Tools
+ Update CONTRIBUTING.md
+
+## [2.1.0] - 2014-12-08
+
+### Added
+
+- Parse hardlink modification count returned from `zfs diff`
+
+### Fixed
+
+- Continuing instead of erroring when rolling back a non-snapshot
+
+### Shortlog
+
+ Brian Akins (2):
+ need to return the error here
+ use named struct fields
+
+ Jörg Thalheim (1):
+ zfs diff handle hardlinks modification now
+
+## [2.0.0] - 2014-12-02
+
+### Added
+
+- Flags for Destroy:
+ - DESTROY_DEFAULT
+ - DESTROY_DEFER_DELETION (`zfs destroy ... -d`)
+ - DESTROY_FORCE (`zfs destroy ... -f`)
+ - DESTROY_RECURSIVE_CLONES (`zfs destroy ... -R`)
+ - DESTROY_RECURSIVE (`zfs destroy ... -r`)
+ - etc
+- Diff method (`zfs diff`)
+- LogicalUsed and Origin properties to Dataset
+- Type constants for Dataset
+- State constants for Zpool
+- Logger interface
+- Improve documentation
+
+### Shortlog
+
+ Brian Akins (8):
+ remove reflection
+ style change for switches
+ need to check for error
+ keep in scope
+ go 1.3.3
+ golint cleanup
+ Just test if logical used is greater than 0, as this appears to be implementation specific
+ add docs to satisfy golint
+
+ Jörg Thalheim (8):
+ Add deferred flag to zfs.Destroy()
+ add Logicalused property
+ Add Origin property
+ gofmt
+ Add zfs.Diff
+ Add Logger
+ add recursive destroy with clones
+ use CamelCase-style constants
+
+ Matt Layher (4):
+ Improve documentation, document common ZFS operations, provide more references
+ Add zpool state constants, for easier health checking
+ Add dataset type constants, for easier type checking
+ Fix string split in command.Run(), use strings.Fields() instead of strings.Split()
+
+## [1.0.0] - 2014-11-12
+
+### Shortlog
+
+ Brian Akins (7):
+ add godoc badge
+ Add example
+ add information about zpool to struct and parser
+ Add Quota
+ add Children call
+ add Children call
+ fix snapshot tests
+
+ Brian Bickerton (3):
+ MIST-150 Change Snapshot second parameter from properties map[string][string] to recursive bool
+ MIST-150 Add Rollback method and related tests
+ MIST-160 Add SendSnapshot streaming method and tests
+
+ Matt Layher (1):
+ Add Error struct type and tests, enabling easier error return checking
+
+[3.0.0]: https://github.com/mistifyio/go-zfs/compare/v2.1.1...v3.0.0
+[2.1.1]: https://github.com/mistifyio/go-zfs/compare/v2.1.0...v2.1.1
+[2.1.0]: https://github.com/mistifyio/go-zfs/compare/v2.0.0...v2.1.0
+[2.0.0]: https://github.com/mistifyio/go-zfs/compare/v1.0.0...v2.0.0
+[1.0.0]: https://github.com/mistifyio/go-zfs/compare/v0.0.0...v1.0.0
diff --git a/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md b/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md
similarity index 54%
rename from vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md
rename to vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md
index f1880c19e54..9f625d5646b 100644
--- a/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md
+++ b/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md
@@ -1,20 +1,23 @@
-## How to Contribute ##
+## How to Contribute
-We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute.
+We always welcome contributions to help make `go-zfs` better.
+Please take a moment to read this document if you would like to contribute.
-### Reporting issues ###
+### Reporting issues
We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and submitting pull requests.
If you find a bug:
-* Use the GitHub issue search to check whether the bug has already been reported.
-* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository.
-* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug.
+- Use the GitHub issue search to check whether the bug has already been reported.
+- If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository.
+- If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug.
-### Pull requests ###
+### Pull requests
-We welcome bug fixes, improvements, and new features. Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. For minor items, just open a pull request.
+We welcome bug fixes, improvements, and new features.
+Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope.
+For minor items, just open a pull request.
[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote:
@@ -28,11 +31,13 @@ If you need to pull new changes committed upstream:
$ git fetch upstream
$ git merge upstream/master
-Don't work directly on master as this makes it harder to merge later. Create a feature branch for your fix or new feature:
+Don't work directly on master as this makes it harder to merge later.
+Create a feature branch for your fix or new feature:
$ git checkout -b <feature branch>
-Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message.
+Please try to commit your changes in logical chunks.
+Ideally, you should include the issue number in the commit message.
$ git commit -m "Issue #<number> - <message>"
@@ -40,21 +45,20 @@ Push your feature branch to your fork.
$ git push origin <feature branch>
-[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
+[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch.
+Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
-* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/).
-* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
+- All linters should be happy (can be run with `make verify`).
+- Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE).
-### Go Tools ###
-For consistency and to catch minor issues for all of go code, please run the following:
-* goimports
-* go vet
-* golint
-* errcheck
+### Go Tools
+
+For consistency and to catch minor issues in all of the Go code, please run `make verify`.
Many editors can execute the above on save.
-----
+---
+
Guidelines based on http://azkaban.github.io/contributing.html
diff --git a/vendor/github.com/mistifyio/go-zfs/LICENSE b/vendor/github.com/mistifyio/go-zfs/v3/LICENSE
similarity index 100%
rename from vendor/github.com/mistifyio/go-zfs/LICENSE
rename to vendor/github.com/mistifyio/go-zfs/v3/LICENSE
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Makefile b/vendor/github.com/mistifyio/go-zfs/v3/Makefile
new file mode 100644
index 00000000000..1c5f55e8c60
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/Makefile
@@ -0,0 +1,19 @@
+help: ## Print this help
+ @grep --no-filename -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sed 's/:.*## /·/' | sort | column -t -W 2 -s '·' -c $(shell tput cols)
+
+all: test ## Run tests
+
+-include rules.mk
+-include lint.mk
+
+test: ## Run tests
+ go test ./...
+
+verify: gofumpt prettier lint ## Verify code style, is lint free, freshness ...
+ git diff | (! grep .)
+
+fix: gofumpt-fix prettier-fix ## Fix code formatting errors
+
+tools: ${toolsBins} ## Build Go based build tools
+
+.PHONY: all help test tools verify
diff --git a/vendor/github.com/mistifyio/go-zfs/README.md b/vendor/github.com/mistifyio/go-zfs/v3/README.md
similarity index 87%
rename from vendor/github.com/mistifyio/go-zfs/README.md
rename to vendor/github.com/mistifyio/go-zfs/v3/README.md
index fef80d727b1..c911833002b 100644
--- a/vendor/github.com/mistifyio/go-zfs/README.md
+++ b/vendor/github.com/mistifyio/go-zfs/v3/README.md
@@ -1,12 +1,12 @@
-# Go Wrapper for ZFS #
+# Go Wrapper for ZFS
Simple wrappers for ZFS command line tools.
[GoDoc](https://godoc.org/github.com/mistifyio/go-zfs)
-## Requirements ##
+## Requirements
-You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS:
+You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS:
sudo apt-get install python-software-properties
sudo apt-add-repository ppa:zfs-native/stable
@@ -17,13 +17,13 @@ Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't u
Generally you need root privileges to use anything zfs related.
-## Status ##
+## Status
This has been only been tested on Ubuntu 14.04
In the future, we hope to work directly with libzfs.
-# Hacking #
+# Hacking
The tests have decent examples for most functions.
@@ -48,7 +48,6 @@ err := f.Destroy()
```
-# Contributing #
+# Contributing
See the [contributing guidelines](./CONTRIBUTING.md)
-
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile
new file mode 100644
index 00000000000..7d8d2decd3c
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile
@@ -0,0 +1,33 @@
+GOVERSION = "1.17.8"
+
+Vagrant.configure("2") do |config|
+ config.vm.define "ubuntu" do |ubuntu|
+ ubuntu.vm.box = "generic/ubuntu2004"
+ end
+ config.vm.define "freebsd" do |freebsd|
+ freebsd.vm.box = "generic/freebsd13"
+ end
+ config.ssh.forward_agent = true
+ config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true
+ config.vm.provision "shell", inline: <<-EOF
+ set -euxo pipefail
+
+ os=$(uname -s|tr '[A-Z]' '[a-z]')
+ case $os in
+ linux) apt-get update -y && apt-get install -y --no-install-recommends gcc libc-dev zfsutils-linux ;;
+ esac
+
+ cd /tmp
+ curl -fLO --retry-max-time 30 --retry 10 https://go.dev/dl/go#{GOVERSION}.$os-amd64.tar.gz
+ tar -C /usr/local -zxf go#{GOVERSION}.$os-amd64.tar.gz
+ ln -nsf /usr/local/go/bin/go /usr/local/bin/go
+ rm -rf go*.tar.gz
+
+ chown -R vagrant:vagrant /home/vagrant/go
+ cd /home/vagrant/go/src/github.com/mistifyio/go-zfs
+ go test -c
+ sudo ./go-zfs.test -test.v
+ CGO_ENABLED=0 go test -c
+ sudo ./go-zfs.test -test.v
+ EOF
+end
diff --git a/vendor/github.com/mistifyio/go-zfs/error.go b/vendor/github.com/mistifyio/go-zfs/v3/error.go
similarity index 100%
rename from vendor/github.com/mistifyio/go-zfs/error.go
rename to vendor/github.com/mistifyio/go-zfs/v3/error.go
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/go.mod b/vendor/github.com/mistifyio/go-zfs/v3/go.mod
new file mode 100644
index 00000000000..da2019c5a07
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/go.mod
@@ -0,0 +1,5 @@
+module github.com/mistifyio/go-zfs/v3
+
+go 1.14
+
+require github.com/google/uuid v1.2.0
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/go.sum b/vendor/github.com/mistifyio/go-zfs/v3/go.sum
new file mode 100644
index 00000000000..a4d2b5ecb01
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/go.sum
@@ -0,0 +1,2 @@
+github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/lint.mk b/vendor/github.com/mistifyio/go-zfs/v3/lint.mk
new file mode 100644
index 00000000000..a1e0a4fd36f
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/lint.mk
@@ -0,0 +1,75 @@
+# BEGIN: lint-install -makefile lint.mk .
+# http://github.com/tinkerbell/lint-install
+
+.PHONY: lint
+lint: _lint
+
+LINT_ARCH := $(shell uname -m)
+LINT_OS := $(shell uname)
+LINT_OS_LOWER := $(shell echo $(LINT_OS) | tr '[:upper:]' '[:lower:]')
+LINT_ROOT := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
+
+# shellcheck and hadolint lack arm64 native binaries: rely on x86-64 emulation
+ifeq ($(LINT_OS),Darwin)
+ ifeq ($(LINT_ARCH),arm64)
+ LINT_ARCH=x86_64
+ endif
+endif
+
+LINTERS :=
+FIXERS :=
+
+SHELLCHECK_VERSION ?= v0.8.0
+SHELLCHECK_BIN := out/linters/shellcheck-$(SHELLCHECK_VERSION)-$(LINT_ARCH)
+$(SHELLCHECK_BIN):
+ mkdir -p out/linters
+ rm -rf out/linters/shellcheck-*
+ curl -sSfL https://github.com/koalaman/shellcheck/releases/download/$(SHELLCHECK_VERSION)/shellcheck-$(SHELLCHECK_VERSION).$(LINT_OS_LOWER).$(LINT_ARCH).tar.xz | tar -C out/linters -xJf -
+ mv out/linters/shellcheck-$(SHELLCHECK_VERSION)/shellcheck $@
+ rm -rf out/linters/shellcheck-$(SHELLCHECK_VERSION)/shellcheck
+
+LINTERS += shellcheck-lint
+shellcheck-lint: $(SHELLCHECK_BIN)
+ $(SHELLCHECK_BIN) $(shell find . -name "*.sh")
+
+FIXERS += shellcheck-fix
+shellcheck-fix: $(SHELLCHECK_BIN)
+ $(SHELLCHECK_BIN) $(shell find . -name "*.sh") -f diff | { read -t 1 line || exit 0; { echo "$$line" && cat; } | git apply -p2; }
+
+GOLANGCI_LINT_CONFIG := $(LINT_ROOT)/.golangci.yml
+GOLANGCI_LINT_VERSION ?= v1.43.0
+GOLANGCI_LINT_BIN := out/linters/golangci-lint-$(GOLANGCI_LINT_VERSION)-$(LINT_ARCH)
+$(GOLANGCI_LINT_BIN):
+ mkdir -p out/linters
+ rm -rf out/linters/golangci-lint-*
+ curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b out/linters $(GOLANGCI_LINT_VERSION)
+ mv out/linters/golangci-lint $@
+
+LINTERS += golangci-lint-lint
+golangci-lint-lint: $(GOLANGCI_LINT_BIN)
+	find . -name go.mod -execdir "$(GOLANGCI_LINT_BIN)" run -c "$(GOLANGCI_LINT_CONFIG)" \;
+
+FIXERS += golangci-lint-fix
+golangci-lint-fix: $(GOLANGCI_LINT_BIN)
+	find . -name go.mod -execdir "$(GOLANGCI_LINT_BIN)" run -c "$(GOLANGCI_LINT_CONFIG)" --fix \;
+
+YAMLLINT_VERSION ?= 1.26.3
+YAMLLINT_ROOT := out/linters/yamllint-$(YAMLLINT_VERSION)
+YAMLLINT_BIN := $(YAMLLINT_ROOT)/dist/bin/yamllint
+$(YAMLLINT_BIN):
+ mkdir -p out/linters
+ rm -rf out/linters/yamllint-*
+ curl -sSfL https://github.com/adrienverge/yamllint/archive/refs/tags/v$(YAMLLINT_VERSION).tar.gz | tar -C out/linters -zxf -
+ cd $(YAMLLINT_ROOT) && pip3 install --target dist .
+
+LINTERS += yamllint-lint
+yamllint-lint: $(YAMLLINT_BIN)
+ PYTHONPATH=$(YAMLLINT_ROOT)/dist $(YAMLLINT_ROOT)/dist/bin/yamllint .
+
+.PHONY: _lint $(LINTERS)
+_lint: $(LINTERS)
+
+.PHONY: fix $(FIXERS)
+fix: $(FIXERS)
+
+# END: lint-install -makefile lint.mk .
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/rules.mk b/vendor/github.com/mistifyio/go-zfs/v3/rules.mk
new file mode 100644
index 00000000000..4746c978a6f
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/rules.mk
@@ -0,0 +1,49 @@
+# Only use the recipes defined in these makefiles
+MAKEFLAGS += --no-builtin-rules
+.SUFFIXES:
+# Delete target files if there's an error
+# This avoids a failure to then skip building on next run if the output is created by shell redirection for example
+# Not really necessary for now, but just good to have already if it becomes necessary later.
+.DELETE_ON_ERROR:
+# Treat the whole recipe as a one shell script/invocation instead of one-per-line
+.ONESHELL:
+# Use bash instead of plain sh
+SHELL := bash
+.SHELLFLAGS := -o pipefail -euc
+
+version := $(shell git rev-parse --short HEAD)
+tag := $(shell git tag --points-at HEAD)
+ifneq (,$(tag))
+version := $(tag)-$(version)
+endif
+LDFLAGS := -ldflags "-X main.version=$(version)"
+export CGO_ENABLED := 0
+
+ifeq ($(origin GOBIN), undefined)
+GOBIN := ${PWD}/bin
+export GOBIN
+PATH := ${GOBIN}:${PATH}
+export PATH
+endif
+
+toolsBins := $(addprefix bin/,$(notdir $(shell grep '^\s*_' tooling/tools.go | awk -F'"' '{print $$2}')))
+
+# installs cli tools defined in tools.go
+$(toolsBins): tooling/go.mod tooling/go.sum tooling/tools.go
+$(toolsBins): CMD=$(shell awk -F'"' '/$(@F)"/ {print $$2}' tooling/tools.go)
+$(toolsBins):
+ cd tooling && go install $(CMD)
+
+.PHONY: gofumpt
+gofumpt: bin/gofumpt
+ gofumpt -s -d .
+
+gofumpt-fix: bin/gofumpt
+ gofumpt -s -w .
+
+.PHONY: prettier prettier-fix
+prettier:
+ prettier --list-different --ignore-path .gitignore .
+
+prettier-fix:
+ prettier --write --ignore-path .gitignore .
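
Note: `rules.mk` discovers its tool binaries by grepping the blank imports in `tooling/tools.go`, the usual Go "tools file" pattern. That file is not vendored here; a hypothetical sketch of its shape (gofumpt matching the `bin/gofumpt` targets above):

```go
//go:build tools

// Package tooling pins build-tool versions in its own go.mod; the
// Makefile greps the blank imports below to decide which binaries
// to build into bin/.
package tooling

import (
	_ "mvdan.cc/gofumpt"
)
```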
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/shell.nix b/vendor/github.com/mistifyio/go-zfs/v3/shell.nix
new file mode 100644
index 00000000000..e0ea24c16f3
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/shell.nix
@@ -0,0 +1,26 @@
+let _pkgs = import <nixpkgs> { };
+in { pkgs ? import (_pkgs.fetchFromGitHub {
+ owner = "NixOS";
+ repo = "nixpkgs";
+ #branch@date: 21.11@2022-02-13
+ rev = "560ad8a2f89586ab1a14290f128ad6a393046065";
+ sha256 = "0s0dv1clfpjyzy4p6ywxvzmwx9ddbr2yl77jf1wqdbr0x1206hb8";
+}) { } }:
+
+with pkgs;
+
+mkShell {
+ buildInputs = [
+ git
+ gnumake
+ gnused
+ go
+ nixfmt
+ nodePackages.prettier
+ python3Packages.pip
+ python3Packages.setuptools
+ rufo
+ shfmt
+ vagrant
+ ];
+}
diff --git a/vendor/github.com/mistifyio/go-zfs/utils.go b/vendor/github.com/mistifyio/go-zfs/v3/utils.go
similarity index 74%
rename from vendor/github.com/mistifyio/go-zfs/utils.go
rename to vendor/github.com/mistifyio/go-zfs/v3/utils.go
index c18c2c3dae6..0c2cce7d92b 100644
--- a/vendor/github.com/mistifyio/go-zfs/utils.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/utils.go
@@ -21,7 +21,6 @@ type command struct {
}
func (c *command) Run(arg ...string) ([][]string, error) {
-
cmd := exec.Command(c.Command, arg...)
var stdout, stderr bytes.Buffer
@@ -34,7 +33,6 @@ func (c *command) Run(arg ...string) ([][]string, error) {
if c.Stdin != nil {
cmd.Stdin = c.Stdin
-
}
cmd.Stderr = &stderr
@@ -42,16 +40,14 @@ func (c *command) Run(arg ...string) ([][]string, error) {
joinedArgs := strings.Join(cmd.Args, " ")
logger.Log([]string{"ID:" + id, "START", joinedArgs})
- err := cmd.Run()
- logger.Log([]string{"ID:" + id, "FINISH"})
-
- if err != nil {
+ if err := cmd.Run(); err != nil {
return nil, &Error{
Err: err,
Debug: strings.Join([]string{cmd.Path, joinedArgs[1:]}, " "),
Stderr: stderr.String(),
}
}
+ logger.Log([]string{"ID:" + id, "FINISH"})
// assume if you passed in something for stdout, that you know what to do with it
if c.Stdout != nil {
@@ -60,7 +56,7 @@ func (c *command) Run(arg ...string) ([][]string, error) {
lines := strings.Split(stdout.String(), "\n")
- //last line is always blank
+ // last line is always blank
lines = lines[0 : len(lines)-1]
output := make([][]string, len(lines))
@@ -92,33 +88,33 @@ func setUint(field *uint64, value string) error {
return nil
}
-func (ds *Dataset) parseLine(line []string) error {
+func (d *Dataset) parseLine(line []string) error {
var err error
if len(line) != len(dsPropList) {
- return errors.New("Output does not match what is expected on this platform")
+ return errors.New("output does not match what is expected on this platform")
}
- setString(&ds.Name, line[0])
- setString(&ds.Origin, line[1])
+ setString(&d.Name, line[0])
+ setString(&d.Origin, line[1])
- if err = setUint(&ds.Used, line[2]); err != nil {
+ if err = setUint(&d.Used, line[2]); err != nil {
return err
}
- if err = setUint(&ds.Avail, line[3]); err != nil {
+ if err = setUint(&d.Avail, line[3]); err != nil {
return err
}
- setString(&ds.Mountpoint, line[4])
- setString(&ds.Compression, line[5])
- setString(&ds.Type, line[6])
+ setString(&d.Mountpoint, line[4])
+ setString(&d.Compression, line[5])
+ setString(&d.Type, line[6])
- if err = setUint(&ds.Volsize, line[7]); err != nil {
+ if err = setUint(&d.Volsize, line[7]); err != nil {
return err
}
- if err = setUint(&ds.Quota, line[8]); err != nil {
+ if err = setUint(&d.Quota, line[8]); err != nil {
return err
}
- if err = setUint(&ds.Referenced, line[9]); err != nil {
+ if err = setUint(&d.Referenced, line[9]); err != nil {
return err
}
@@ -126,17 +122,13 @@ func (ds *Dataset) parseLine(line []string) error {
return nil
}
- if err = setUint(&ds.Written, line[10]); err != nil {
+ if err = setUint(&d.Written, line[10]); err != nil {
return err
}
- if err = setUint(&ds.Logicalused, line[11]); err != nil {
+ if err = setUint(&d.Logicalused, line[11]); err != nil {
return err
}
- if err = setUint(&ds.Usedbydataset, line[12]); err != nil {
- return err
- }
-
- return nil
+ return setUint(&d.Usedbydataset, line[12])
}
/*
@@ -156,12 +148,12 @@ func unescapeFilepath(path string) (string, error) {
for i := 0; i < llen; {
if path[i] == '\\' {
if llen < i+4 {
- return "", fmt.Errorf("Invalid octal code: too short")
+ return "", fmt.Errorf("invalid octal code: too short")
}
octalCode := path[(i + 1):(i + 4)]
val, err := strconv.ParseUint(octalCode, 8, 8)
if err != nil {
- return "", fmt.Errorf("Invalid octal code: %v", err)
+ return "", fmt.Errorf("invalid octal code: %w", err)
}
buf = append(buf, byte(val))
i += 4
@@ -179,6 +171,7 @@ var changeTypeMap = map[string]ChangeType{
"M": Modified,
"R": Renamed,
}
+
var inodeTypeMap = map[string]InodeType{
"B": BlockDevice,
"C": CharacterDevice,
@@ -191,51 +184,51 @@ var inodeTypeMap = map[string]InodeType{
"F": File,
}
-// matches (+1) or (-1)
-var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)")
+// matches (+1) or (-1).
+var referenceCountRegex = regexp.MustCompile(`\(([+-]\d+?)\)`)
func parseReferenceCount(field string) (int, error) {
matches := referenceCountRegex.FindStringSubmatch(field)
if matches == nil {
- return 0, fmt.Errorf("Regexp does not match")
+ return 0, fmt.Errorf("regexp does not match")
}
return strconv.Atoi(matches[1])
}
func parseInodeChange(line []string) (*InodeChange, error) {
- llen := len(line)
+ llen := len(line) // nolint:ifshort // llen *is* actually used
if llen < 1 {
- return nil, fmt.Errorf("Empty line passed")
+ return nil, fmt.Errorf("empty line passed")
}
changeType := changeTypeMap[line[0]]
if changeType == 0 {
- return nil, fmt.Errorf("Unknown change type '%s'", line[0])
+ return nil, fmt.Errorf("unknown change type '%s'", line[0])
}
switch changeType {
case Renamed:
if llen != 4 {
- return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen)
+ return nil, fmt.Errorf("mismatching number of fields: expect 4, got: %d", llen)
}
case Modified:
if llen != 4 && llen != 3 {
- return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen)
+ return nil, fmt.Errorf("mismatching number of fields: expect 3..4, got: %d", llen)
}
default:
if llen != 3 {
- return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen)
+ return nil, fmt.Errorf("mismatching number of fields: expect 3, got: %d", llen)
}
}
inodeType := inodeTypeMap[line[1]]
if inodeType == 0 {
- return nil, fmt.Errorf("Unknown inode type '%s'", line[1])
+ return nil, fmt.Errorf("unknown inode type '%s'", line[1])
}
path, err := unescapeFilepath(line[2])
if err != nil {
- return nil, fmt.Errorf("Failed to parse filename: %v", err)
+ return nil, fmt.Errorf("failed to parse filename: %w", err)
}
var newPath string
@@ -244,13 +237,13 @@ func parseInodeChange(line []string) (*InodeChange, error) {
case Renamed:
newPath, err = unescapeFilepath(line[3])
if err != nil {
- return nil, fmt.Errorf("Failed to parse filename: %v", err)
+ return nil, fmt.Errorf("failed to parse filename: %w", err)
}
case Modified:
if llen == 4 {
referenceCount, err = parseReferenceCount(line[3])
if err != nil {
- return nil, fmt.Errorf("Failed to parse reference count: %v", err)
+ return nil, fmt.Errorf("failed to parse reference count: %w", err)
}
}
default:
@@ -266,18 +259,19 @@ func parseInodeChange(line []string) (*InodeChange, error) {
}, nil
}
-// example input
-//M / /testpool/bar/
-//+ F /testpool/bar/hello.txt
-//M / /testpool/bar/hello.txt (+1)
-//M / /testpool/bar/hello-hardlink
+// example input for parseInodeChanges
+// M / /testpool/bar/
+// + F /testpool/bar/hello.txt
+// M / /testpool/bar/hello.txt (+1)
+// M / /testpool/bar/hello-hardlink
+
func parseInodeChanges(lines [][]string) ([]*InodeChange, error) {
changes := make([]*InodeChange, len(lines))
for i, line := range lines {
c, err := parseInodeChange(line)
if err != nil {
- return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line)
+ return nil, fmt.Errorf("failed to parse line %d of zfs diff: %w, got: '%s'", i, err, line)
}
changes[i] = c
}
@@ -290,7 +284,7 @@ func listByType(t, filter string) ([]*Dataset, error) {
if filter != "" {
args = append(args, filter)
}
- out, err := zfs(args...)
+ out, err := zfsOutput(args...)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go
new file mode 100644
index 00000000000..ef1beac904b
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go
@@ -0,0 +1,19 @@
+//go:build !solaris
+// +build !solaris
+
+package zfs
+
+import "strings"
+
+var (
+ // List of ZFS properties to retrieve from zfs list command on a non-Solaris platform.
+ dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced", "written", "logicalused", "usedbydataset"}
+
+ dsPropListOptions = strings.Join(dsPropList, ",")
+
+ // List of Zpool properties to retrieve from zpool list command on a non-Solaris platform.
+ zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"}
+
+ zpoolPropListOptions = strings.Join(zpoolPropList, ",")
+ zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
+)
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go
new file mode 100644
index 00000000000..c6bf6d87a67
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go
@@ -0,0 +1,19 @@
+//go:build solaris
+// +build solaris
+
+package zfs
+
+import "strings"
+
+var (
+ // List of ZFS properties to retrieve from zfs list command on a Solaris platform
+ dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced"}
+
+ dsPropListOptions = strings.Join(dsPropList, ",")
+
+ // List of Zpool properties to retrieve from zpool list command on a non-Solaris platform
+ zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"}
+
+ zpoolPropListOptions = strings.Join(zpoolPropList, ",")
+ zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
+)
diff --git a/vendor/github.com/mistifyio/go-zfs/zfs.go b/vendor/github.com/mistifyio/go-zfs/v3/zfs.go
similarity index 70%
rename from vendor/github.com/mistifyio/go-zfs/zfs.go
rename to vendor/github.com/mistifyio/go-zfs/v3/zfs.go
index 4e5087ffe28..1166bdc212f 100644
--- a/vendor/github.com/mistifyio/go-zfs/zfs.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/zfs.go
@@ -9,19 +9,18 @@ import (
"strings"
)
-// ZFS dataset types, which can indicate if a dataset is a filesystem,
-// snapshot, or volume.
+// ZFS dataset types, which can indicate if a dataset is a filesystem, snapshot, or volume.
const (
DatasetFilesystem = "filesystem"
DatasetSnapshot = "snapshot"
DatasetVolume = "volume"
)
-// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot,
-// or volume. The Type struct member can be used to determine a dataset's type.
+// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot, or volume.
+// The Type struct member can be used to determine a dataset's type.
//
// The field definitions can be found in the ZFS manual:
-// http://www.freebsd.org/cgi/man.cgi?zfs(8).
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
type Dataset struct {
Name string
Origin string
@@ -38,10 +37,10 @@ type Dataset struct {
Referenced uint64
}
-// InodeType is the type of inode as reported by Diff
+// InodeType is the type of inode as reported by Diff.
type InodeType int
-// Types of Inodes
+// Types of Inodes.
const (
_ = iota // 0 == unknown type
BlockDevice InodeType = iota
@@ -55,10 +54,10 @@ const (
File
)
-// ChangeType is the type of inode change as reported by Diff
+// ChangeType is the type of inode change as reported by Diff.
type ChangeType int
-// Types of Changes
+// Types of Changes.
const (
_ = iota // 0 == unknown type
Removed ChangeType = iota
@@ -67,10 +66,10 @@ const (
Renamed
)
-// DestroyFlag is the options flag passed to Destroy
+// DestroyFlag is the options flag passed to Destroy.
type DestroyFlag int
-// Valid destroy options
+// Valid destroy options.
const (
DestroyDefault DestroyFlag = 1 << iota
DestroyRecursive = 1 << iota
@@ -79,7 +78,7 @@ const (
DestroyForceUmount = 1 << iota
)
-// InodeChange represents a change as reported by Diff
+// InodeChange represents a change as reported by Diff.
type InodeChange struct {
Change ChangeType
Type InodeType
@@ -88,65 +87,65 @@ type InodeChange struct {
ReferenceCountChange int
}
-// Logger can be used to log commands/actions
+// Logger can be used to log commands/actions.
type Logger interface {
Log(cmd []string)
}
type defaultLogger struct{}
-func (*defaultLogger) Log(cmd []string) {
- return
+func (*defaultLogger) Log([]string) {
}
var logger Logger = &defaultLogger{}
-// SetLogger set a log handler to log all commands including arguments before
-// they are executed
+// SetLogger sets a log handler to log all commands, including arguments, before they are executed.
func SetLogger(l Logger) {
if l != nil {
logger = l
}
}
+// zfs is a helper function that wraps typical calls to zfs and ignores stdout.
+func zfs(arg ...string) error {
+ _, err := zfsOutput(arg...)
+ return err
+}
+
// zfsOutput is a helper function to wrap typical calls to zfs.
-func zfs(arg ...string) ([][]string, error) {
+func zfsOutput(arg ...string) ([][]string, error) {
c := command{Command: "zfs"}
return c.Run(arg...)
}
// Datasets returns a slice of ZFS datasets, regardless of type.
-// A filter argument may be passed to select a dataset with the matching name,
-// or empty string ("") may be used to select all datasets.
+// A filter argument may be passed to select a dataset with the matching name, or empty string ("") may be used to select all datasets.
func Datasets(filter string) ([]*Dataset, error) {
return listByType("all", filter)
}
// Snapshots returns a slice of ZFS snapshots.
-// A filter argument may be passed to select a snapshot with the matching name,
-// or empty string ("") may be used to select all snapshots.
+// A filter argument may be passed to select a snapshot with the matching name, or empty string ("") may be used to select all snapshots.
func Snapshots(filter string) ([]*Dataset, error) {
return listByType(DatasetSnapshot, filter)
}
// Filesystems returns a slice of ZFS filesystems.
-// A filter argument may be passed to select a filesystem with the matching name,
-// or empty string ("") may be used to select all filesystems.
+// A filter argument may be passed to select a filesystem with the matching name, or empty string ("") may be used to select all filesystems.
func Filesystems(filter string) ([]*Dataset, error) {
return listByType(DatasetFilesystem, filter)
}
// Volumes returns a slice of ZFS volumes.
-// A filter argument may be passed to select a volume with the matching name,
-// or empty string ("") may be used to select all volumes.
+// A filter argument may be passed to select a volume with the matching name, or empty string ("") may be used to select all volumes.
func Volumes(filter string) ([]*Dataset, error) {
return listByType(DatasetVolume, filter)
}
-// GetDataset retrieves a single ZFS dataset by name. This dataset could be
-// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume.
+// GetDataset retrieves a single ZFS dataset by name.
+// This dataset could be any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume.
func GetDataset(name string) (*Dataset, error) {
- out, err := zfs("list", "-Hp", "-o", dsPropListOptions, name)
+ out, err := zfsOutput("list", "-Hp", "-o", dsPropListOptions, name)
if err != nil {
return nil, err
}
@@ -174,8 +173,7 @@ func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, er
args = append(args, propsSlice(properties)...)
}
args = append(args, []string{d.Name, dest}...)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(dest)
@@ -192,8 +190,7 @@ func (d *Dataset) Unmount(force bool) (*Dataset, error) {
args = append(args, "-f")
}
args = append(args, d.Name)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(d.Name)
@@ -214,20 +211,17 @@ func (d *Dataset) Mount(overlay bool, options []string) (*Dataset, error) {
args = append(args, strings.Join(options, ","))
}
args = append(args, d.Name)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(d.Name)
}
-// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a
-// new snapshot with the specified name, and streams the input data into the
-// newly-created snapshot.
+// ReceiveSnapshot receives a ZFS stream from the input io.Reader.
+// A new snapshot is created with the specified name, and the input data is streamed into the newly-created snapshot.
func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) {
c := command{Command: "zfs", Stdin: input}
- _, err := c.Run("receive", name)
- if err != nil {
+ if _, err := c.Run("receive", name); err != nil {
return nil, err
}
return GetDataset(name)
@@ -245,10 +239,21 @@ func (d *Dataset) SendSnapshot(output io.Writer) error {
return err
}
-// CreateVolume creates a new ZFS volume with the specified name, size, and
-// properties.
-// A full list of available ZFS properties may be found here:
-// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+// IncrementalSend sends a ZFS stream of a snapshot to the given io.Writer, using baseSnapshot as the starting point.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) IncrementalSend(baseSnapshot *Dataset, output io.Writer) error {
+ if d.Type != DatasetSnapshot || baseSnapshot.Type != DatasetSnapshot {
+ return errors.New("can only send snapshots")
+ }
+ c := command{Command: "zfs", Stdout: output}
+ _, err := c.Run("send", "-i", baseSnapshot.Name, d.Name)
+ return err
+}
+
+// CreateVolume creates a new ZFS volume with the specified name, size, and properties.
+//
+// A full list of available ZFS properties may be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) {
args := make([]string, 4, 5)
args[0] = "create"
@@ -259,17 +264,15 @@ func CreateVolume(name string, size uint64, properties map[string]string) (*Data
args = append(args, propsSlice(properties)...)
}
args = append(args, name)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(name)
}
-// Destroy destroys a ZFS dataset. If the destroy bit flag is set, any
-// descendents of the dataset will be recursively destroyed, including snapshots.
-// If the deferred bit flag is set, the snapshot is marked for deferred
-// deletion.
+// Destroy destroys a ZFS dataset.
+// If the destroy bit flag is set, any descendents of the dataset will be recursively destroyed, including snapshots.
+// If the deferred bit flag is set, the snapshot is marked for deferred deletion.
func (d *Dataset) Destroy(flags DestroyFlag) error {
args := make([]string, 1, 3)
args[0] = "destroy"
@@ -290,25 +293,26 @@ func (d *Dataset) Destroy(flags DestroyFlag) error {
}
args = append(args, d.Name)
- _, err := zfs(args...)
+ err := zfs(args...)
return err
}
// SetProperty sets a ZFS property on the receiving dataset.
-// A full list of available ZFS properties may be found here:
-// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+//
+// A full list of available ZFS properties may be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
func (d *Dataset) SetProperty(key, val string) error {
prop := strings.Join([]string{key, val}, "=")
- _, err := zfs("set", prop, d.Name)
+ err := zfs("set", prop, d.Name)
return err
}
-// GetProperty returns the current value of a ZFS property from the
-// receiving dataset.
-// A full list of available ZFS properties may be found here:
-// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+// GetProperty returns the current value of a ZFS property from the receiving dataset.
+//
+// A full list of available ZFS properties may be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
func (d *Dataset) GetProperty(key string) (string, error) {
- out, err := zfs("get", "-H", key, d.Name)
+ out, err := zfsOutput("get", "-H", key, d.Name)
if err != nil {
return "", err
}
@@ -317,7 +321,7 @@ func (d *Dataset) GetProperty(key string) (string, error) {
}
// Rename renames a dataset.
-func (d *Dataset) Rename(name string, createParent bool, recursiveRenameSnapshots bool) (*Dataset, error) {
+func (d *Dataset) Rename(name string, createParent, recursiveRenameSnapshots bool) (*Dataset, error) {
args := make([]string, 3, 5)
args[0] = "rename"
args[1] = d.Name
@@ -328,8 +332,7 @@ func (d *Dataset) Rename(name string, createParent bool, recursiveRenameSnapshot
if recursiveRenameSnapshots {
args = append(args, "-r")
}
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return d, err
}
@@ -341,10 +344,10 @@ func (d *Dataset) Snapshots() ([]*Dataset, error) {
return Snapshots(d.Name)
}
-// CreateFilesystem creates a new ZFS filesystem with the specified name and
-// properties.
-// A full list of available ZFS properties may be found here:
-// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+// CreateFilesystem creates a new ZFS filesystem with the specified name and properties.
+//
+// A full list of available ZFS properties may be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {
args := make([]string, 1, 4)
args[0] = "create"
@@ -354,16 +357,14 @@ func CreateFilesystem(name string, properties map[string]string) (*Dataset, erro
}
args = append(args, name)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(name)
}
-// Snapshot creates a new ZFS snapshot of the receiving dataset, using the
-// specified name. Optionally, the snapshot can be taken recursively, creating
-// snapshots of all descendent filesystems in a single, atomic operation.
+// Snapshot creates a new ZFS snapshot of the receiving dataset, using the specified name.
+// Optionally, the snapshot can be taken recursively, creating snapshots of all descendent filesystems in a single, atomic operation.
func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) {
args := make([]string, 1, 4)
args[0] = "snapshot"
@@ -372,17 +373,15 @@ func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) {
}
snapName := fmt.Sprintf("%s@%s", d.Name, name)
args = append(args, snapName)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(snapName)
}
// Rollback rolls back the receiving ZFS dataset to a previous snapshot.
-// Optionally, intermediate snapshots can be destroyed. A ZFS snapshot
-// rollback cannot be completed without this option, if more recent
-// snapshots exist.
+// Optionally, intermediate snapshots can be destroyed.
+// A ZFS snapshot rollback cannot be completed without this option if more recent snapshots exist.
// An error will be returned if the input dataset is not of snapshot type.
func (d *Dataset) Rollback(destroyMoreRecent bool) error {
if d.Type != DatasetSnapshot {
@@ -396,13 +395,12 @@ func (d *Dataset) Rollback(destroyMoreRecent bool) error {
}
args = append(args, d.Name)
- _, err := zfs(args...)
+ err := zfs(args...)
return err
}
// Children returns a slice of children of the receiving ZFS dataset.
-// A recursion depth may be specified, or a depth of 0 allows unlimited
-// recursion.
+// A recursion depth may be specified, or a depth of 0 allows unlimited recursion.
func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
args := []string{"list"}
if depth > 0 {
@@ -414,7 +412,7 @@ func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
args = append(args, "-t", "all", "-Hp", "-o", dsPropListOptions)
args = append(args, d.Name)
- out, err := zfs(args...)
+ out, err := zfsOutput(args...)
if err != nil {
return nil, err
}
@@ -436,11 +434,10 @@ func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
}
// Diff returns changes between a snapshot and the given ZFS dataset.
-// The snapshot name must include the filesystem part as it is possible to
-// compare clones with their origin snapshots.
+// The snapshot name must include the filesystem part as it is possible to compare clones with their origin snapshots.
func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) {
- args := []string{"diff", "-FH", snapshot, d.Name}[:]
- out, err := zfs(args...)
+ args := []string{"diff", "-FH", snapshot, d.Name}
+ out, err := zfsOutput(args...)
if err != nil {
return nil, err
}
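For orientation, a hedged sketch of the renamed helpers from a caller's perspective: `zfs()` is now the error-only wrapper, `zfsOutput()` returns parsed output lines, and `IncrementalSend` is new in v3. The dataset name below is hypothetical, and running this requires a real pool and appropriate privileges.

```go
package main

import (
	"log"
	"os"

	zfs "github.com/mistifyio/go-zfs/v3"
)

type cmdLogger struct{}

// Log receives each zfs/zpool command line before it is executed.
func (cmdLogger) Log(cmd []string) { log.Println(cmd) }

func main() {
	zfs.SetLogger(cmdLogger{})

	ds, err := zfs.GetDataset("tank/data") // hypothetical dataset
	if err != nil {
		log.Fatal(err)
	}
	base, err := ds.Snapshot("base", false)
	if err != nil {
		log.Fatal(err)
	}
	next, err := ds.Snapshot("next", false)
	if err != nil {
		log.Fatal(err)
	}

	// Stream only the delta between the two snapshots (new in v3).
	f, err := os.Create("/tmp/delta.zfs")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := next.IncrementalSend(base, f); err != nil {
		log.Fatal(err)
	}
}
```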
diff --git a/vendor/github.com/mistifyio/go-zfs/zpool.go b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go
similarity index 71%
rename from vendor/github.com/mistifyio/go-zfs/zpool.go
rename to vendor/github.com/mistifyio/go-zfs/v3/zpool.go
index d8db945d708..2f7071305f3 100644
--- a/vendor/github.com/mistifyio/go-zfs/zpool.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go
@@ -1,8 +1,9 @@
package zfs
-// ZFS zpool states, which can indicate if a pool is online, offline,
-// degraded, etc. More information regarding zpool states can be found here:
-// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html.
+// ZFS zpool states, which can indicate if a pool is online, offline, degraded, etc.
+//
+// More information regarding zpool states can be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zpoolconcepts.7.html#Device_Failure_and_Recovery
const (
ZpoolOnline = "ONLINE"
ZpoolDegraded = "DEGRADED"
@@ -12,8 +13,8 @@ const (
ZpoolRemoved = "REMOVED"
)
-// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can
-// contain many descendent datasets.
+// Zpool is a ZFS zpool.
+// A pool is a top-level structure in ZFS, and can contain many descendent datasets.
type Zpool struct {
Name string
Health string
@@ -27,8 +28,14 @@ type Zpool struct {
DedupRatio float64
}
+// zpool is a helper function that wraps typical calls to zpool and ignores stdout.
+func zpool(arg ...string) error {
+ _, err := zpoolOutput(arg...)
+ return err
+}
+
// zpoolOutput is a helper function to wrap typical calls to zpool.
-func zpool(arg ...string) ([][]string, error) {
+func zpoolOutput(arg ...string) ([][]string, error) {
c := command{Command: "zpool"}
return c.Run(arg...)
}
@@ -37,7 +44,7 @@ func zpool(arg ...string) ([][]string, error) {
func GetZpool(name string) (*Zpool, error) {
args := zpoolArgs
args = append(args, name)
- out, err := zpool(args...)
+ out, err := zpoolOutput(args...)
if err != nil {
return nil, err
}
@@ -65,10 +72,11 @@ func (z *Zpool) Snapshots() ([]*Dataset, error) {
return Snapshots(z.Name)
}
-// CreateZpool creates a new ZFS zpool with the specified name, properties,
-// and optional arguments.
-// A full list of available ZFS properties and command-line arguments may be
-// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8).
+// CreateZpool creates a new ZFS zpool with the specified name, properties, and optional arguments.
+//
+// A full list of available ZFS properties and command-line arguments may be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
+// https://openzfs.github.io/openzfs-docs/man/8/zpool-create.8.html
func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) {
cli := make([]string, 1, 4)
cli[0] = "create"
@@ -77,8 +85,7 @@ func CreateZpool(name string, properties map[string]string, args ...string) (*Zp
}
cli = append(cli, name)
cli = append(cli, args...)
- _, err := zpool(cli...)
- if err != nil {
+ if err := zpool(cli...); err != nil {
return nil, err
}
@@ -87,14 +94,14 @@ func CreateZpool(name string, properties map[string]string, args ...string) (*Zp
// Destroy destroys a ZFS zpool by name.
func (z *Zpool) Destroy() error {
- _, err := zpool("destroy", z.Name)
+ err := zpool("destroy", z.Name)
return err
}
// ListZpools list all ZFS zpools accessible on the current system.
func ListZpools() ([]*Zpool, error) {
args := []string{"list", "-Ho", "name"}
- out, err := zpool(args...)
+ out, err := zpoolOutput(args...)
if err != nil {
return nil, err
}
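A similar sketch for the zpool side; only fields visible in this diff (Name, Health, DedupRatio) are printed, so nothing beyond the program scaffolding is guessed.

```go
package main

import (
	"fmt"
	"log"

	zfs "github.com/mistifyio/go-zfs/v3"
)

func main() {
	// ListZpools runs `zpool list -Ho name`, then fetches each pool's
	// properties via the renamed zpoolOutput helper.
	pools, err := zfs.ListZpools()
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pools {
		fmt.Printf("%s health=%s dedup=%.2f\n", p.Name, p.Health, p.DedupRatio)
	}
}
```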
diff --git a/vendor/github.com/moby/sys/mountinfo/go.mod b/vendor/github.com/moby/sys/mountinfo/go.mod
index 9749ea96d9d..e1bcdfe79f2 100644
--- a/vendor/github.com/moby/sys/mountinfo/go.mod
+++ b/vendor/github.com/moby/sys/mountinfo/go.mod
@@ -1,5 +1,5 @@
module github.com/moby/sys/mountinfo
-go 1.14
+go 1.16
-require golang.org/x/sys v0.0.0-20200909081042-eff7692f9009
+require golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
diff --git a/vendor/github.com/moby/sys/mountinfo/go.sum b/vendor/github.com/moby/sys/mountinfo/go.sum
index 2a5be7ea843..af14a66ec22 100644
--- a/vendor/github.com/moby/sys/mountinfo/go.sum
+++ b/vendor/github.com/moby/sys/mountinfo/go.sum
@@ -1,2 +1,2 @@
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009 h1:W0lCpv29Hv0UaM1LXb9QlBHLNP8UFfcKjblhVCWftOM=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go
index bc9f6b2ad3d..e78e726196e 100644
--- a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go
+++ b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go
@@ -7,6 +7,34 @@ import (
"golang.org/x/sys/unix"
)
+// MountedFast is a method of detecting a mount point without reading
+// mountinfo from procfs. A caller can only trust the result if no error
+// and sure == true are returned. Otherwise, other methods (e.g. parsing
+// /proc/mounts) have to be used. If unsure, use Mounted instead (which
+// uses MountedFast, but falls back to parsing mountinfo if needed).
+//
+// If a non-existent path is specified, an appropriate error is returned.
+// In case the caller is not interested in this particular error, it should
+// be handled separately using e.g. errors.Is(err, fs.ErrNotExist).
+//
+// This function is only available on Linux. When available (since kernel
+// v5.6), the openat2(2) syscall is used to reliably detect all mounts. Otherwise,
+// the implementation falls back to using stat(2), which can reliably detect
+// normal (but not bind) mounts.
+func MountedFast(path string) (mounted, sure bool, err error) {
+ // Root is always mounted.
+ if path == string(os.PathSeparator) {
+ return true, true, nil
+ }
+
+ path, err = normalizePath(path)
+ if err != nil {
+ return false, false, err
+ }
+ mounted, sure, err = mountedFast(path)
+ return
+}
+
// mountedByOpenat2 is a method of detecting a mount that works for all kinds
// of mounts (incl. bind mounts), but requires a recent (v5.6+) linux kernel.
func mountedByOpenat2(path string) (bool, error) {
@@ -16,9 +44,6 @@ func mountedByOpenat2(path string) (bool, error) {
Flags: unix.O_PATH | unix.O_CLOEXEC,
})
if err != nil {
- if err == unix.ENOENT { // not a mount
- return false, nil
- }
return false, &os.PathError{Op: "openat2", Path: dir, Err: err}
}
fd, err := unix.Openat2(dirfd, last, &unix.OpenHow{
@@ -26,33 +51,51 @@ func mountedByOpenat2(path string) (bool, error) {
Resolve: unix.RESOLVE_NO_XDEV,
})
_ = unix.Close(dirfd)
- switch err {
+ switch err { //nolint:errorlint // unix errors are bare
case nil: // definitely not a mount
_ = unix.Close(fd)
return false, nil
case unix.EXDEV: // definitely a mount
return true, nil
- case unix.ENOENT: // not a mount
- return false, nil
}
// not sure
return false, &os.PathError{Op: "openat2", Path: path, Err: err}
}
-func mounted(path string) (bool, error) {
+// mountedFast is similar to MountedFast, except it expects a normalized path.
+func mountedFast(path string) (mounted, sure bool, err error) {
+ // Root is always mounted.
+ if path == string(os.PathSeparator) {
+ return true, true, nil
+ }
+
// Try a fast path, using openat2() with RESOLVE_NO_XDEV.
- mounted, err := mountedByOpenat2(path)
+ mounted, err = mountedByOpenat2(path)
if err == nil {
- return mounted, nil
+ return mounted, true, nil
}
+
// Another fast path: compare st.st_dev fields.
mounted, err = mountedByStat(path)
// This does not work for bind mounts, so false negative
// is possible, therefore only trust if return is true.
if mounted && err == nil {
+ return true, true, nil
+ }
+
+ return
+}
+
+func mounted(path string) (bool, error) {
+ path, err := normalizePath(path)
+ if err != nil {
+ return false, err
+ }
+ mounted, sure, err := mountedFast(path)
+ if sure && err == nil {
return mounted, nil
}
- // Fallback to parsing mountinfo
+ // Fallback to parsing mountinfo.
return mountedByMountinfo(path)
}
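A hedged usage sketch of the new three-valued API, including the changed ENOENT behavior (a missing path is now a real error rather than a silent "not mounted"); the path below is hypothetical.

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"log"

	"github.com/moby/sys/mountinfo"
)

func main() {
	path := "/mnt/example" // hypothetical path
	mounted, sure, err := mountinfo.MountedFast(path)
	switch {
	case errors.Is(err, fs.ErrNotExist):
		fmt.Println(path, "does not exist")
	case err == nil && sure:
		// The fast paths gave a trustworthy answer.
		fmt.Println(path, "mounted:", mounted)
	default:
		// Fast paths were inconclusive; fall back to parsing
		// /proc/self/mountinfo via Mounted.
		mounted, err = mountinfo.Mounted(path)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(path, "mounted (via mountinfo):", mounted)
	}
}
```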
diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go
index efb03978b1e..c7b7678f9a0 100644
--- a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go
+++ b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go
@@ -1,10 +1,9 @@
-// +build linux freebsd,cgo openbsd,cgo
+//go:build linux || freebsd || openbsd || darwin
+// +build linux freebsd openbsd darwin
package mountinfo
import (
- "errors"
- "fmt"
"os"
"path/filepath"
@@ -15,10 +14,6 @@ func mountedByStat(path string) (bool, error) {
var st unix.Stat_t
if err := unix.Lstat(path, &st); err != nil {
- if err == unix.ENOENT {
- // Treat ENOENT as "not mounted".
- return false, nil
- }
return false, &os.PathError{Op: "stat", Path: path, Err: err}
}
dev := st.Dev
@@ -37,26 +32,18 @@ func mountedByStat(path string) (bool, error) {
func normalizePath(path string) (realPath string, err error) {
if realPath, err = filepath.Abs(path); err != nil {
- return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err)
+ return "", err
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
- return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err)
+ return "", err
}
if _, err := os.Stat(realPath); err != nil {
- return "", fmt.Errorf("failed to stat target of %q: %w", path, err)
+ return "", err
}
return realPath, nil
}
func mountedByMountinfo(path string) (bool, error) {
- path, err := normalizePath(path)
- if err != nil {
- if errors.Is(err, unix.ENOENT) {
- // treat ENOENT as "not mounted"
- return false, nil
- }
- return false, err
- }
entries, err := GetMounts(SingleEntryFilter(path))
if err != nil {
return false, err
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo.go b/vendor/github.com/moby/sys/mountinfo/mountinfo.go
index 403a893310b..574aeb8767a 100644
--- a/vendor/github.com/moby/sys/mountinfo/mountinfo.go
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo.go
@@ -10,11 +10,12 @@ func GetMounts(f FilterFunc) ([]*Info, error) {
return parseMountTable(f)
}
-// Mounted determines if a specified path is a mount point.
+// Mounted determines if a specified path is a mount point. In case of any
+// error, false (and an error) is returned.
//
-// The argument must be an absolute path, with all symlinks resolved, and clean.
-// One way to ensure it is to process the path using filepath.Abs followed by
-// filepath.EvalSymlinks before calling this function.
+// If a non-existent path is specified, an appropriate error is returned.
+// In case the caller is not interested in this particular error, it should
+// be handled separately using e.g. errors.Is(err, fs.ErrNotExist).
func Mounted(path string) (bool, error) {
// root is always mounted
if path == string(os.PathSeparator) {
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go
index b1c12d02b51..8420f58c7a9 100644
--- a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go
@@ -1,52 +1,37 @@
-// +build freebsd,cgo openbsd,cgo
+//go:build freebsd || openbsd || darwin
+// +build freebsd openbsd darwin
package mountinfo
-/*
-#include <sys/param.h>
-#include <sys/ucred.h>
-#include <sys/mount.h>
-*/
-import "C"
-
-import (
- "fmt"
- "reflect"
- "unsafe"
-)
+import "golang.org/x/sys/unix"
// parseMountTable returns information about mounted filesystems
func parseMountTable(filter FilterFunc) ([]*Info, error) {
- var rawEntries *C.struct_statfs
-
- count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
- if count == 0 {
- return nil, fmt.Errorf("Failed to call getmntinfo")
+ count, err := unix.Getfsstat(nil, unix.MNT_WAIT)
+ if err != nil {
+ return nil, err
}
- var entries []C.struct_statfs
- header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
- header.Cap = count
- header.Len = count
- header.Data = uintptr(unsafe.Pointer(rawEntries))
+ entries := make([]unix.Statfs_t, count)
+ _, err = unix.Getfsstat(entries, unix.MNT_WAIT)
+ if err != nil {
+ return nil, err
+ }
var out []*Info
for _, entry := range entries {
- var mountinfo Info
var skip, stop bool
- mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
- mountinfo.FSType = C.GoString(&entry.f_fstypename[0])
- mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+ mountinfo := getMountinfo(&entry)
if filter != nil {
// filter out entries we're not interested in
- skip, stop = filter(&mountinfo)
+ skip, stop = filter(mountinfo)
if skip {
continue
}
}
- out = append(out, &mountinfo)
+ out = append(out, mountinfo)
if stop {
break
}
@@ -55,6 +40,10 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) {
}
func mounted(path string) (bool, error) {
+ path, err := normalizePath(path)
+ if err != nil {
+ return false, err
+ }
// Fast path: compare st.st_dev fields.
// This should always work for FreeBSD and OpenBSD.
mounted, err := mountedByStat(path)
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go
new file mode 100644
index 00000000000..ecaaa7a9c11
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go
@@ -0,0 +1,14 @@
+//go:build freebsd || darwin
+// +build freebsd darwin
+
+package mountinfo
+
+import "golang.org/x/sys/unix"
+
+func getMountinfo(entry *unix.Statfs_t) *Info {
+ return &Info{
+ Mountpoint: unix.ByteSliceToString(entry.Mntonname[:]),
+ FSType: unix.ByteSliceToString(entry.Fstypename[:]),
+ Source: unix.ByteSliceToString(entry.Mntfromname[:]),
+ }
+}
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go
index f09a70fa0d9..59332b07bf4 100644
--- a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go
@@ -52,7 +52,7 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) {
numFields := len(fields)
if numFields < 10 {
// should be at least 10 fields
- return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
+ return nil, fmt.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields)
}
// separator field
@@ -67,7 +67,7 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) {
for fields[sepIdx] != "-" {
sepIdx--
if sepIdx == 5 {
- return nil, fmt.Errorf("Parsing '%s' failed: missing - separator", text)
+ return nil, fmt.Errorf("parsing '%s' failed: missing - separator", text)
}
}
@@ -75,46 +75,39 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) {
p.Mountpoint, err = unescape(fields[4])
if err != nil {
- return nil, fmt.Errorf("Parsing '%s' failed: mount point: %w", fields[4], err)
+ return nil, fmt.Errorf("parsing '%s' failed: mount point: %w", fields[4], err)
}
p.FSType, err = unescape(fields[sepIdx+1])
if err != nil {
- return nil, fmt.Errorf("Parsing '%s' failed: fstype: %w", fields[sepIdx+1], err)
+ return nil, fmt.Errorf("parsing '%s' failed: fstype: %w", fields[sepIdx+1], err)
}
p.Source, err = unescape(fields[sepIdx+2])
if err != nil {
- return nil, fmt.Errorf("Parsing '%s' failed: source: %w", fields[sepIdx+2], err)
+ return nil, fmt.Errorf("parsing '%s' failed: source: %w", fields[sepIdx+2], err)
}
p.VFSOptions = fields[sepIdx+3]
// ignore any numbers parsing errors, as there should not be any
p.ID, _ = strconv.Atoi(fields[0])
p.Parent, _ = strconv.Atoi(fields[1])
- mm := strings.Split(fields[2], ":")
+ mm := strings.SplitN(fields[2], ":", 3)
if len(mm) != 2 {
- return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm)
+ return nil, fmt.Errorf("parsing '%s' failed: unexpected major:minor pair %s", text, mm)
}
p.Major, _ = strconv.Atoi(mm[0])
p.Minor, _ = strconv.Atoi(mm[1])
p.Root, err = unescape(fields[3])
if err != nil {
- return nil, fmt.Errorf("Parsing '%s' failed: root: %w", fields[3], err)
+ return nil, fmt.Errorf("parsing '%s' failed: root: %w", fields[3], err)
}
p.Options = fields[5]
// zero or more optional fields
- switch {
- case sepIdx == 6:
- // zero, do nothing
- case sepIdx == 7:
- p.Optional = fields[6]
- default:
- p.Optional = strings.Join(fields[6:sepIdx-1], " ")
- }
+ p.Optional = strings.Join(fields[6:sepIdx], " ")
- // Run the filter after parsing all of the fields.
+ // Run the filter after parsing all fields.
var skip, stop bool
if filter != nil {
skip, stop = filter(p)
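Since `GetMountsFromReader` is exported, the optional-fields fix is easy to verify directly: the old code dropped the last of multiple optional fields, while the join over `fields[6:sepIdx]` keeps them all. A small self-contained check using a sample line in proc(5) mountinfo format:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/moby/sys/mountinfo"
)

// A mountinfo line with two optional fields ("shared:1 master:2")
// between the per-mount options and the "-" separator.
const line = "36 35 98:0 /mnt1 /mnt2 rw,noatime shared:1 master:2 - ext3 /dev/root rw,errors=continue\n"

func main() {
	infos, err := mountinfo.GetMountsFromReader(strings.NewReader(line), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("optional=%q fstype=%s\n", infos[0].Optional, infos[0].FSType)
	// Prints: optional="shared:1 master:2" fstype=ext3
}
```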
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go
new file mode 100644
index 00000000000..f682c2d3b59
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go
@@ -0,0 +1,11 @@
+package mountinfo
+
+import "golang.org/x/sys/unix"
+
+func getMountinfo(entry *unix.Statfs_t) *Info {
+ return &Info{
+ Mountpoint: unix.ByteSliceToString(entry.F_mntonname[:]),
+ FSType: unix.ByteSliceToString(entry.F_fstypename[:]),
+ Source: unix.ByteSliceToString(entry.F_mntfromname[:]),
+ }
+}
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
index d33ebca0968..c2e64bc81c7 100644
--- a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
@@ -1,4 +1,5 @@
-// +build !windows,!linux,!freebsd,!openbsd freebsd,!cgo openbsd,!cgo
+//go:build !windows && !linux && !freebsd && !openbsd && !darwin
+// +build !windows,!linux,!freebsd,!openbsd,!darwin
package mountinfo
diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml
index fbb43744d94..b097728dbff 100644
--- a/vendor/github.com/modern-go/reflect2/.travis.yml
+++ b/vendor/github.com/modern-go/reflect2/.travis.yml
@@ -1,7 +1,7 @@
language: go
go:
- - 1.8.x
+ - 1.9.x
- 1.x
before_install:
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock
index 2a3a69893b5..10ef811182d 100644
--- a/vendor/github.com/modern-go/reflect2/Gopkg.lock
+++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock
@@ -1,15 +1,9 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-[[projects]]
- name = "github.com/modern-go/concurrent"
- packages = ["."]
- revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
- version = "1.0.0"
-
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7"
+ input-imports = []
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml
index 2f4f4dbdcc5..a9bc5061b04 100644
--- a/vendor/github.com/modern-go/reflect2/Gopkg.toml
+++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml
@@ -26,10 +26,6 @@
ignored = []
-[[constraint]]
- name = "github.com/modern-go/concurrent"
- version = "1.0.0"
-
[prune]
go-tests = true
unused-packages = true
diff --git a/vendor/github.com/modern-go/reflect2/go.mod b/vendor/github.com/modern-go/reflect2/go.mod
new file mode 100644
index 00000000000..9057e9b33f2
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go.mod
@@ -0,0 +1,3 @@
+module github.com/modern-go/reflect2
+
+go 1.12
diff --git a/vendor/github.com/modern-go/reflect2/go_above_118.go b/vendor/github.com/modern-go/reflect2/go_above_118.go
new file mode 100644
index 00000000000..2b4116f6c9b
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_above_118.go
@@ -0,0 +1,23 @@
+//+build go1.18
+
+package reflect2
+
+import (
+ "unsafe"
+)
+
+// m escapes into the return value, but the caller of mapiterinit
+// doesn't let the return value escape.
+//go:noescape
+//go:linkname mapiterinit reflect.mapiterinit
+func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, it *hiter)
+
+func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+ var it hiter
+ mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj), &it)
+ return &UnsafeMapIterator{
+ hiter: &it,
+ pKeyRType: type2.pKeyRType,
+ pElemRType: type2.pElemRType,
+ }
+}
\ No newline at end of file
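For context, this is the call path the version-gated `mapiterinit` serves. A hedged sketch of map iteration through reflect2's safe API, assuming the v1.0.x conventions (safe methods take a pointer to the value, and `Next` returns pointers to key and element):

```go
package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	mapType := reflect2.TypeOf(m).(reflect2.MapType)
	iter := mapType.Iterate(&m) // backed by UnsafeIterate above
	for iter.HasNext() {
		k, v := iter.Next() // returned as *string and *int
		fmt.Println(*(k.(*string)), *(v.(*int)))
	}
}
```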
diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go
deleted file mode 100644
index 5c1cea8683a..00000000000
--- a/vendor/github.com/modern-go/reflect2/go_above_17.go
+++ /dev/null
@@ -1,8 +0,0 @@
-//+build go1.7
-
-package reflect2
-
-import "unsafe"
-
-//go:linkname resolveTypeOff reflect.resolveTypeOff
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go
index c7e3b780116..974f7685e49 100644
--- a/vendor/github.com/modern-go/reflect2/go_above_19.go
+++ b/vendor/github.com/modern-go/reflect2/go_above_19.go
@@ -6,6 +6,9 @@ import (
"unsafe"
)
+//go:linkname resolveTypeOff reflect.resolveTypeOff
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
//go:linkname makemap reflect.makemap
func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer)
diff --git a/vendor/github.com/modern-go/reflect2/go_below_118.go b/vendor/github.com/modern-go/reflect2/go_below_118.go
new file mode 100644
index 00000000000..00003dbd7c5
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_below_118.go
@@ -0,0 +1,21 @@
+//+build !go1.18
+
+package reflect2
+
+import (
+ "unsafe"
+)
+
+// m escapes into the return value, but the caller of mapiterinit
+// doesn't let the return value escape.
+//go:noescape
+//go:linkname mapiterinit reflect.mapiterinit
+func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter)
+
+func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+ return &UnsafeMapIterator{
+ hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
+ pKeyRType: type2.pKeyRType,
+ pElemRType: type2.pElemRType,
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go
deleted file mode 100644
index 65a93c889b7..00000000000
--- a/vendor/github.com/modern-go/reflect2/go_below_17.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//+build !go1.7
-
-package reflect2
-
-import "unsafe"
-
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
- return nil
-}
diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go
deleted file mode 100644
index b050ef70cdd..00000000000
--- a/vendor/github.com/modern-go/reflect2/go_below_19.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//+build !go1.9
-
-package reflect2
-
-import (
- "unsafe"
-)
-
-//go:linkname makemap reflect.makemap
-func makemap(rtype unsafe.Pointer) (m unsafe.Pointer)
-
-func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
- return makemap(rtype)
-}
diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go
index 63b49c79919..c43c8b9d629 100644
--- a/vendor/github.com/modern-go/reflect2/reflect2.go
+++ b/vendor/github.com/modern-go/reflect2/reflect2.go
@@ -1,8 +1,9 @@
package reflect2
import (
- "github.com/modern-go/concurrent"
"reflect"
+ "runtime"
+ "sync"
"unsafe"
)
@@ -130,13 +131,13 @@ var ConfigSafe = Config{UseSafeImplementation: true}.Froze()
type frozenConfig struct {
useSafeImplementation bool
- cache *concurrent.Map
+ cache *sync.Map
}
func (cfg Config) Froze() *frozenConfig {
return &frozenConfig{
useSafeImplementation: cfg.UseSafeImplementation,
- cache: concurrent.NewMap(),
+ cache: new(sync.Map),
}
}
@@ -288,11 +289,12 @@ func NoEscape(p unsafe.Pointer) unsafe.Pointer {
}
func UnsafeCastString(str string) []byte {
+ bytes := make([]byte, 0)
stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str))
- sliceHeader := &reflect.SliceHeader{
- Data: stringHeader.Data,
- Cap: stringHeader.Len,
- Len: stringHeader.Len,
- }
- return *(*[]byte)(unsafe.Pointer(sliceHeader))
+ sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))
+ sliceHeader.Data = stringHeader.Data
+ sliceHeader.Cap = stringHeader.Len
+ sliceHeader.Len = stringHeader.Len
+ runtime.KeepAlive(str)
+ return bytes
}
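The rewritten cast builds the slice header in place and keeps the string alive across the header writes. A minimal usage sketch; the caveat is inherent to the technique rather than new: the returned bytes alias the string's read-only backing array and must never be written to.

```go
package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

func main() {
	b := reflect2.UnsafeCastString("hello")
	// Shares memory with the string: no allocation or copy is made.
	fmt.Println(len(b), cap(b), string(b)) // 5 5 hello
}
```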
diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh
deleted file mode 100644
index 3d2b9768ce6..00000000000
--- a/vendor/github.com/modern-go/reflect2/test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
- go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
- if [ -f profile.out ]; then
- cat profile.out >> coverage.txt
- rm profile.out
- fi
-done
diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go
index 3acfb55803a..4b13c3155c8 100644
--- a/vendor/github.com/modern-go/reflect2/type_map.go
+++ b/vendor/github.com/modern-go/reflect2/type_map.go
@@ -1,17 +1,13 @@
+// +build !gccgo
+
package reflect2
import (
"reflect"
- "runtime"
- "strings"
"sync"
"unsafe"
)
-// typelinks1 for 1.5 ~ 1.6
-//go:linkname typelinks1 reflect.typelinks
-func typelinks1() [][]unsafe.Pointer
-
// typelinks2 for 1.7 ~
//go:linkname typelinks2 reflect.typelinks
func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
@@ -27,49 +23,10 @@ func discoverTypes() {
types = make(map[string]reflect.Type)
packages = make(map[string]map[string]reflect.Type)
- ver := runtime.Version()
- if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
- loadGo15Types()
- } else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") {
- loadGo15Types()
- } else {
- loadGo17Types()
- }
-}
-
-func loadGo15Types() {
- var obj interface{} = reflect.TypeOf(0)
- typePtrss := typelinks1()
- for _, typePtrs := range typePtrss {
- for _, typePtr := range typePtrs {
- (*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr
- typ := obj.(reflect.Type)
- if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
- loadedType := typ.Elem()
- pkgTypes := packages[loadedType.PkgPath()]
- if pkgTypes == nil {
- pkgTypes = map[string]reflect.Type{}
- packages[loadedType.PkgPath()] = pkgTypes
- }
- types[loadedType.String()] = loadedType
- pkgTypes[loadedType.Name()] = loadedType
- }
- if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr &&
- typ.Elem().Elem().Kind() == reflect.Struct {
- loadedType := typ.Elem().Elem()
- pkgTypes := packages[loadedType.PkgPath()]
- if pkgTypes == nil {
- pkgTypes = map[string]reflect.Type{}
- packages[loadedType.PkgPath()] = pkgTypes
- }
- types[loadedType.String()] = loadedType
- pkgTypes[loadedType.Name()] = loadedType
- }
- }
- }
+ loadGoTypes()
}
-func loadGo17Types() {
+func loadGoTypes() {
var obj interface{} = reflect.TypeOf(0)
sections, offset := typelinks2()
for i, offs := range offset {
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go
index 57229c8db41..b49f614efc5 100644
--- a/vendor/github.com/modern-go/reflect2/unsafe_link.go
+++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go
@@ -19,18 +19,12 @@ func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int
//go:linkname mapassign reflect.mapassign
//go:noescape
-func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer)
+func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer, val unsafe.Pointer)
//go:linkname mapaccess reflect.mapaccess
//go:noescape
func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
-// m escapes into the return value, but the caller of mapiterinit
-// doesn't let the return value escape.
-//go:noescape
-//go:linkname mapiterinit reflect.mapiterinit
-func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter
-
//go:noescape
//go:linkname mapiternext reflect.mapiternext
func mapiternext(it *hiter)
@@ -42,9 +36,21 @@ func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer)
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
- value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
- // rest fields are ignored
+ key unsafe.Pointer
+ value unsafe.Pointer
+ t unsafe.Pointer
+ h unsafe.Pointer
+ buckets unsafe.Pointer
+ bptr unsafe.Pointer
+ overflow *[]unsafe.Pointer
+ oldoverflow *[]unsafe.Pointer
+ startBucket uintptr
+ offset uint8
+ wrapped bool
+ B uint8
+ i uint8
+ bucket uintptr
+ checkBucket uintptr
}
// add returns p+x.
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go
index f2e76e6bb14..37872da8191 100644
--- a/vendor/github.com/modern-go/reflect2/unsafe_map.go
+++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go
@@ -107,14 +107,6 @@ func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator {
return type2.UnsafeIterate(objEFace.data)
}
-func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
- return &UnsafeMapIterator{
- hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
- pKeyRType: type2.pKeyRType,
- pElemRType: type2.pElemRType,
- }
-}
-
type UnsafeMapIterator struct {
*hiter
pKeyRType unsafe.Pointer
diff --git a/vendor/github.com/mtrmac/gpgme/.appveyor.yml b/vendor/github.com/mtrmac/gpgme/.appveyor.yml
deleted file mode 100644
index 2fdc09ab5c0..00000000000
--- a/vendor/github.com/mtrmac/gpgme/.appveyor.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-version: 0.{build}
-platform: x86
-branches:
- only:
- - master
-
-clone_folder: c:\gopath\src\github.com\proglottis\gpgme
-
-environment:
- GOPATH: c:\gopath
- GOROOT: C:\go-x86
- CGO_LDFLAGS: -LC:\gpg\lib
- CGO_CFLAGS: -IC:\gpg\include
- GPG_DIR: C:\gpg
-
-install:
- - nuget install 7ZipCLI -ExcludeVersion
- - set PATH=%appveyor_build_folder%\7ZipCLI\tools;%PATH%
- - appveyor DownloadFile https://www.gnupg.org/ftp/gcrypt/binary/gnupg-w32-2.1.20_20170403.exe -FileName gnupg-w32-2.1.20_20170403.exe
- - 7z x -o%GPG_DIR% gnupg-w32-2.1.20_20170403.exe
- - copy "%GPG_DIR%\lib\libgpg-error.imp" "%GPG_DIR%\lib\libgpg-error.a"
- - copy "%GPG_DIR%\lib\libassuan.imp" "%GPG_DIR%\lib\libassuan.a"
- - copy "%GPG_DIR%\lib\libgpgme.imp" "%GPG_DIR%\lib\libgpgme.a"
- - set PATH=%GOPATH%\bin;%GOROOT%\bin;%GPG_DIR%\bin;C:\MinGW\bin;%PATH%
- - C:\cygwin\bin\sed -i 's/"GPG_AGENT_INFO"/"GPG_AGENT_INFO="/;s/C.unsetenv(v)/C.putenv(v)/' %APPVEYOR_BUILD_FOLDER%\gpgme.go
-
-test_script:
- - go test -v github.com/proglottis/gpgme
-
-
-build_script:
- - go build -o example_decrypt.exe -i %APPVEYOR_BUILD_FOLDER%\examples\decrypt.go
- - go build -o example_encrypt.exe -i %APPVEYOR_BUILD_FOLDER%\examples\encrypt.go
-
-artifacts:
- - path: example_decrypt.exe
- name: decrypt example binary
- - path: example_encrypt.exe
- name: encrypt example binary
\ No newline at end of file
diff --git a/vendor/github.com/mtrmac/gpgme/.travis.yml b/vendor/github.com/mtrmac/gpgme/.travis.yml
deleted file mode 100644
index 619e3372131..00000000000
--- a/vendor/github.com/mtrmac/gpgme/.travis.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-language: go
-os:
- - linux
- - osx
- - windows
-dist: xenial
-sudo: false
-
-go:
- - 1.11
- - 1.12
- - 1.13
-
-addons:
- apt:
- packages:
- - libgpgme11-dev
- homebrew:
- packages:
- - gnupg
- - gnupg@1.4
- - gpgme
- update: true
-
-matrix:
- allow_failures:
- - os: windows
-
-before_install:
- - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install msys2; fi
- - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install gpg4win; fi
diff --git a/vendor/github.com/mtrmac/gpgme/go.mod b/vendor/github.com/mtrmac/gpgme/go.mod
deleted file mode 100644
index 3dd09c9fbae..00000000000
--- a/vendor/github.com/mtrmac/gpgme/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/mtrmac/gpgme
-
-go 1.11
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
index 494abdbfbd7..a26bc530f1c 100644
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -1,3 +1,10 @@
+## 1.16.5
+
+Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
+1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess.
+
+You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
+
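A hedged sketch of the one-line migration this entry describes (the package name is hypothetical; both functions return the 1-indexed parallel process number):

```go
package books_test

import (
	"fmt"

	. "github.com/onsi/ginkgo"
)

var _ = BeforeSuite(func() {
	// Before 1.16.5: proc := GinkgoParallelNode()
	proc := GinkgoParallelProcess() // same value, no deprecation warning
	fmt.Printf("setting up fixtures for parallel process %d\n", proc)
})
```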
## 1.16.4
### Fixes
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
index 05321e6eafc..a25ca5e03a4 100644
--- a/vendor/github.com/onsi/ginkgo/README.md
+++ b/vendor/github.com/onsi/ginkgo/README.md
@@ -1,23 +1,18 @@

-[](https://travis-ci.org/onsi/ginkgo)
[](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster)
Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
-# Ginkgo 2.0 is coming soon!
+# Ginkgo 2.0 Release Candidate is available!
-An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [v2](https://github.com/onsi/ginkgo/tree/v2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion and links to the original [proposal doc](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#).
+An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [ver2](https://github.com/onsi/ginkgo/tree/ver2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion.
-As described in the [changelog](https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711).
+As described in the [changelog](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711).
-The current timeline for completion of 2.0 looks like:
-
-- Early April 2021: first public release of 2.0, deprecation warnings land in v1.
-- May 2021: first beta/rc of 2.0 with most new functionality in place.
-- June/July 2021: 2.0 ships and fully replaces the 1.x codebase on master.
+Please start exploring and using the V2 release! To get started follow the [Using the Release Candidate](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta) directions in the migration guide.
## TLDR
Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests.
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
index 5f3f43969b8..3130c778974 100644
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -20,7 +20,7 @@ import (
"fmt"
)
-const VERSION = "1.16.4"
+const VERSION = "1.16.5"
type GinkgoConfigType struct {
RandomSeed int64
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
index 4a6e1e1ee78..ccd7685e38e 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
@@ -73,9 +73,15 @@ func GinkgoRandomSeed() int64 {
return config.GinkgoConfig.RandomSeed
}
-//GinkgoParallelNode returns the parallel node number for the current ginkgo process
-//The node number is 1-indexed
+//GinkgoParallelNode is deprecated, use GinkgoParallelProcess instead
func GinkgoParallelNode() int {
+ deprecationTracker.TrackDeprecation(types.Deprecations.ParallelNode(), codelocation.New(1))
+ return GinkgoParallelProcess()
+}
+
+//GinkgoParallelProcess returns the parallel process number for the current ginkgo process
+//The process number is 1-indexed
+func GinkgoParallelProcess() int {
return config.GinkgoConfig.ParallelNode
}
@@ -109,6 +115,7 @@ func GinkgoT(optionalOffset ...int) GinkgoTInterface {
//in the testing package's T.
type GinkgoTInterface interface {
Cleanup(func())
+ Setenv(key, value string)
Error(args ...interface{})
Errorf(format string, args ...interface{})
Fail()
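The hunks above add `GinkgoParallelProcess` and deprecate `GinkgoParallelNode`; a minimal migration sketch (the suite body is hypothetical):

```go
package books_test

import (
	"fmt"

	. "github.com/onsi/ginkgo"
)

// GinkgoParallelNode still works but now emits a deprecation warning and
// simply forwards to GinkgoParallelProcess.
var _ = BeforeSuite(func() {
	// Before: proc := GinkgoParallelNode()
	proc := GinkgoParallelProcess() // 1-indexed parallel process number
	fmt.Printf("preparing fixtures for parallel process %d\n", proc)
})
```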
diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod
index 86a5a97be17..1711443266b 100644
--- a/vendor/github.com/onsi/ginkgo/go.mod
+++ b/vendor/github.com/onsi/ginkgo/go.mod
@@ -1,6 +1,6 @@
module github.com/onsi/ginkgo
-go 1.15
+go 1.16
require (
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0
diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
index d7bbb7a96bc..4dcfaf4cd88 100644
--- a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
+++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
@@ -34,6 +34,11 @@ func (t *ginkgoTestingTProxy) Cleanup(func()) {
// No-op
}
+func (t *ginkgoTestingTProxy) Setenv(key, value string) {
+	fmt.Println("Setenv is a noop for Ginkgo at the moment but will be implemented in V2")
+	// No-op until Setenv is implemented in V2
+}
+
func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
t.fail(fmt.Sprintln(args...), t.offset)
}
diff --git a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/types/deprecation_support.go
index 305c134b787..d5a6658f35f 100644
--- a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go
+++ b/vendor/github.com/onsi/ginkgo/types/deprecation_support.go
@@ -52,6 +52,14 @@ func (d deprecations) Measure() Deprecation {
}
}
+func (d deprecations) ParallelNode() Deprecation {
+ return Deprecation{
+ Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.",
+ DocLink: "renamed-ginkgoparallelnode",
+ Version: "1.16.5",
+ }
+}
+
func (d deprecations) Convert() Deprecation {
return Deprecation{
Message: "The convert command is deprecated in Ginkgo V2",
@@ -99,16 +107,18 @@ func (d *DeprecationTracker) DidTrackDeprecations() bool {
}
func (d *DeprecationTracker) DeprecationsReport() string {
- out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
+ out := formatter.F("\n{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
out += formatter.F("{{light-yellow}}============================================={{/}}\n")
- out += formatter.F("Ginkgo 2.0 is under active development and will introduce (a small number of) breaking changes.\n")
- out += formatter.F("To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md{{/}}\n")
- out += formatter.F("To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n")
+ out += formatter.F("{{bold}}{{green}}Ginkgo 2.0{{/}} is under active development and will introduce several new features, improvements, and a small handful of breaking changes.\n")
+ out += formatter.F("A release candidate for 2.0 is now available and 2.0 should GA in Fall 2021. {{bold}}Please give the RC a try and send us feedback!{{/}}\n")
+ out += formatter.F(" - To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md{{/}}\n")
+ out += formatter.F(" - For instructions on using the Release Candidate visit {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta{{/}}\n")
+ out += formatter.F(" - To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n")
for deprecation, locations := range d.deprecations {
out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n")
if deprecation.DocLink != "" {
- out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink)
+ out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink)
}
for _, location := range locations {
out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location)
diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml
deleted file mode 100644
index 6543dc5539e..00000000000
--- a/vendor/github.com/onsi/gomega/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-arch:
- - amd64
- - ppc64le
-
-go:
- - gotip
- - 1.16.x
- - 1.15.x
-
-env:
- - GO111MODULE=on
-
-install: skip
-
-script:
- - go mod tidy && git diff --exit-code go.mod go.sum
- - make test
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 18190e8b914..4375bbc6461 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,53 @@
+## 1.19.0
+
+### Features
+- New [`HaveEach`](https://onsi.github.io/gomega/#haveeachelement-interface) matcher to ensure that each and every element in an `array`, `slice`, or `map` satisfies the passed in matcher. (#523) [9fc2ae2] (#524) [c8ba582]
+- Users can now wrap the `Gomega` interface to implement custom behavior on each assertion. (#521) [1f2e714]
+- [`ContainElement`](https://onsi.github.io/gomega/#containelementelement-interface) now accepts an additional pointer argument. Elements that satisfy the matcher are stored in the pointer enabling developers to easily add subsequent, more detailed, assertions against the matching element. (#527) [1a4e27f]
+
+### Fixes
+- update RELEASING instructions to match ginkgo [0917cde]
+- Bump github.com/onsi/ginkgo/v2 from 2.0.0 to 2.1.3 (#519) [49ab4b0]
+- Fix CVE-2021-38561 (#534) [f1b4456]
+- Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497]
+- Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5]
+- Fix for Go 1.18 (#532) [56d2a29]
+- Document precedence of timeouts (#533) [b607941]
+
+## 1.18.1
+
+### Fixes
+- Add pointer support to HaveField matcher (#495) [79e41a3]
+
+## 1.18.0
+
+### Features
+- Docs now live on the master branch in the docs folder which will make for easier PRs. The docs also use Ginkgo 2.0's new docs html/css/js. [2570272]
+- New HaveValue matcher can handle actuals that are either values (in which case they are passed on unscathed) or pointers (in which case they are indirected). [Docs here.](https://onsi.github.io/gomega/#working-with-values) (#485) [bdc087c]
+- Gmeasure has been declared GA [360db9d]
+
+### Fixes
+- Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. [c29c1c0]
+
+### Maintenance
+- Remove Travis workflow (#491) [72e6040]
+- Upgrade to Ginkgo 2.0.0 GA [f383637]
+- chore: fix description of HaveField matcher (#487) [2b4b2c0]
+- use tools.go to ensure Ginkgo cli dependencies are included [f58a52b]
+- remove dockerfile and simplify github actions to match ginkgo's actions [3f8160d]
+
+## 1.17.0
+
+### Features
+- Add HaveField matcher [3a26311]
+- add Error() assertions on the final error value of multi-return values (#480) [2f96943]
+- separate out offsets and timeouts (#478) [18a4723]
+- fix transformation error reporting (#479) [e001fab]
+- allow transform functions to report errors (#472) [bf93408]
+
+### Fixes
+- Stop using deprecated ioutil package (#467) [07f405d]
+
## 1.16.0
### Features
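A hedged sketch of the two headline 1.19.0 additions listed above, `HaveEach` and the findings pointer for `ContainElement`, driven from a plain `testing` harness:

```go
package changelog_demo_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestNewMatchers(t *testing.T) {
	g := NewWithT(t)

	// HaveEach: every element must satisfy the matcher (empty collections fail).
	g.Expect([]string{"Foo", "FooBar"}).To(HaveEach(ContainSubstring("Foo")))

	// ContainElement with a result pointer: matching elements are copied out
	// for follow-up assertions; a slice pointer collects every match.
	var findings []string
	g.Expect([]string{"Foo", "FooBar"}).To(ContainElement(ContainSubstring("Bar"), &findings))
	g.Expect(findings).To(ConsistOf("FooBar"))
}
```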
diff --git a/vendor/github.com/onsi/gomega/Makefile b/vendor/github.com/onsi/gomega/Makefile
deleted file mode 100644
index 1c6d107e175..00000000000
--- a/vendor/github.com/onsi/gomega/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
-###### Help ###################################################################
-
-.DEFAULT_GOAL = help
-
-.PHONY: help
-
-help: ## list Makefile targets
- @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
-
-###### Targets ################################################################
-
-test: version download fmt vet ginkgo ## Runs all build, static analysis, and test steps
-
-download: ## Download dependencies
- go mod download
-
-vet: ## Run static code analysis
- go vet ./...
-
-ginkgo: ## Run tests using Ginkgo
- go run github.com/onsi/ginkgo/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
-
-fmt: ## Checks that the code is formatted correcty
- @@if [ -n "$$(gofmt -s -e -l -d .)" ]; then \
- echo "gofmt check failed: run 'gofmt -s -e -l -w .'"; \
- exit 1; \
- fi
-
-docker_test: ## Run tests in a container via docker-compose
- docker-compose build test && docker-compose run --rm test make test
-
-version: ## Display the version of Go
- @@go version
diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md
index 998d64ee75b..2d30d9992f3 100644
--- a/vendor/github.com/onsi/gomega/RELEASING.md
+++ b/vendor/github.com/onsi/gomega/RELEASING.md
@@ -7,6 +7,11 @@ A Gomega release is a tagged sha and a GitHub release. To cut a release:
- New Features (minor version)
- Fixes (fix version)
- Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
-2. Update GOMEGA_VERSION in `gomega_dsl.go`
-3. Push a commit with the version number as the commit message (e.g. `v1.3.0`)
-4. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.
+1. Update GOMEGA_VERSION in `gomega_dsl.go`
+1. Commit, push, and release:
+ ```
+ git commit -m "vM.m.p"
+ git push
+ gh release create "vM.m.p"
+ git fetch --tags origin master
+ ```
\ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/docker-compose.yaml b/vendor/github.com/onsi/gomega/docker-compose.yaml
deleted file mode 100644
index f37496143d5..00000000000
--- a/vendor/github.com/onsi/gomega/docker-compose.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-version: '3.0'
-
-services:
- test:
- build:
- dockerfile: Dockerfile
- context: .
- working_dir: /app
- volumes:
- - ${PWD}:/app
diff --git a/vendor/github.com/onsi/gomega/gexec/build.go b/vendor/github.com/onsi/gomega/gexec/build.go
index 576fc8ee4b0..0ddb2107691 100644
--- a/vendor/github.com/onsi/gomega/gexec/build.go
+++ b/vendor/github.com/onsi/gomega/gexec/build.go
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"go/build"
- "io/ioutil"
"os"
"os/exec"
"path"
@@ -14,6 +13,8 @@ import (
"runtime"
"strings"
"sync"
+
+ "github.com/onsi/gomega/internal/gutil"
)
var (
@@ -82,11 +83,11 @@ func CompileTest(packagePath string, args ...string) (compiledPath string, err e
GetAndCompileTest is identical to CompileTest but `go get` the package before compiling tests.
*/
func GetAndCompileTest(packagePath string, args ...string) (compiledPath string, err error) {
- if err := getForTest(build.Default.GOPATH, packagePath, nil); err != nil {
+ if err := getForTest(build.Default.GOPATH, packagePath, []string{"GO111MODULE=off"}); err != nil {
return "", err
}
- return doCompileTest(build.Default.GOPATH, packagePath, nil, args...)
+ return doCompileTest(build.Default.GOPATH, packagePath, []string{"GO111MODULE=off"}, args...)
}
/*
@@ -100,11 +101,11 @@ func CompileTestWithEnvironment(packagePath string, env []string, args ...string
GetAndCompileTestWithEnvironment is identical to GetAndCompileTest but allows you to specify env vars to be set at build time.
*/
func GetAndCompileTestWithEnvironment(packagePath string, env []string, args ...string) (compiledPath string, err error) {
- if err := getForTest(build.Default.GOPATH, packagePath, env); err != nil {
+ if err := getForTest(build.Default.GOPATH, packagePath, append(env, "GO111MODULE=off")); err != nil {
return "", err
}
- return doCompileTest(build.Default.GOPATH, packagePath, env, args...)
+ return doCompileTest(build.Default.GOPATH, packagePath, append(env, "GO111MODULE=off"), args...)
}
/*
@@ -118,11 +119,11 @@ func CompileTestIn(gopath string, packagePath string, args ...string) (compiledP
GetAndCompileTestIn is identical to GetAndCompileTest but allows you to specify a custom $GOPATH (the first argument).
*/
func GetAndCompileTestIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) {
- if err := getForTest(gopath, packagePath, nil); err != nil {
+ if err := getForTest(gopath, packagePath, []string{"GO111MODULE=off"}); err != nil {
return "", err
}
- return doCompileTest(gopath, packagePath, nil, args...)
+ return doCompileTest(gopath, packagePath, []string{"GO111MODULE=off"}, args...)
}
func isLocalPackage(packagePath string) bool {
@@ -222,11 +223,11 @@ func temporaryDirectory() (string, error) {
mu.Lock()
defer mu.Unlock()
if tmpDir == "" {
- tmpDir, err = ioutil.TempDir("", "gexec_artifacts")
+ tmpDir, err = gutil.MkdirTemp("", "gexec_artifacts")
if err != nil {
return "", err
}
}
- return ioutil.TempDir(tmpDir, "g")
+ return gutil.MkdirTemp(tmpDir, "g")
}
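A hedged illustration of the behavior change above: the `GetAndCompile*` helpers now append `GO111MODULE=off`, so their `go get` step runs in GOPATH mode. The call site is unchanged; the package path below is hypothetical:

```go
package gexec_demo_test

import (
	"testing"

	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gexec"
)

func TestCompileInGopathMode(t *testing.T) {
	g := NewWithT(t)
	defer gexec.CleanupBuildArtifacts()

	// GetAndCompileTest now runs its `go get` and compile steps with
	// GO111MODULE=off appended to the environment, i.e. in GOPATH mode.
	path, err := gexec.GetAndCompileTest("github.com/some/org/somepkg")
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(path).To(BeAnExistingFile())
}
```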
diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod
index 7fea4ac07a8..276ffb16762 100644
--- a/vendor/github.com/onsi/gomega/go.mod
+++ b/vendor/github.com/onsi/gomega/go.mod
@@ -1,10 +1,16 @@
module github.com/onsi/gomega
-go 1.16
+go 1.18
require (
github.com/golang/protobuf v1.5.2
- github.com/onsi/ginkgo v1.16.4
- golang.org/x/net v0.0.0-20210428140749-89ef3d95e781
+ github.com/onsi/ginkgo/v2 v2.1.3
+ golang.org/x/net v0.0.0-20220225172249-27dd8689420f
gopkg.in/yaml.v2 v2.4.0
)
+
+require (
+ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
+ golang.org/x/text v0.3.7 // indirect
+ google.golang.org/protobuf v1.26.0 // indirect
+)
diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum
index 56f1b44e227..256e91607bf 100644
--- a/vendor/github.com/onsi/gomega/go.sum
+++ b/vendor/github.com/onsi/gomega/go.sum
@@ -1,10 +1,10 @@
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
@@ -21,41 +21,37 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -63,21 +59,22 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -95,9 +92,7 @@ google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/l
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 84775142c83..dcb7e8879d3 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.16.0"
+const GOMEGA_VERSION = "1.19.0"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -52,7 +52,7 @@ var Default = Gomega(internal.NewGomega(internal.FetchDefaultDurationBundle()))
// rich ecosystem of matchers without causing a test to fail. For example, to aggregate a series of potential failures
// or for use in a non-test setting.
func NewGomega(fail types.GomegaFailHandler) Gomega {
- return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithFailHandler(fail)
+ return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithFailHandler(fail)
}
// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage
@@ -69,6 +69,20 @@ type WithT = internal.Gomega
// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter.
type GomegaWithT = WithT
+// inner is an interface that allows users to provide a wrapper around Default. The wrapper
+// must implement the inner interface and return either the original Default or the result of
+// a call to NewGomega().
+type inner interface {
+ Inner() Gomega
+}
+
+func internalGomega(g Gomega) *internal.Gomega {
+ if v, ok := g.(inner); ok {
+ return v.Inner().(*internal.Gomega)
+ }
+ return g.(*internal.Gomega)
+}
+
// NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
// Gomega's rich ecosystem of matchers in standard `testing` test suites.
//
@@ -79,7 +93,7 @@ type GomegaWithT = WithT
// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
// }
func NewWithT(t types.GomegaTestingT) *WithT {
- return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithT(t)
+ return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithT(t)
}
// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter.
@@ -88,20 +102,20 @@ var NewGomegaWithT = NewWithT
// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
// the fail handler passed into RegisterFailHandler is called.
func RegisterFailHandler(fail types.GomegaFailHandler) {
- Default.(*internal.Gomega).ConfigureWithFailHandler(fail)
+ internalGomega(Default).ConfigureWithFailHandler(fail)
}
// RegisterFailHandlerWithT is deprecated and will be removed in a future release.
// users should use RegisterFailHandler, or RegisterTestingT
func RegisterFailHandlerWithT(_ types.GomegaTestingT, fail types.GomegaFailHandler) {
fmt.Println("RegisterFailHandlerWithT is deprecated. Please use RegisterFailHandler or RegisterTestingT instead.")
- Default.(*internal.Gomega).ConfigureWithFailHandler(fail)
+ internalGomega(Default).ConfigureWithFailHandler(fail)
}
// RegisterTestingT connects Gomega to Golang's XUnit style
// Testing.T tests. It is now deprecated and you should use NewWithT() instead to get a fresh instance of Gomega for each test.
func RegisterTestingT(t types.GomegaTestingT) {
- Default.(*internal.Gomega).ConfigureWithT(t)
+ internalGomega(Default).ConfigureWithT(t)
}
// InterceptGomegaFailures runs a given callback and returns an array of
@@ -112,13 +126,13 @@ func RegisterTestingT(t types.GomegaTestingT) {
// This is most useful when testing custom matchers, but can also be used to check
// on a value using a Gomega assertion without causing a test failure.
func InterceptGomegaFailures(f func()) []string {
- originalHandler := Default.(*internal.Gomega).Fail
+ originalHandler := internalGomega(Default).Fail
failures := []string{}
- Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) {
+ internalGomega(Default).Fail = func(message string, callerSkip ...int) {
failures = append(failures, message)
}
defer func() {
- Default.(*internal.Gomega).Fail = originalHandler
+ internalGomega(Default).Fail = originalHandler
}()
f()
return failures
@@ -131,14 +145,14 @@ func InterceptGomegaFailures(f func()) []string {
// does not register a failure with the FailHandler registered via RegisterFailHandler - it is up
// to the user to decide what to do with the returned error
func InterceptGomegaFailure(f func()) (err error) {
- originalHandler := Default.(*internal.Gomega).Fail
- Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) {
+ originalHandler := internalGomega(Default).Fail
+ internalGomega(Default).Fail = func(message string, callerSkip ...int) {
err = errors.New(message)
panic("stop execution")
}
defer func() {
- Default.(*internal.Gomega).Fail = originalHandler
+ internalGomega(Default).Fail = originalHandler
if e := recover(); e != nil {
if err == nil {
panic(e)
@@ -151,7 +165,7 @@ func InterceptGomegaFailure(f func()) (err error) {
}
func ensureDefaultGomegaIsConfigured() {
- if !Default.(*internal.Gomega).IsConfigured() {
+ if !internalGomega(Default).IsConfigured() {
panic(nilGomegaPanic)
}
}
@@ -204,7 +218,8 @@ func Expect(actual interface{}, extra ...interface{}) Assertion {
// ExpectWithOffset(1, "foo").To(Equal("foo"))
//
// Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
-// that is used to modify the call-stack offset when computing line numbers.
+// that is used to modify the call-stack offset when computing line numbers. It is
+// the same as `Expect(...).WithOffset`.
//
// This is most useful in helper functions that make assertions. If you want Gomega's
// error message to refer to the calling line in the test (as opposed to the line in the helper function)
@@ -300,6 +315,9 @@ For example:
}).Should(Succeed())
will rerun the function until all assertions pass.
+
+Calling `Eventually` with a timeout interval (and an optional polling interval) is
+the same as `Eventually(...).WithTimeout` or `Eventually(...).WithTimeout(...).WithPolling`.
*/
func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
@@ -309,6 +327,12 @@ func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
// EventuallyWithOffset operates like Eventually but takes an additional
// initial argument to indicate an offset in the call stack. This is useful when building helper
// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
+//
+// `EventuallyWithOffset` is the same as `Eventually(...).WithOffset`.
+//
+// Calling `EventuallyWithOffset` with a timeout interval (and an optional polling
+// interval) is the same as `Eventually(...).WithOffset(...).WithTimeout` or
+// `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.EventuallyWithOffset(offset, actual, intervals...)
@@ -337,6 +361,9 @@ func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion {
// ConsistentlyWithOffset operates like Consistently but takes an additional
// initial argument to indicate an offset in the call stack. This is useful when building helper
// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
+//
+// `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset`, optionally
+// chained with `WithTimeout` and `WithPolling`.
func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.ConsistentlyWithOffset(offset, actual, intervals...)
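A compact sketch of the chained forms these new doc comments declare equivalent; `WithOffset`, `WithTimeout`, and `WithPolling` are the methods added elsewhere in this diff:

```go
package dsl_demo_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestChainedForms(t *testing.T) {
	g := NewWithT(t)

	// Equivalent to ExpectWithOffset(1, "foo").To(Equal("foo")).
	g.Expect("foo").WithOffset(1).To(Equal("foo"))

	// Equivalent to Eventually(ch, 2*time.Second, 10*time.Millisecond).Should(...).
	ch := make(chan string, 1)
	ch <- "done"
	g.Eventually(ch).WithTimeout(2 * time.Second).WithPolling(10 * time.Millisecond).Should(Receive(Equal("done")))
}
```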
diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go
index 36b0e8345f0..b3c26889a10 100644
--- a/vendor/github.com/onsi/gomega/internal/assertion.go
+++ b/vendor/github.com/onsi/gomega/internal/assertion.go
@@ -8,44 +8,64 @@ import (
)
type Assertion struct {
- actualInput interface{}
+ actuals []interface{} // actual value plus all extra values
+ actualIndex int // value to pass to the matcher
+ vet vetinari // the vet to call before calling Gomega matcher
offset int
- extra []interface{}
g *Gomega
}
+// ...obligatory discworld reference, as "vetineer" doesn't sound ... quite right.
+type vetinari func(assertion *Assertion, optionalDescription ...interface{}) bool
+
func NewAssertion(actualInput interface{}, g *Gomega, offset int, extra ...interface{}) *Assertion {
return &Assertion{
- actualInput: actualInput,
+ actuals: append([]interface{}{actualInput}, extra...),
+ actualIndex: 0,
+ vet: (*Assertion).vetActuals,
offset: offset,
- extra: extra,
g: g,
}
}
+func (assertion *Assertion) WithOffset(offset int) types.Assertion {
+ assertion.offset = offset
+ return assertion
+}
+
+func (assertion *Assertion) Error() types.Assertion {
+ return &Assertion{
+ actuals: assertion.actuals,
+ actualIndex: len(assertion.actuals) - 1,
+ vet: (*Assertion).vetError,
+ offset: assertion.offset,
+ g: assertion.g,
+ }
+}
+
func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
@@ -61,7 +81,8 @@ func (assertion *Assertion) buildDescription(optionalDescription ...interface{})
}
func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
- matches, err := matcher.Match(assertion.actualInput)
+ actualInput := assertion.actuals[assertion.actualIndex]
+ matches, err := matcher.Match(actualInput)
assertion.g.THelper()
if err != nil {
description := assertion.buildDescription(optionalDescription...)
@@ -71,9 +92,9 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool
if matches != desiredMatch {
var message string
if desiredMatch {
- message = matcher.FailureMessage(assertion.actualInput)
+ message = matcher.FailureMessage(actualInput)
} else {
- message = matcher.NegatedFailureMessage(assertion.actualInput)
+ message = matcher.NegatedFailureMessage(actualInput)
}
description := assertion.buildDescription(optionalDescription...)
assertion.g.Fail(description+message, 2+assertion.offset)
@@ -83,8 +104,11 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool
return true
}
-func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
- success, message := vetExtras(assertion.extra)
+// vetActuals vets the actual values, with the (optional) exception of a
+// specific value, such as the first value in the case of non-error assertions,
+// or the last value in the case of Error()-based assertions.
+func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool {
+ success, message := vetActuals(assertion.actuals, assertion.actualIndex)
if success {
return true
}
@@ -95,12 +119,29 @@ func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
return false
}
-func vetExtras(extras []interface{}) (bool, string) {
- for i, extra := range extras {
- if extra != nil {
- zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
- if !reflect.DeepEqual(zeroValue, extra) {
- message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
+// vetError vets the other actual values only when the final error value is
+// non-nil: following Go's error-result idiom, the remaining values must then
+// all be zero values. When the error is nil, the other values may take on any
+// value and are not vetted.
+func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool {
+ if err := assertion.actuals[assertion.actualIndex]; err != nil {
+ // Go error result idiom: all other actual values must be zero values.
+ return assertion.vetActuals(optionalDescription...)
+ }
+ return true
+}
+
+// vetActuals vets a slice of actual values, optionally skipping a particular
+// element, such as the first or last one.
+func vetActuals(actuals []interface{}, skipIndex int) (bool, string) {
+ for i, actual := range actuals {
+ if i == skipIndex {
+ continue
+ }
+ if actual != nil {
+ zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()
+ if !reflect.DeepEqual(zeroValue, actual) {
+ message := fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual)
return false, message
}
}
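A minimal sketch of the `Error()` plumbing wired up above, which re-targets an assertion at the final error of a multi-value return:

```go
package assertion_demo_test

import (
	"os"
	"testing"

	. "github.com/onsi/gomega"
)

func TestErrorAssertions(t *testing.T) {
	g := NewWithT(t)

	// Error() re-targets the assertion at the final (error) return value;
	// per the vetting above, the other returned values must be zero values
	// whenever that error is non-nil.
	g.Expect(os.Open("/no/such/file")).Error().To(HaveOccurred())
}
```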
diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go
index ae20c14b808..99f4ebcfe71 100644
--- a/vendor/github.com/onsi/gomega/internal/async_assertion.go
+++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go
@@ -87,6 +87,21 @@ func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g
return out
}
+func (assertion *AsyncAssertion) WithOffset(offset int) types.AsyncAssertion {
+ assertion.offset = offset
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithTimeout(interval time.Duration) types.AsyncAssertion {
+ assertion.timeoutInterval = interval
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithPolling(interval time.Duration) types.AsyncAssertion {
+ assertion.pollingInterval = interval
+ return assertion
+}
+
func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
return assertion.match(matcher, true, optionalDescription...)
@@ -118,11 +133,11 @@ func (assertion *AsyncAssertion) pollActual() (interface{}, error) {
if err != nil {
return nil, err
}
- extras := []interface{}{}
+ extras := []interface{}{nil}
for _, value := range values[1:] {
extras = append(extras, value.Interface())
}
- success, message := vetExtras(extras)
+ success, message := vetActuals(extras, 0)
if !success {
return nil, errors.New(message)
}
diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go
index f5b5c6b7a0e..d26a67485f1 100644
--- a/vendor/github.com/onsi/gomega/internal/gomega.go
+++ b/vendor/github.com/onsi/gomega/internal/gomega.go
@@ -39,12 +39,12 @@ func (g *Gomega) ConfigureWithT(t types.GomegaTestingT) *Gomega {
return g
}
-func (g *Gomega) Ω(atual interface{}, extra ...interface{}) types.Assertion {
- return g.ExpectWithOffset(0, atual, extra...)
+func (g *Gomega) Ω(actual interface{}, extra ...interface{}) types.Assertion {
+ return g.ExpectWithOffset(0, actual, extra...)
}
-func (g *Gomega) Expect(atual interface{}, extra ...interface{}) types.Assertion {
- return g.ExpectWithOffset(0, atual, extra...)
+func (g *Gomega) Expect(actual interface{}, extra ...interface{}) types.Assertion {
+ return g.ExpectWithOffset(0, actual, extra...)
}
func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion {
diff --git a/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go b/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go
new file mode 100644
index 00000000000..6864055a5a7
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go
@@ -0,0 +1,48 @@
+//go:build go1.16
+// +build go1.16
+
+// Package gutil is a replacement for ioutil, which should not be used in new
+// code as of Go 1.16. With Go 1.16 and higher, this implementation
+// uses the ioutil replacement functions in "io" and "os" with some
+// Gomega specifics. This means that we should not get deprecation warnings
+// for ioutil when they are added.
+package gutil
+
+import (
+ "io"
+ "os"
+)
+
+func NopCloser(r io.Reader) io.ReadCloser {
+ return io.NopCloser(r)
+}
+
+func ReadAll(r io.Reader) ([]byte, error) {
+ return io.ReadAll(r)
+}
+
+func ReadDir(dirname string) ([]string, error) {
+ entries, err := os.ReadDir(dirname)
+ if err != nil {
+ return nil, err
+ }
+
+ var names []string
+ for _, entry := range entries {
+ names = append(names, entry.Name())
+ }
+
+ return names, nil
+}
+
+func ReadFile(filename string) ([]byte, error) {
+ return os.ReadFile(filename)
+}
+
+func MkdirTemp(dir, pattern string) (string, error) {
+ return os.MkdirTemp(dir, pattern)
+}
+
+func WriteFile(filename string, data []byte) error {
+ return os.WriteFile(filename, data, 0644)
+}
diff --git a/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go b/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go
new file mode 100644
index 00000000000..5c0ce1ee3da
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go
@@ -0,0 +1,47 @@
+//go:build !go1.16
+// +build !go1.16
+
+// Package gutil is a replacement for ioutil, which should not be used in new
+// code as of Go 1.16. With Go 1.15 and lower, this implementation
+// uses the ioutil functions, meaning that although Gomega is not officially
+// supported on these versions, it is still likely to work.
+package gutil
+
+import (
+ "io"
+ "io/ioutil"
+)
+
+func NopCloser(r io.Reader) io.ReadCloser {
+ return ioutil.NopCloser(r)
+}
+
+func ReadAll(r io.Reader) ([]byte, error) {
+ return ioutil.ReadAll(r)
+}
+
+func ReadDir(dirname string) ([]string, error) {
+ files, err := ioutil.ReadDir(dirname)
+ if err != nil {
+ return nil, err
+ }
+
+ var names []string
+ for _, file := range files {
+ names = append(names, file.Name())
+ }
+
+ return names, nil
+}
+
+func ReadFile(filename string) ([]byte, error) {
+ return ioutil.ReadFile(filename)
+}
+
+func MkdirTemp(dir, pattern string) (string, error) {
+ return ioutil.TempDir(dir, pattern)
+}
+
+func WriteFile(filename string, data []byte) error {
+ return ioutil.WriteFile(filename, data, 0644)
+}
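The paired `go1.16`/`!go1.16` build constraints above select exactly one of the two implementations at compile time; a hedged sketch of a caller inside the gomega module (`gutil` is internal, so only gomega's own code, such as `gexec` earlier in this diff, can import it; `listDir` is hypothetical):

```go
package demo

import (
	"fmt"

	"github.com/onsi/gomega/internal/gutil"
)

// listDir prints directory entries through the shim; on Go 1.16+ this goes
// via os.ReadDir, on older toolchains via ioutil.ReadDir.
func listDir(dir string) error {
	names, err := gutil.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, name := range names {
		fmt.Println(name)
	}
	return nil
}
```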
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
index 223f6ef5302..b58dd67cb8c 100644
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -256,16 +256,26 @@ func BeZero() types.GomegaMatcher {
return &matchers.BeZeroMatcher{}
}
-//ContainElement succeeds if actual contains the passed in element.
-//By default ContainElement() uses Equal() to perform the match, however a
-//matcher can be passed in instead:
+//ContainElement succeeds if actual contains the passed in element. By default
+//ContainElement() uses Equal() to perform the match, however a matcher can be
+//passed in instead:
// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
//
-//Actual must be an array, slice or map.
-//For maps, ContainElement searches through the map's values.
-func ContainElement(element interface{}) types.GomegaMatcher {
+//Actual must be an array, slice or map. For maps, ContainElement searches
+//through the map's values.
+//
+//If you want to have a copy of the matching element(s) found you can pass a
+//pointer to a variable of the appropriate type. If the variable isn't a slice
+//or map, then exactly one match will be expected and returned. If the variable
+//is a slice or map, then at least one match is expected and all matches will be
+//stored in the variable.
+//
+// var findings []string
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubString("Bar", &findings)))
+func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
return &matchers.ContainElementMatcher{
Element: element,
+ Result: result,
}
}
@@ -320,6 +330,20 @@ func ContainElements(elements ...interface{}) types.GomegaMatcher {
}
}
+//HaveEach succeeds if actual solely contains elements that match the passed in element.
+//Please note that if actual is empty, HaveEach always will fail.
+//By default HaveEach() uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+// Expect([]string{"Foo", "FooBar"}).Should(HaveEach(ContainSubstring("Foo")))
+//
+//Actual must be an array, slice or map.
+//For maps, HaveEach searches through the map's values.
+func HaveEach(element interface{}) types.GomegaMatcher {
+ return &matchers.HaveEachMatcher{
+ Element: element,
+ }
+}
+
//HaveKey succeeds if actual is a map with the passed in key.
//By default HaveKey uses Equal() to perform the match, however a
//matcher can be passed in instead:
@@ -342,6 +366,54 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
}
}
+//HaveField succeeds if actual is a struct and the value at the passed in field
+//matches the passed in matcher. By default HaveField uses Equal() to perform the match,
+//however a matcher can be passed in instead.
+//
+//The field must be a string that resolves to the name of a field in the struct. Structs can be traversed
+//using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked.
+//Such methods must take no arguments and return a single value:
+//
+// type Book struct {
+// Title string
+// Author Person
+// }
+// type Person struct {
+// FirstName string
+// LastName string
+// DOB time.Time
+// }
+// Expect(book).To(HaveField("Title", "Les Miserables"))
+// Expect(book).To(HaveField("Title", ContainSubstring("Les"))
+// Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))
+// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))
+func HaveField(field string, expected interface{}) types.GomegaMatcher {
+ return &matchers.HaveFieldMatcher{
+ Field: field,
+ Expected: expected,
+ }
+}
+
+// HaveValue applies the given matcher to the value of actual, optionally and
+// repeatedly dereferencing pointers or taking the concrete value of interfaces.
+// Thus, the matcher will always be applied to non-pointer and non-interface
+// values only. HaveValue will fail with an error if a pointer or interface is
+// nil. It will also fail for more than 31 pointer or interface dereferences to
+// guard against mistakenly applying it to arbitrarily deep linked pointers.
+//
+// HaveValue differs from gstruct.PointTo in that it does not expect actual to
+// be a pointer (as gstruct.PointTo does) but instead also accepts non-pointer
+// and even interface values.
+//
+// actual := 42
+// Expect(actual).To(HaveValue(Equal(42)))
+// Expect(&actual).To(HaveValue(Equal(42)))
+func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher {
+ return &matchers.HaveValueMatcher{
+ Matcher: matcher,
+ }
+}
+
//BeNumerically performs numerical assertions in a type-agnostic way.
//Actual and expected should be numbers, though the specific type of
//number is irrelevant (float32, float64, uint8, etc...).
@@ -485,10 +557,15 @@ func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
}
//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
-//The given transform must be a function of one parameter that returns one value.
+//The given transform must be either a function of one parameter that returns one value or a
+//function of one parameter that returns two values, where the second value must be of the
+//error type.
// var plus1 = func(i int) int { return i + 1 }
// Expect(1).To(WithTransform(plus1, Equal(2)))
//
+// var failingplus1 = func(i int) (int, error) { return 42, errors.New("this does not compute") }
+// Expect(1).To(WithTransform(failingplus1, Equal(2)))
+//
//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
return matchers.NewWithTransformMatcher(transform, matcher)
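A hedged sketch exercising the matchers documented above; the `Book`/`Person` types mirror the doc comment, and the two-value transform demonstrates the error-returning form `WithTransform` now accepts:

```go
package matchers_demo_test

import (
	"fmt"
	"testing"

	. "github.com/onsi/gomega"
)

type Person struct{ FirstName string }

type Book struct {
	Title  string
	Author Person
}

func TestFieldValueTransform(t *testing.T) {
	g := NewWithT(t)

	book := Book{Title: "Les Miserables", Author: Person{FirstName: "Victor"}}
	g.Expect(book).To(HaveField("Author.FirstName", Equal("Victor")))

	n := 42
	g.Expect(&n).To(HaveValue(Equal(42))) // the pointer is dereferenced first

	// Two-value transform: a returned non-nil error fails the assertion.
	parse := func(s string) (int, error) {
		var i int
		_, err := fmt.Sscanf(s, "%d", &i)
		return i, err
	}
	g.Expect("17").To(WithTransform(parse, Equal(17)))
}
```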
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
index 8d6c44c7a10..3d45c9ebc69 100644
--- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
@@ -3,6 +3,7 @@
package matchers
import (
+ "errors"
"fmt"
"reflect"
@@ -11,6 +12,7 @@ import (
type ContainElementMatcher struct {
Element interface{}
+ Result []interface{}
}
func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
@@ -18,6 +20,49 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
}
+ var actualT reflect.Type
+ var result reflect.Value
+ switch l := len(matcher.Result); {
+ case l > 1:
+ return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at")
+ case l == 1:
+ if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr {
+ return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s",
+ format.Object(matcher.Result[0], 1))
+ }
+ actualT = reflect.TypeOf(actual)
+ resultReference := matcher.Result[0]
+ result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings
+ switch result.Kind() {
+ case reflect.Array:
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.SliceOf(actualT.Elem()).String(), result.Type().String())
+ case reflect.Slice:
+ if !isArrayOrSlice(actual) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String())
+ }
+ if !actualT.Elem().AssignableTo(result.Type().Elem()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ case reflect.Map:
+ if !isMap(actual) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ if !actualT.AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ default:
+ if !actualT.Elem().AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.Elem().String(), result.Type().String())
+ }
+ }
+ }
+
elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
if !elementIsMatcher {
elemMatcher = &EqualMatcher{Expected: matcher.Element}
@@ -25,30 +70,99 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
value := reflect.ValueOf(actual)
var valueAt func(int) interface{}
+
+ var getFindings func() reflect.Value
+ var foundAt func(int)
+
if isMap(actual) {
keys := value.MapKeys()
valueAt = func(i int) interface{} {
return value.MapIndex(keys[i]).Interface()
}
+ if result.Kind() != reflect.Invalid {
+ fm := reflect.MakeMap(actualT)
+ getFindings = func() reflect.Value {
+ return fm
+ }
+ foundAt = func(i int) {
+ fm.SetMapIndex(keys[i], value.MapIndex(keys[i]))
+ }
+ }
} else {
valueAt = func(i int) interface{} {
return value.Index(i).Interface()
}
+ if result.Kind() != reflect.Invalid {
+ var f reflect.Value
+ if result.Kind() == reflect.Slice {
+ f = reflect.MakeSlice(result.Type(), 0, 0)
+ } else {
+ f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
+ }
+ getFindings = func() reflect.Value {
+ return f
+ }
+ foundAt = func(i int) {
+ f = reflect.Append(f, value.Index(i))
+ }
+ }
}
var lastError error
for i := 0; i < value.Len(); i++ {
- success, err := elemMatcher.Match(valueAt(i))
+ elem := valueAt(i)
+ success, err := elemMatcher.Match(elem)
if err != nil {
lastError = err
continue
}
if success {
- return true, nil
+ if result.Kind() == reflect.Invalid {
+ return true, nil
+ }
+ foundAt(i)
}
}
- return false, lastError
+ // when the expectation isn't interested in the findings beyond success or
+ // non-success, we're done here: report non-success together with the last
+ // matcher error seen, if any.
+ if result.Kind() == reflect.Invalid {
+ return false, lastError
+ }
+
+ // pick up any findings the test is interested in, as it specified a non-nil
+ // result reference. However, the expectation is that there is at least one
+ // finding. So, if a result is expected but we had no findings, then this is
+ // an error.
+ findings := getFindings()
+ if findings.Len() == 0 {
+ return false, lastError
+ }
+
+ // there's just a single finding and the result is neither a slice nor a map
+ // (so it's a scalar): pick the one and only finding and return it in the
+ // place the reference points to.
+ if findings.Len() == 1 && !isArrayOrSlice(result.Interface()) && !isMap(result.Interface()) {
+ if isMap(actual) {
+ miter := findings.MapRange()
+ miter.Next()
+ result.Set(miter.Value())
+ } else {
+ result.Set(findings.Index(0))
+ }
+ return true, nil
+ }
+
+ // one or more findings and the result references a slice or a map, so all
+ // we need to do is store our findings where the reference points to.
+ if !findings.Type().AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return multiple findings. Need *%s, got *%s",
+ findings.Type().String(), result.Type().String())
+ }
+ result.Set(findings)
+ return true, nil
}
func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
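The extension above lets a test capture what actually matched by passing an optional pointer as a second argument: a pointer to a scalar receives the single finding, while a pointer to a slice or map collects all of them. A minimal usage sketch, assuming the stock gomega DSL wires this up as ContainElement with a trailing result argument, and using hypothetical test data:

    package demo_test

    import (
        "testing"

        . "github.com/onsi/gomega"
    )

    func TestContainElementFindings(t *testing.T) {
        g := NewWithT(t)

        // exactly one match: stash it into a scalar
        var finding string
        g.Expect([]string{"a", "bb", "ccc"}).To(ContainElement(HaveLen(2), &finding))
        // finding == "bb"

        // several matches: a *[]string collects them all
        var all []string
        g.Expect([]string{"a", "bb", "cc"}).To(ContainElement(HaveLen(2), &all))
        // all == []string{"bb", "cc"}
    }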
diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
new file mode 100644
index 00000000000..025b6e1ac2c
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
@@ -0,0 +1,65 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveEachMatcher struct {
+ Element interface{}
+}
+
+func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) {
+ return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s",
+ format.Object(actual, 1))
+ }
+
+ elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
+ if !elementIsMatcher {
+ elemMatcher = &EqualMatcher{Expected: matcher.Element}
+ }
+
+ value := reflect.ValueOf(actual)
+ if value.Len() == 0 {
+ return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s",
+ format.Object(actual, 1))
+ }
+
+ var valueAt func(int) interface{}
+ if isMap(actual) {
+ keys := value.MapKeys()
+ valueAt = func(i int) interface{} {
+ return value.MapIndex(keys[i]).Interface()
+ }
+ } else {
+ valueAt = func(i int) interface{} {
+ return value.Index(i).Interface()
+ }
+ }
+
+ // the collection is guaranteed non-empty at this point (checked above),
+ // so require every element to match.
+ for i := 0; i < value.Len(); i++ {
+ success, err := elemMatcher.Match(valueAt(i))
+ if err != nil {
+ return false, err
+ }
+ if !success {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// FailureMessage returns a suitable failure message.
+func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to contain element matching", matcher.Element)
+}
+
+// NegatedFailureMessage returns a suitable negated failure message.
+func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain element matching", matcher.Element)
+}
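HaveEach is the universal counterpart to ContainElement: every entry of the array, slice, or map must satisfy the matcher (or equal the bare value). A short sketch, assuming the usual DSL entry point HaveEach:

    package demo_test

    import (
        "testing"

        . "github.com/onsi/gomega"
    )

    func TestHaveEach(t *testing.T) {
        g := NewWithT(t)

        g.Expect([]int{2, 4, 6}).To(HaveEach(BeNumerically(">", 0)))
        g.Expect(map[string]string{"a": "x", "b": "x"}).To(HaveEach("x"))
        // an empty collection is rejected with an error rather than
        // vacuously matching
    }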
diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go
new file mode 100644
index 00000000000..e1fe934d558
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_field.go
@@ -0,0 +1,87 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+)
+
+func extractField(actual interface{}, field string) (interface{}, error) {
+ fields := strings.SplitN(field, ".", 2)
+ actualValue := reflect.ValueOf(actual)
+
+ if actualValue.Kind() == reflect.Ptr {
+ actualValue = actualValue.Elem()
+ }
+ if actualValue == (reflect.Value{}) {
+ return nil, fmt.Errorf("HaveField encountered nil while dereferencing a pointer of type %T.", actual)
+ }
+
+ if actualValue.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("HaveField encountered:\n%s\nWhich is not a struct.", format.Object(actual, 1))
+ }
+
+ var extractedValue reflect.Value
+
+ if strings.HasSuffix(fields[0], "()") {
+ extractedValue = actualValue.MethodByName(strings.TrimSuffix(fields[0], "()"))
+ if extractedValue == (reflect.Value{}) {
+ return nil, fmt.Errorf("HaveField could not find method named '%s' in struct of type %T.", fields[0], actual)
+ }
+ t := extractedValue.Type()
+ if t.NumIn() != 0 || t.NumOut() != 1 {
+ return nil, fmt.Errorf("HaveField found an invalid method named '%s' in struct of type %T.\nMethods must take no arguments and return exactly one value.", fields[0], actual)
+ }
+ extractedValue = extractedValue.Call([]reflect.Value{})[0]
+ } else {
+ extractedValue = actualValue.FieldByName(fields[0])
+ if extractedValue == (reflect.Value{}) {
+ return nil, fmt.Errorf("HaveField could not find field named '%s' in struct:\n%s", fields[0], format.Object(actual, 1))
+ }
+ }
+
+ if len(fields) == 1 {
+ return extractedValue.Interface(), nil
+ } else {
+ return extractField(extractedValue.Interface(), fields[1])
+ }
+}
+
+type HaveFieldMatcher struct {
+ Field string
+ Expected interface{}
+
+ extractedField interface{}
+ expectedMatcher omegaMatcher
+}
+
+func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) {
+ matcher.extractedField, err = extractField(actual, matcher.Field)
+ if err != nil {
+ return false, err
+ }
+
+ var isMatcher bool
+ matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher)
+ if !isMatcher {
+ matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected}
+ }
+
+ return matcher.expectedMatcher.Match(matcher.extractedField)
+}
+
+func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) {
+ message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field)
+ message += matcher.expectedMatcher.FailureMessage(matcher.extractedField)
+
+ return message
+}
+
+func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field)
+ message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField)
+
+ return message
+}
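extractField resolves a dot-separated chain of exported field names, where a trailing "()" calls a niladic single-return method instead of reading a field. A sketch with hypothetical types:

    package demo_test

    import (
        "testing"

        . "github.com/onsi/gomega"
    )

    type author struct{ Name string }

    type book struct {
        Title  string
        Author author
    }

    func (b book) Summary() string { return b.Title + " by " + b.Author.Name }

    func TestHaveField(t *testing.T) {
        g := NewWithT(t)
        b := book{Title: "Les Miserables", Author: author{Name: "Victor Hugo"}}

        g.Expect(b).To(HaveField("Title", "Les Miserables"))              // bare values are wrapped in Equal
        g.Expect(b).To(HaveField("Author.Name", Equal("Victor Hugo")))   // dotted chains recurse
        g.Expect(b).To(HaveField("Summary()", ContainSubstring("Hugo"))) // trailing () calls a niladic method
    }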
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
index 66cbb254a30..6a3dcdc3533 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
@@ -2,11 +2,11 @@ package matchers
import (
"fmt"
- "io/ioutil"
"net/http"
"net/http/httptest"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/internal/gutil"
"github.com/onsi/gomega/types"
)
@@ -81,7 +81,7 @@ func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
if a.Body != nil {
defer a.Body.Close()
var err error
- matcher.cachedBody, err = ioutil.ReadAll(a.Body)
+ matcher.cachedBody, err = gutil.ReadAll(a.Body)
if err != nil {
return nil, fmt.Errorf("error reading response body: %w", err)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
index 70f54899add..0f66e46ece4 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
@@ -2,13 +2,13 @@ package matchers
import (
"fmt"
- "io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/internal/gutil"
)
type HaveHTTPStatusMatcher struct {
@@ -78,7 +78,7 @@ func formatHttpResponse(input interface{}) string {
body := ""
if resp.Body != nil {
defer resp.Body.Close()
- data, err := ioutil.ReadAll(resp.Body)
+ data, err := gutil.ReadAll(resp.Body)
if err != nil {
data = []byte("")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_value.go b/vendor/github.com/onsi/gomega/matchers/have_value.go
new file mode 100644
index 00000000000..f6725283570
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_value.go
@@ -0,0 +1,54 @@
+package matchers
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+const maxIndirections = 31
+
+type HaveValueMatcher struct {
+ Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value.
+ resolvedActual interface{} // the ("resolved") value.
+}
+
+func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
+ val := reflect.ValueOf(actual)
+ for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- {
+ // return an error if value isn't valid. Please note that we cannot
+ // check for nil here, as we might not deal with a pointer or interface
+ // at this point.
+ if !val.IsValid() {
+ return false, errors.New(format.Message(
+ actual, "not to be "))
+ }
+ switch val.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ // resolve pointers and interfaces to their values, then rinse and
+ // repeat.
+ if val.IsNil() {
+ return false, errors.New(format.Message(
+ actual, "not to be "))
+ }
+ val = val.Elem()
+ continue
+ default:
+ // forward the final value to the specified matcher.
+ m.resolvedActual = val.Interface()
+ return m.Matcher.Match(m.resolvedActual)
+ }
+ }
+ // too many indirections: extreme star gazing, indeed...?
+ return false, errors.New(format.Message(actual, "too many indirections"))
+}
+
+func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) {
+ return m.Matcher.FailureMessage(m.resolvedActual)
+}
+
+func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+ return m.Matcher.NegatedFailureMessage(m.resolvedActual)
+}
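HaveValue repeatedly dereferences pointers and interfaces (up to maxIndirections) before handing the resolved value to the inner matcher, and reports a descriptive error on nil instead of panicking. A usage sketch:

    package demo_test

    import (
        "testing"

        . "github.com/onsi/gomega"
    )

    func TestHaveValue(t *testing.T) {
        g := NewWithT(t)

        i := 42
        p := &i
        pp := &p

        // pointers (even nested ones) are resolved before matching;
        // a nil pointer would fail with an error rather than panic
        g.Expect(pp).To(HaveValue(Equal(42)))
    }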
diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go
index 8a06bd3840e..6f743b1b32d 100644
--- a/vendor/github.com/onsi/gomega/matchers/with_transform.go
+++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go
@@ -9,7 +9,7 @@ import (
type WithTransformMatcher struct {
// input
- Transform interface{} // must be a function of one parameter that returns one value
+ Transform interface{} // must be a function of one parameter that returns one value and an optional error
Matcher types.GomegaMatcher
// cached value
@@ -19,6 +19,9 @@ type WithTransformMatcher struct {
transformedValue interface{}
}
+// reflect.Type for error
+var errorT = reflect.TypeOf((*error)(nil)).Elem()
+
func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher {
if transform == nil {
panic("transform function cannot be nil")
@@ -27,8 +30,10 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher)
if txType.NumIn() != 1 {
panic("transform function must have 1 argument")
}
- if txType.NumOut() != 1 {
- panic("transform function must have 1 return value")
+ if numout := txType.NumOut(); numout != 1 {
+ if numout != 2 || !txType.Out(1).AssignableTo(errorT) {
+ panic("transform function must either have 1 return value, or 1 return value plus 1 error value")
+ }
}
return &WithTransformMatcher{
@@ -57,6 +62,11 @@ func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
// call the Transform function with `actual`
fn := reflect.ValueOf(m.Transform)
result := fn.Call([]reflect.Value{param})
+ if len(result) == 2 {
+ if !result[1].IsNil() {
+ return false, fmt.Errorf("Transform function failed: %s", result[1].Interface().(error).Error())
+ }
+ }
m.transformedValue = result[0].Interface() // expect exactly one value
return m.Matcher.Match(m.transformedValue)
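With this change, a transform may have the common (value, error) shape; a non-nil error aborts the match with "Transform function failed: ..." instead of being unsupported. strconv.Atoi is a convenient example of such a function:

    package demo_test

    import (
        "strconv"
        "testing"

        . "github.com/onsi/gomega"
    )

    func TestWithTransformError(t *testing.T) {
        g := NewWithT(t)

        // strconv.Atoi returns (int, error), exactly the shape now accepted;
        // a non-nil error fails the match with "Transform function failed: ..."
        g.Expect("42").To(WithTransform(strconv.Atoi, Equal(42)))
    }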
diff --git a/vendor/github.com/onsi/gomega/tools b/vendor/github.com/onsi/gomega/tools
new file mode 100644
index 00000000000..e4195cf362f
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/tools
@@ -0,0 +1,8 @@
+//go:build tools
+// +build tools
+
+package main
+
+import (
+ _ "github.com/onsi/ginkgo/v2/ginkgo"
+)
diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go
index c75fcb3cce9..c315ef06567 100644
--- a/vendor/github.com/onsi/gomega/types/types.go
+++ b/vendor/github.com/onsi/gomega/types/types.go
@@ -66,6 +66,10 @@ func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
type AsyncAssertion interface {
Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+
+ WithOffset(offset int) AsyncAssertion
+ WithTimeout(interval time.Duration) AsyncAssertion
+ WithPolling(interval time.Duration) AsyncAssertion
}
// Assertions are returned by Ω and Expect and enable assertions against Gomega matchers
@@ -76,4 +80,8 @@ type Assertion interface {
To(matcher GomegaMatcher, optionalDescription ...interface{}) bool
ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+
+ WithOffset(offset int) Assertion
+
+ Error() Assertion
}
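The interface additions formalize the builder-style configuration that the assertion implementations already offer; per the gomega docs, Error() asserts against the trailing error of a multi-value actual. A sketch of how these read in a test (the timeout, polling interval, and file path are illustrative only):

    package demo_test

    import (
        "os"
        "testing"
        "time"

        . "github.com/onsi/gomega"
    )

    func TestBuilderStyleAssertions(t *testing.T) {
        g := NewWithT(t)

        counter := 0
        g.Eventually(func() int {
            counter++
            return counter
        }).WithTimeout(2 * time.Second).WithPolling(10 * time.Millisecond).Should(BeNumerically(">", 3))

        // Error() asserts against the trailing error of a multi-value actual
        g.Expect(os.Open("/no/such/file")).Error().To(HaveOccurred())
    }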
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
index 581cf7cdfad..6f9e6fd3abf 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
@@ -59,4 +59,13 @@ const (
// AnnotationBaseImageName is the annotation key for the image reference of the image's base image.
AnnotationBaseImageName = "org.opencontainers.image.base.name"
+
+ // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339.
+ AnnotationArtifactCreated = "org.opencontainers.artifact.created"
+
+ // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact.
+ AnnotationArtifactDescription = "org.opencontainers.artifact.description"
+
+ // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing.
+ AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied"
)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go
new file mode 100644
index 00000000000..2a18ce1065b
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go
@@ -0,0 +1,34 @@
+// Copyright 2022 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+// Artifact describes an artifact manifest.
+// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON.
+type Artifact struct {
+ // MediaType is the media type of the object this schema refers to.
+ MediaType string `json:"mediaType"`
+
+ // ArtifactType is the IANA media type of the artifact this schema refers to.
+ ArtifactType string `json:"artifactType"`
+
+ // Blobs is a collection of blobs referenced by this manifest.
+ Blobs []Descriptor `json:"blobs,omitempty"`
+
+ // Refers is an optional link to any existing manifest within the repository.
+ Refers *Descriptor `json:"refers,omitempty"`
+
+ // Annotations contains arbitrary metadata for the artifact manifest.
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
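A sketch of building one of the new artifact manifests and marshalling it; the artifactType value below is a made-up example media type:

    package main

    import (
        "encoding/json"
        "fmt"

        v1 "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
        a := v1.Artifact{
            MediaType:    v1.MediaTypeArtifactManifest,
            ArtifactType: "application/vnd.example.sbom.v1+json", // hypothetical artifact type
            Annotations: map[string]string{
                v1.AnnotationArtifactDescription: "example SBOM attached to an image",
            },
        }
        out, _ := json.MarshalIndent(a, "", "  ")
        fmt.Println(string(out))
    }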
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
index 6e442a0853f..9654aa5af68 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Linux Foundation
+// Copyright 2016-2022 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -35,10 +35,18 @@ type Descriptor struct {
// Annotations contains arbitrary metadata relating to the targeted content.
Annotations map[string]string `json:"annotations,omitempty"`
+ // Data is an embedding of the targeted content. This is encoded as a base64
+ // string when marshalled to JSON (automatically, by encoding/json). If
+ // present, Data can be used directly to avoid fetching the targeted content.
+ Data []byte `json:"data,omitempty"`
+
// Platform describes the platform which the image in the manifest runs on.
//
// This should only be used when referring to a manifest.
Platform *Platform `json:"platform,omitempty"`
+
+ // ArtifactType is the IANA media type of this artifact.
+ ArtifactType string `json:"artifactType,omitempty"`
}
// Platform describes the platform which the image in the manifest runs on.
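The new Data field lets small blobs travel inline with their descriptor; encoding/json emits []byte as base64 automatically, so no extra handling is needed. A sketch:

    package main

    import (
        "encoding/json"
        "fmt"

        digest "github.com/opencontainers/go-digest"
        v1 "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
        payload := []byte(`{"hello":"world"}`)
        d := v1.Descriptor{
            MediaType: "application/json",
            Digest:    digest.FromBytes(payload),
            Size:      int64(len(payload)),
            Data:      payload, // emitted as base64 by encoding/json
        }
        out, _ := json.Marshal(d)
        fmt.Println(string(out))
    }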
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
index 4e6c4b23623..ed4a56e59e8 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
@@ -21,6 +21,9 @@ import "github.com/opencontainers/image-spec/specs-go"
type Index struct {
specs.Versioned
+ // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json`
+ MediaType string `json:"mediaType,omitempty"`
+
// Manifests references platform specific manifests.
Manifests []Descriptor `json:"manifests"`
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
index 7ff32c40ba3..7f2df9863c1 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Linux Foundation
+// Copyright 2016-2022 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -20,6 +20,9 @@ import "github.com/opencontainers/image-spec/specs-go"
type Manifest struct {
specs.Versioned
+ // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
+ MediaType string `json:"mediaType,omitempty"`
+
// Config references a configuration object for a container, by digest.
// The referenced configuration object is a JSON blob that the runtime uses to set up the container.
Config Descriptor `json:"config"`
@@ -27,6 +30,9 @@ type Manifest struct {
// Layers is an indexed list of layers referenced by the manifest.
Layers []Descriptor `json:"layers"`
+ // Refers is an optional link to any existing manifest within the repository.
+ Refers *Descriptor `json:"refers,omitempty"`
+
// Annotations contains arbitrary metadata for the image manifest.
Annotations map[string]string `json:"annotations,omitempty"`
}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
index 4f35ac134fe..935b481e3ed 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
@@ -54,4 +54,7 @@ const (
// MediaTypeImageConfig specifies the media type for the image configuration.
MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
+
+ // MediaTypeArtifactManifest specifies the media type for the artifact manifest.
+ MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json"
)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
index 58f1095ab08..bf4d8cc7e04 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -20,12 +20,12 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 1
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 0
+ VersionMinor = 1
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 1
+ VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
- VersionDev = "-dev"
+ VersionDev = "-rc1"
)
// Version is the specification version that the package types support.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go
index 744d4e57054..8b1483c7de7 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go
@@ -3,7 +3,6 @@ package apparmor
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"sync"
@@ -19,7 +18,7 @@ var (
func isEnabled() bool {
checkAppArmor.Do(func() {
if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil {
- buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
+ buf, err := os.ReadFile("/sys/module/apparmor/parameters/enabled")
appArmorEnabled = err == nil && len(buf) > 1 && buf[0] == 'Y'
}
})
@@ -52,7 +51,7 @@ func setProcAttr(attr, value string) error {
// changeOnExec reimplements aa_change_onexec from libapparmor in Go
func changeOnExec(name string) error {
if err := setProcAttr("exec", "exec "+name); err != nil {
- return fmt.Errorf("apparmor failed to apply profile: %s", err)
+ return fmt.Errorf("apparmor failed to apply profile: %w", err)
}
return nil
}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go
index 1adadafec8e..684248f2559 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package apparmor
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go
index 6d5b3d09df3..7d8e9fc3104 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go
@@ -1,10 +1,10 @@
+//go:build !windows
// +build !windows
package devices
import (
"errors"
- "io/ioutil"
"os"
"path/filepath"
@@ -16,8 +16,8 @@ var ErrNotADevice = errors.New("not a device node")
// Testing dependencies
var (
- unixLstat = unix.Lstat
- ioutilReadDir = ioutil.ReadDir
+ unixLstat = unix.Lstat
+ osReadDir = os.ReadDir
)
func mkDev(d *Rule) (uint64, error) {
@@ -40,7 +40,7 @@ func DeviceFromPath(path, permissions string) (*Device, error) {
var (
devType Type
mode = stat.Mode
- devNumber = uint64(stat.Rdev)
+ devNumber = uint64(stat.Rdev) //nolint:unconvert // Rdev is uint32 on e.g. MIPS.
major = unix.Major(devNumber)
minor = unix.Minor(devNumber)
)
@@ -76,7 +76,7 @@ func HostDevices() ([]*Device, error) {
// GetDevices recursively traverses a directory specified by path
// and returns all devices found there.
func GetDevices(path string) ([]*Device, error) {
- files, err := ioutilReadDir(path)
+ files, err := osReadDir(path)
if err != nil {
return nil, err
}
@@ -103,7 +103,7 @@ func GetDevices(path string) ([]*Device, error) {
}
device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm")
if err != nil {
- if err == ErrNotADevice {
+ if errors.Is(err, ErrNotADevice) {
continue
}
if os.IsNotExist(err) {
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
index 967717a1b1c..f95c1409fce 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
@@ -1,3 +1,4 @@
+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package user
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
index cc7a106be5d..2473c5eaddc 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
@@ -120,7 +120,7 @@ func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error)
func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
if r == nil {
- return nil, fmt.Errorf("nil source for passwd-formatted data")
+ return nil, errors.New("nil source for passwd-formatted data")
}
var (
@@ -178,7 +178,7 @@ func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error)
func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
if r == nil {
- return nil, fmt.Errorf("nil source for group-formatted data")
+ return nil, errors.New("nil source for group-formatted data")
}
rd := bufio.NewReader(r)
out := []Group{}
@@ -339,7 +339,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (
if userArg == "" {
userArg = strconv.Itoa(user.Uid)
}
- return nil, fmt.Errorf("unable to find user %s: %v", userArg, err)
+ return nil, fmt.Errorf("unable to find user %s: %w", userArg, err)
}
var matchedUserName string
@@ -355,7 +355,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (
if uidErr != nil {
// Not numeric.
- return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries)
+ return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries)
}
user.Uid = uidArg
@@ -390,7 +390,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (
return g.Name == groupArg
})
if err != nil && group != nil {
- return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err)
+ return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err)
}
// Only start modifying user.Gid if it is in explicit form.
@@ -404,7 +404,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (
if gidErr != nil {
// Not numeric.
- return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries)
+ return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries)
}
user.Gid = gidArg
@@ -445,7 +445,7 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err
return false
})
if err != nil {
- return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
+ return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err)
}
}
@@ -468,7 +468,8 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err
if !found {
gid, err := strconv.ParseInt(ag, 10, 64)
if err != nil {
- return nil, fmt.Errorf("Unable to find group %s", ag)
+ // Not a numeric ID either.
+ return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries)
}
// Ensure gid is inside gid range.
if gid < minID || gid > maxID {
@@ -521,7 +522,7 @@ func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error)
func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
if r == nil {
- return nil, fmt.Errorf("nil source for subid-formatted data")
+ return nil, errors.New("nil source for subid-formatted data")
}
var (
@@ -574,7 +575,7 @@ func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error)
func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
if r == nil {
- return nil, fmt.Errorf("nil source for idmap-formatted data")
+ return nil, errors.New("nil source for idmap-formatted data")
}
var (
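Because these errors now wrap the sentinel values with %w, callers can detect them with errors.Is instead of matching message strings. A sketch with in-memory passwd/group data:

    package main

    import (
        "errors"
        "fmt"
        "strings"

        "github.com/opencontainers/runc/libcontainer/user"
    )

    func main() {
        passwd := strings.NewReader("root:x:0:0:root:/root:/bin/bash\n")
        group := strings.NewReader("root:x:0:\n")

        _, err := user.GetExecUser("nosuchuser", &user.ExecUser{}, passwd, group)
        if errors.Is(err, user.ErrNoPasswdEntries) {
            fmt.Println("user not found") // reachable now that the sentinel is wrapped with %w
        }
    }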
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go
index 8c9bb5df39c..e018eae614e 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go
@@ -1,3 +1,4 @@
+//go:build gofuzz
// +build gofuzz
package user
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go
index 529f8eaea2f..1e00ab8b505 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go
@@ -1,3 +1,4 @@
+//go:build gofuzz
// +build gofuzz
package userns
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go
index f45bb0c3156..f35c13a10e0 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package userns
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
index c8a9364d54d..7ef9da21fd2 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
@@ -1,5 +1,3 @@
-// +build linux
-
package utils
/*
@@ -88,6 +86,11 @@ func SendFd(socket *os.File, name string, fd uintptr) error {
if len(name) >= MaxNameLen {
return fmt.Errorf("sendfd: filename too long: %s", name)
}
- oob := unix.UnixRights(int(fd))
- return unix.Sendmsg(int(socket.Fd()), []byte(name), oob, nil, 0)
+ return SendFds(socket, []byte(name), int(fd))
+}
+
+// SendFds sends a list of file descriptors and msg over the given AF_UNIX socket.
+func SendFds(socket *os.File, msg []byte, fds ...int) error {
+ oob := unix.UnixRights(fds...)
+ return unix.Sendmsg(int(socket.Fd()), msg, oob, nil, 0)
}
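SendFds generalizes SendFd to several descriptors in one SCM_RIGHTS message. A self-contained sketch using a socketpair in place of runc's real console socket:

    package main

    import (
        "fmt"
        "os"

        "github.com/opencontainers/runc/libcontainer/utils"
        "golang.org/x/sys/unix"
    )

    func main() {
        // a socketpair stands in for runc's real AF_UNIX console socket
        pair, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
        if err != nil {
            panic(err)
        }
        sender := os.NewFile(uintptr(pair[0]), "sender")
        defer sender.Close()
        defer unix.Close(pair[1])

        f, err := os.Open("/dev/null")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // one SCM_RIGHTS message carrying two descriptors plus a text payload
        if err := utils.SendFds(sender, []byte("two fds"), int(f.Fd()), int(f.Fd())); err != nil {
            panic(err)
        }
        fmt.Println("sent")
    }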
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
index cd78f23e1bd..6b9fc343522 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
@@ -11,7 +11,7 @@ import (
"strings"
"unsafe"
- "github.com/cyphar/filepath-securejoin"
+ securejoin "github.com/cyphar/filepath-securejoin"
"golang.org/x/sys/unix"
)
@@ -33,16 +33,6 @@ func init() {
}
}
-// ResolveRootfs ensures that the current working directory is
-// not a symlink and returns the absolute path to the rootfs
-func ResolveRootfs(uncleanRootfs string) (string, error) {
- rootfs, err := filepath.Abs(uncleanRootfs)
- if err != nil {
- return "", err
- }
- return filepath.EvalSymlinks(rootfs)
-}
-
// ExitStatus returns the correct exit status for a process based on if it
// was signaled or exited cleanly
func ExitStatus(status unix.WaitStatus) int {
@@ -120,7 +110,7 @@ func WithProcfd(root, unsafePath string, fn func(procfd string) error) error {
unsafePath = stripRoot(root, unsafePath)
path, err := securejoin.SecureJoin(root, unsafePath)
if err != nil {
- return fmt.Errorf("resolving path inside rootfs failed: %v", err)
+ return fmt.Errorf("resolving path inside rootfs failed: %w", err)
}
// Open the target path.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
index 1576f2d4ab6..220d0b43937 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package utils
@@ -14,7 +15,7 @@ import (
func EnsureProcHandle(fh *os.File) error {
var buf unix.Statfs_t
if err := unix.Fstatfs(int(fh.Fd()), &buf); err != nil {
- return fmt.Errorf("ensure %s is on procfs: %v", fh.Name(), err)
+ return fmt.Errorf("ensure %s is on procfs: %w", fh.Name(), err)
}
if buf.Type != unix.PROC_SUPER_MAGIC {
return fmt.Errorf("%s is not on procfs", fh.Name())
@@ -52,7 +53,7 @@ func CloseExecFrom(minFd int) error {
// Intentionally ignore errors from unix.CloseOnExec -- the cases where
// this might fail are basically file descriptors that have already
// been closed (including and especially the one that was created when
- // ioutil.ReadDir did the "opendir" syscall).
+ // os.ReadDir did the "opendir" syscall).
unix.CloseOnExec(fd)
}
return nil
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go
index 0ac7d819e6d..57a15c9a11e 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go
@@ -9,6 +9,5 @@ Usage:
if selinux.EnforceMode() != selinux.Enforcing {
selinux.SetEnforceMode(selinux.Enforcing)
}
-
*/
package selinux
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
index 14e1e38c248..f61a560158b 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
@@ -3,8 +3,6 @@ package label
import (
"errors"
"fmt"
- "os"
- "os/user"
"strings"
"github.com/opencontainers/selinux/go-selinux"
@@ -103,58 +101,16 @@ func SetFileCreateLabel(fileLabel string) error {
return selinux.SetFSCreateLabel(fileLabel)
}
-// Relabel changes the label of path to the filelabel string.
+// Relabel changes the label of path and all the entries beneath the path.
// It changes the MCS label to s0 if shared is true.
// This will allow all containers to share the content.
+//
+// The path itself is guaranteed to be relabeled last.
func Relabel(path string, fileLabel string, shared bool) error {
if !selinux.GetEnabled() || fileLabel == "" {
return nil
}
- exclude_paths := map[string]bool{
- "/": true,
- "/bin": true,
- "/boot": true,
- "/dev": true,
- "/etc": true,
- "/etc/passwd": true,
- "/etc/pki": true,
- "/etc/shadow": true,
- "/home": true,
- "/lib": true,
- "/lib64": true,
- "/media": true,
- "/opt": true,
- "/proc": true,
- "/root": true,
- "/run": true,
- "/sbin": true,
- "/srv": true,
- "/sys": true,
- "/tmp": true,
- "/usr": true,
- "/var": true,
- "/var/lib": true,
- "/var/log": true,
- }
-
- if home := os.Getenv("HOME"); home != "" {
- exclude_paths[home] = true
- }
-
- if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" {
- if usr, err := user.Lookup(sudoUser); err == nil {
- exclude_paths[usr.HomeDir] = true
- }
- }
-
- if path != "/" {
- path = strings.TrimSuffix(path, "/")
- }
- if exclude_paths[path] {
- return fmt.Errorf("SELinux relabeling of %s is not allowed", path)
- }
-
if shared {
c, err := selinux.NewContext(fileLabel)
if err != nil {
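With the exclusion list moved down into chcon (see the selinux_linux.go changes below), Relabel itself stays small. A usage sketch; the path and label here are illustrative only:

    package main

    import (
        "fmt"

        "github.com/opencontainers/selinux/go-selinux/label"
    )

    func main() {
        // shared=false keeps the private MCS categories from the label;
        // shared=true would rewrite them to s0 so containers can share content.
        mountLabel := "system_u:object_r:container_file_t:s0:c123,c456" // assumed example label
        if err := label.Relabel("/var/lib/mycontainer/volume", mountLabel, false); err != nil {
            fmt.Println("relabel failed:", err)
        }
    }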
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
index 02d206239c7..f21c80c5ab0 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package label
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
index 897ecbac41c..8bff2935579 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
@@ -1,3 +1,4 @@
+//go:build linux && go1.16
// +build linux,go1.16
package selinux
@@ -11,8 +12,19 @@ import (
)
func rchcon(fpath, label string) error {
+ fastMode := false
+ // If the current label matches the new label, assume
+ // other labels are correct.
+ if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
+ fastMode = true
+ }
return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error {
- e := setFileLabel(p, label)
+ if fastMode {
+ if cLabel, err := lFileLabel(p); err == nil && cLabel == label {
+ return nil
+ }
+ }
+ e := lSetFileLabel(p, label)
// Walk a file tree can race with removal, so ignore ENOENT.
if errors.Is(e, os.ErrNotExist) {
return nil
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go
index 2c8b033ce05..303cb1890ad 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go
@@ -1,3 +1,4 @@
+//go:build linux && !go1.16
// +build linux,!go1.16
package selinux
@@ -11,7 +12,7 @@ import (
func rchcon(fpath, label string) error {
return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error {
- e := setFileLabel(p, label)
+ e := lSetFileLabel(p, label)
// Walk a file tree can race with removal, so ignore ENOENT.
if errors.Is(e, os.ErrNotExist) {
return nil
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
index 0eedcaa780c..5a59d151f67 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
@@ -61,16 +61,30 @@ func ClassIndex(class string) (int, error) {
return classIndex(class)
}
-// SetFileLabel sets the SELinux label for this path or returns an error.
+// SetFileLabel sets the SELinux label for this path, following symlinks,
+// or returns an error.
func SetFileLabel(fpath string, label string) error {
return setFileLabel(fpath, label)
}
-// FileLabel returns the SELinux label for this path or returns an error.
+// LsetFileLabel sets the SELinux label for this path, not following symlinks,
+// or returns an error.
+func LsetFileLabel(fpath string, label string) error {
+ return lSetFileLabel(fpath, label)
+}
+
+// FileLabel returns the SELinux label for this path, following symlinks,
+// or returns an error.
func FileLabel(fpath string) (string, error) {
return fileLabel(fpath)
}
+// LfileLabel returns the SELinux label for this path, not following symlinks,
+// or returns an error.
+func LfileLabel(fpath string) (string, error) {
+ return lFileLabel(fpath)
+}
+
// SetFSCreateLabel tells the kernel what label to use for all file system objects
// created by this task.
// Set the label to an empty string to return to the default label. Calls to SetFSCreateLabel
@@ -255,6 +269,8 @@ func CopyLevel(src, dest string) (string, error) {
// Chcon changes the fpath file object to the SELinux label label.
// If fpath is a directory and recurse is true, then Chcon walks the
// directory tree setting the label.
+//
+// The fpath itself is guaranteed to be relabeled last.
func Chcon(fpath string, label string, recurse bool) error {
return chcon(fpath, label, recurse)
}
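The new L-variants operate on a symlink itself rather than its target, mirroring lsetxattr/lgetxattr. A sketch contrasting the two (the path is illustrative; this requires an SELinux-enabled host):

    package main

    import (
        "fmt"

        selinux "github.com/opencontainers/selinux/go-selinux"
    )

    func main() {
        if !selinux.GetEnabled() {
            fmt.Println("SELinux is disabled")
            return
        }
        tgt, _ := selinux.FileLabel("/etc/localtime")  // follows the symlink: label of the target
        lnk, _ := selinux.LfileLabel("/etc/localtime") // label of the symlink itself
        fmt.Println(tgt, lnk)
    }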
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
index 295b2bc4e06..4582cc9e0a2 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
@@ -9,14 +9,15 @@ import (
"fmt"
"io"
"io/ioutil"
+ "math/big"
"os"
+ "os/user"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
- "github.com/bits-and-blooms/bitset"
"golang.org/x/sys/unix"
)
@@ -44,7 +45,7 @@ type selinuxState struct {
type level struct {
sens uint
- cats *bitset.BitSet
+ cats *big.Int
}
type mlsRange struct {
@@ -316,8 +317,9 @@ func classIndex(class string) (int, error) {
return index, nil
}
-// setFileLabel sets the SELinux label for this path or returns an error.
-func setFileLabel(fpath string, label string) error {
+// lSetFileLabel sets the SELinux label for this path, not following symlinks,
+// or returns an error.
+func lSetFileLabel(fpath string, label string) error {
if fpath == "" {
return ErrEmptyPath
}
@@ -334,12 +336,50 @@ func setFileLabel(fpath string, label string) error {
return nil
}
-// fileLabel returns the SELinux label for this path or returns an error.
+// setFileLabel sets the SELinux label for this path, following symlinks,
+// or returns an error.
+func setFileLabel(fpath string, label string) error {
+ if fpath == "" {
+ return ErrEmptyPath
+ }
+ for {
+ err := unix.Setxattr(fpath, xattrNameSelinux, []byte(label), 0)
+ if err == nil {
+ break
+ }
+ if err != unix.EINTR { //nolint:errorlint // unix errors are bare
+ return &os.PathError{Op: "setxattr", Path: fpath, Err: err}
+ }
+ }
+
+ return nil
+}
+
+// fileLabel returns the SELinux label for this path, following symlinks,
+// or returns an error.
func fileLabel(fpath string) (string, error) {
if fpath == "" {
return "", ErrEmptyPath
}
+ label, err := getxattr(fpath, xattrNameSelinux)
+ if err != nil {
+ return "", &os.PathError{Op: "getxattr", Path: fpath, Err: err}
+ }
+ // Trim the NUL byte at the end of the byte buffer, if present.
+ if len(label) > 0 && label[len(label)-1] == '\x00' {
+ label = label[:len(label)-1]
+ }
+ return string(label), nil
+}
+
+// lFileLabel returns the SELinux label for this path, not following symlinks,
+// or returns an error.
+func lFileLabel(fpath string) (string, error) {
+ if fpath == "" {
+ return "", ErrEmptyPath
+ }
+
label, err := lgetxattr(fpath, xattrNameSelinux)
if err != nil {
return "", &os.PathError{Op: "lgetxattr", Path: fpath, Err: err}
@@ -455,8 +495,8 @@ func computeCreateContext(source string, target string, class string) (string, e
}
// catsToBitset stores categories in a bitset.
-func catsToBitset(cats string) (*bitset.BitSet, error) {
- bitset := &bitset.BitSet{}
+func catsToBitset(cats string) (*big.Int, error) {
+ bitset := new(big.Int)
catlist := strings.Split(cats, ",")
for _, r := range catlist {
@@ -471,14 +511,14 @@ func catsToBitset(cats string) (*bitset.BitSet, error) {
return nil, err
}
for i := catstart; i <= catend; i++ {
- bitset.Set(i)
+ bitset.SetBit(bitset, int(i), 1)
}
} else {
cat, err := parseLevelItem(ranges[0], category)
if err != nil {
return nil, err
}
- bitset.Set(cat)
+ bitset.SetBit(bitset, int(cat), 1)
}
}
@@ -548,37 +588,30 @@ func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) {
// bitsetToStr takes a category bitset and returns it in the
// canonical selinux syntax
-func bitsetToStr(c *bitset.BitSet) string {
+func bitsetToStr(c *big.Int) string {
var str string
- i, e := c.NextSet(0)
- len := 0
- for e {
- if len == 0 {
+
+ length := 0
+ for i := int(c.TrailingZeroBits()); i < c.BitLen(); i++ {
+ if c.Bit(i) == 0 {
+ continue
+ }
+ if length == 0 {
if str != "" {
str += ","
}
- str += "c" + strconv.Itoa(int(i))
- }
-
- next, e := c.NextSet(i + 1)
- if e {
- // consecutive cats
- if next == i+1 {
- len++
- i = next
- continue
- }
+ str += "c" + strconv.Itoa(i)
}
- if len == 1 {
- str += ",c" + strconv.Itoa(int(i))
- } else if len > 1 {
- str += ".c" + strconv.Itoa(int(i))
+ if c.Bit(i+1) == 1 {
+ length++
+ continue
}
- if !e {
- break
+ if length == 1 {
+ str += ",c" + strconv.Itoa(i)
+ } else if length > 1 {
+ str += ".c" + strconv.Itoa(i)
}
- len = 0
- i = next
+ length = 0
}
return str
@@ -591,13 +624,16 @@ func (l1 *level) equal(l2 *level) bool {
if l1.sens != l2.sens {
return false
}
- return l1.cats.Equal(l2.cats)
+ if l2.cats == nil || l1.cats == nil {
+ return l2.cats == l1.cats
+ }
+ return l1.cats.Cmp(l2.cats) == 0
}
// String returns an mlsRange as a string.
func (m mlsRange) String() string {
low := "s" + strconv.Itoa(int(m.low.sens))
- if m.low.cats != nil && m.low.cats.Count() > 0 {
+ if m.low.cats != nil && m.low.cats.BitLen() > 0 {
low += ":" + bitsetToStr(m.low.cats)
}
@@ -606,7 +642,7 @@ func (m mlsRange) String() string {
}
high := "s" + strconv.Itoa(int(m.high.sens))
- if m.high.cats != nil && m.high.cats.Count() > 0 {
+ if m.high.cats != nil && m.high.cats.BitLen() > 0 {
high += ":" + bitsetToStr(m.high.cats)
}
@@ -656,10 +692,12 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) {
/* find the intersecting categories */
if s.low.cats != nil && t.low.cats != nil {
- outrange.low.cats = s.low.cats.Intersection(t.low.cats)
+ outrange.low.cats = new(big.Int)
+ outrange.low.cats.And(s.low.cats, t.low.cats)
}
if s.high.cats != nil && t.high.cats != nil {
- outrange.high.cats = s.high.cats.Intersection(t.high.cats)
+ outrange.high.cats = new(big.Int)
+ outrange.high.cats.And(s.high.cats, t.high.cats)
}
return outrange.String(), nil
@@ -1035,21 +1073,6 @@ func copyLevel(src, dest string) (string, error) {
return tcon.Get(), nil
}
-// Prevent users from relabeling system files
-func badPrefix(fpath string) error {
- if fpath == "" {
- return ErrEmptyPath
- }
-
- badPrefixes := []string{"/usr"}
- for _, prefix := range badPrefixes {
- if strings.HasPrefix(fpath, prefix) {
- return fmt.Errorf("relabeling content in %s is not allowed", prefix)
- }
- }
- return nil
-}
-
// chcon changes the fpath file object to the SELinux label label.
// If fpath is a directory and recurse is true, then chcon walks the
// directory tree setting the label.
@@ -1060,12 +1083,70 @@ func chcon(fpath string, label string, recurse bool) error {
if label == "" {
return nil
}
- if err := badPrefix(fpath); err != nil {
- return err
+
+ exclude_paths := map[string]bool{
+ "/": true,
+ "/bin": true,
+ "/boot": true,
+ "/dev": true,
+ "/etc": true,
+ "/etc/passwd": true,
+ "/etc/pki": true,
+ "/etc/shadow": true,
+ "/home": true,
+ "/lib": true,
+ "/lib64": true,
+ "/media": true,
+ "/opt": true,
+ "/proc": true,
+ "/root": true,
+ "/run": true,
+ "/sbin": true,
+ "/srv": true,
+ "/sys": true,
+ "/tmp": true,
+ "/usr": true,
+ "/var": true,
+ "/var/lib": true,
+ "/var/log": true,
+ }
+
+ if home := os.Getenv("HOME"); home != "" {
+ exclude_paths[home] = true
+ }
+
+ if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" {
+ if usr, err := user.Lookup(sudoUser); err == nil {
+ exclude_paths[usr.HomeDir] = true
+ }
+ }
+
+ if fpath != "/" {
+ fpath = strings.TrimSuffix(fpath, "/")
+ }
+ if exclude_paths[fpath] {
+ return fmt.Errorf("SELinux relabeling of %s is not allowed", fpath)
}
if !recurse {
- return setFileLabel(fpath, label)
+ err := lSetFileLabel(fpath, label)
+ if err != nil {
+ // Check if file doesn't exist, must have been removed
+ if errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
+ // Check if current label is correct on disk
+ flabel, nerr := lFileLabel(fpath)
+ if nerr == nil && flabel == label {
+ return nil
+ }
+ // Check if file doesn't exist, must have been removed
+ if errors.Is(nerr, os.ErrNotExist) {
+ return nil
+ }
+ return err
+ }
+ return nil
}
return rchcon(fpath, label)
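The switch from bits-and-blooms/bitset to math/big.Int keeps the category-set semantics: one bit per category, with consecutive runs collapsed using "." and gaps separated by "," when rendered. A standalone sketch of the representation:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // categories {c0, c1, c2, c5} as bits in a big.Int, mirroring catsToBitset
        cats := new(big.Int)
        for _, c := range []int{0, 1, 2, 5} {
            cats.SetBit(cats, c, 1)
        }
        fmt.Println(cats.Bit(2), cats.Bit(3)) // 1 0
        // bitsetToStr above would render this set as "c0.c2,c5"
    }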
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
index 42657759c38..20d88803121 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package selinux
@@ -17,10 +18,18 @@ func setFileLabel(fpath string, label string) error {
return nil
}
+func lSetFileLabel(fpath string, label string) error {
+ return nil
+}
+
func fileLabel(fpath string) (string, error) {
return "", nil
}
+func lFileLabel(fpath string) (string, error) {
+ return "", nil
+}
+
func setFSCreateLabel(label string) error {
return nil
}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
index c6b0a7f2655..9e473ca168f 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
@@ -36,3 +36,36 @@ func doLgetxattr(path, attr string, dest []byte) (int, error) {
}
}
}
+
+// getxattr returns a []byte slice containing the value of
+// an extended attribute attr set for path.
+func getxattr(path, attr string) ([]byte, error) {
+ // Start with a 128 length byte array
+ dest := make([]byte, 128)
+ sz, errno := dogetxattr(path, attr, dest)
+ for errno == unix.ERANGE { //nolint:errorlint // unix errors are bare
+ // Buffer too small, use zero-sized buffer to get the actual size
+ sz, errno = dogetxattr(path, attr, []byte{})
+ if errno != nil {
+ return nil, errno
+ }
+
+ dest = make([]byte, sz)
+ sz, errno = dogetxattr(path, attr, dest)
+ }
+ if errno != nil {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+// dogetxattr is a wrapper that retries on EINTR
+func dogetxattr(path, attr string, dest []byte) (int, error) {
+ for {
+ sz, err := unix.Getxattr(path, attr, dest)
+ if err != unix.EINTR { //nolint:errorlint // unix errors are bare
+ return sz, err
+ }
+ }
+}
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
index 011fe862aad..202c80da59c 100644
--- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
@@ -51,6 +51,9 @@ func WalkN(root string, walkFn WalkFunc, num int) error {
var (
err error
wg sync.WaitGroup
+
+ rootLen = len(root)
+ rootEntry *walkArgs
)
wg.Add(1)
go func() {
@@ -59,6 +62,11 @@ func WalkN(root string, walkFn WalkFunc, num int) error {
close(files)
return err
}
+ if len(p) == rootLen {
+ // Root entry is processed separately below.
+ rootEntry = &walkArgs{path: p, info: &info}
+ return nil
+ }
// add a file to the queue unless a callback sent an error
select {
case e := <-errCh:
@@ -92,6 +100,10 @@ func WalkN(root string, walkFn WalkFunc, num int) error {
wg.Wait()
+ if err == nil {
+ err = walkFn(rootEntry.path, *rootEntry.info, nil)
+ }
+
return err
}
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
index 222820750c3..a5796b2c4f1 100644
--- a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
@@ -1,3 +1,4 @@
+//go:build go1.16
// +build go1.16
package pwalkdir
@@ -51,6 +52,9 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
var (
err error
wg sync.WaitGroup
+
+ rootLen = len(root)
+ rootEntry *walkArgs
)
wg.Add(1)
go func() {
@@ -59,6 +63,11 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
close(files)
return err
}
+ if len(p) == rootLen {
+ // Root entry is processed separately below.
+ rootEntry = &walkArgs{path: p, entry: entry}
+ return nil
+ }
// Add a file to the queue unless a callback sent an error.
select {
case e := <-errCh:
@@ -92,6 +101,10 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
wg.Wait()
+ if err == nil {
+ err = walkFn(rootEntry.path, rootEntry.entry, nil)
+ }
+
return err
}
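Both walkers (pwalk and pwalkdir) now hold the root entry back and pass it to the callback only after all children have been processed, which is what backs the "the path itself is guaranteed to be relabeled last" promise above. A sketch; entries other than the root may still arrive in any order, since callbacks run concurrently:

    package main

    import (
        "fmt"
        "io/fs"
        "os"

        "github.com/opencontainers/selinux/pkg/pwalkdir"
    )

    func main() {
        root, err := os.MkdirTemp("", "walkdemo")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(root)
        if err := os.Mkdir(root+"/sub", 0o755); err != nil {
            panic(err)
        }

        err = pwalkdir.Walk(root, func(p string, _ fs.DirEntry, _ error) error {
            fmt.Println(p) // the root path is now always printed last
            return nil
        })
        if err != nil {
            panic(err)
        }
    }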
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go
index 30572ea87ff..6a6acfd9ab3 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go
@@ -56,6 +56,9 @@ func (v *GVariant) native() *C.GVariant {
}
func (v *GVariant) Ptr() unsafe.Pointer {
+ if v == nil {
+ return nil
+ }
return v.ptr
}
diff --git a/vendor/github.com/mtrmac/gpgme/.gitignore b/vendor/github.com/proglottis/gpgme/.gitignore
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/.gitignore
rename to vendor/github.com/proglottis/gpgme/.gitignore
diff --git a/vendor/github.com/mtrmac/gpgme/LICENSE b/vendor/github.com/proglottis/gpgme/LICENSE
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/LICENSE
rename to vendor/github.com/proglottis/gpgme/LICENSE
diff --git a/vendor/github.com/mtrmac/gpgme/README.md b/vendor/github.com/proglottis/gpgme/README.md
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/README.md
rename to vendor/github.com/proglottis/gpgme/README.md
diff --git a/vendor/github.com/mtrmac/gpgme/callbacks.go b/vendor/github.com/proglottis/gpgme/callbacks.go
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/callbacks.go
rename to vendor/github.com/proglottis/gpgme/callbacks.go
diff --git a/vendor/github.com/mtrmac/gpgme/data.go b/vendor/github.com/proglottis/gpgme/data.go
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/data.go
rename to vendor/github.com/proglottis/gpgme/data.go
diff --git a/vendor/github.com/proglottis/gpgme/go.mod b/vendor/github.com/proglottis/gpgme/go.mod
new file mode 100644
index 00000000000..5badc8e6988
--- /dev/null
+++ b/vendor/github.com/proglottis/gpgme/go.mod
@@ -0,0 +1,3 @@
+module github.com/proglottis/gpgme
+
+go 1.11
diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/github.com/proglottis/gpgme/go_gpgme.c
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/go_gpgme.c
rename to vendor/github.com/proglottis/gpgme/go_gpgme.c
diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/github.com/proglottis/gpgme/go_gpgme.h
similarity index 92%
rename from vendor/github.com/mtrmac/gpgme/go_gpgme.h
rename to vendor/github.com/proglottis/gpgme/go_gpgme.h
index d4826ab368e..eb3a4ba88b1 100644
--- a/vendor/github.com/mtrmac/gpgme/go_gpgme.h
+++ b/vendor/github.com/proglottis/gpgme/go_gpgme.h
@@ -6,11 +6,6 @@
#include <gpgme.h>
-/* GPGME_VERSION_NUMBER was introduced in 1.4.0 */
-#if !defined(GPGME_VERSION_NUMBER) || GPGME_VERSION_NUMBER < 0x010402
-typedef off_t gpgme_off_t; /* Introduced in 1.4.2 */
-#endif
-
extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size);
extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size);
extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence);
diff --git a/vendor/github.com/mtrmac/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go
similarity index 96%
rename from vendor/github.com/mtrmac/gpgme/gpgme.go
rename to vendor/github.com/proglottis/gpgme/gpgme.go
index c19b9aebc5c..8f495ddf5d5 100644
--- a/vendor/github.com/mtrmac/gpgme/gpgme.go
+++ b/vendor/github.com/proglottis/gpgme/gpgme.go
@@ -1,7 +1,7 @@
// Package gpgme provides a Go wrapper for the GPGME library
package gpgme
-// #cgo LDFLAGS: -lgpgme -lassuan -lgpg-error
+// #cgo pkg-config: gpgme
// #cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
// #include <stdlib.h>
// #include <gpgme.h>
@@ -53,13 +53,13 @@ const (
type PinEntryMode int
-// const ( // Unavailable in 1.3.2
-// PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT
-// PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK
-// PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL
-// PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR
-// PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK
-// )
+const (
+ PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT
+ PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK
+ PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL
+ PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR
+ PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK
+)
type EncryptFlag uint
@@ -348,19 +348,17 @@ func (c *Context) KeyListMode() KeyListMode {
return res
}
-// Unavailable in 1.3.2:
-// func (c *Context) SetPinEntryMode(m PinEntryMode) error {
-// err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
-// runtime.KeepAlive(c)
-// return err
-// }
+func (c *Context) SetPinEntryMode(m PinEntryMode) error {
+ err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
+ runtime.KeepAlive(c)
+ return err
+}
-// Unavailable in 1.3.2:
-// func (c *Context) PinEntryMode() PinEntryMode {
-// res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
-// runtime.KeepAlive(c)
-// return res
-// }
+func (c *Context) PinEntryMode() PinEntryMode {
+ res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
+ runtime.KeepAlive(c)
+ return res
+}
func (c *Context) SetCallback(callback Callback) error {
var err error
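
The hunks above switch the cgo build to pkg-config and, now that the fork targets GPGME >= 1.4, expose the previously commented-out pin-entry API. A minimal sketch of how a caller might use loopback pin entry with the renamed package; the passphrase value and error handling are illustrative, the Callback signature is assumed from the package, and gpg-agent must permit loopback (allow-loopback-pinentry) for this to work at runtime:

package main

import (
	"fmt"
	"os"

	"github.com/proglottis/gpgme"
)

func main() {
	ctx, err := gpgme.New()
	if err != nil {
		panic(err)
	}
	// Loopback mode routes passphrase requests to our callback
	// instead of an interactive pinentry program.
	if err := ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
		panic(err)
	}
	// The passphrase is written to the supplied file, newline-terminated.
	err = ctx.SetCallback(func(uidHint string, prevWasBad bool, f *os.File) error {
		if prevWasBad {
			return fmt.Errorf("passphrase rejected for %q", uidHint)
		}
		_, err := f.WriteString("example-passphrase\n") // illustrative value
		return err
	})
	if err != nil {
		panic(err)
	}
}
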
diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info.go b/vendor/github.com/proglottis/gpgme/unset_agent_info.go
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/unset_agent_info.go
rename to vendor/github.com/proglottis/gpgme/unset_agent_info.go
diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go b/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go
rename to vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
deleted file mode 100644
index 261eeb9e9f8..00000000000
--- a/vendor/github.com/prometheus/client_golang/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
deleted file mode 100644
index dd878a30ee9..00000000000
--- a/vendor/github.com/prometheus/client_golang/NOTICE
+++ /dev/null
@@ -1,23 +0,0 @@
-Prometheus instrumentation library for Go applications
-Copyright 2012-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
-
-
-The following components are included in this product:
-
-perks - a fork of https://github.com/bmizerany/perks
-https://github.com/beorn7/perks
-Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
-See https://github.com/beorn7/perks/blob/master/README.md for license details.
-
-Go support for Protocol Buffers - Google's data interchange format
-http://github.com/golang/protobuf/
-Copyright 2010 The Go Authors
-See source code for license details.
-
-Support for streaming Protocol Buffer messages for the Go language (golang).
-https://github.com/matttproud/golang_protobuf_extensions
-Copyright 2013 Matt T. Proud
-Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
deleted file mode 100644
index 3460f0346d9..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-command-line-arguments.test
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
deleted file mode 100644
index 44986bff06b..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/README.md
+++ /dev/null
@@ -1 +0,0 @@
-See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go
deleted file mode 100644
index 288f0e85488..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.12
-
-package prometheus
-
-import "runtime/debug"
-
-// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
-func readBuildInfo() (path, version, sum string) {
- path, version, sum = "unknown", "unknown", "unknown"
- if bi, ok := debug.ReadBuildInfo(); ok {
- path = bi.Main.Path
- version = bi.Main.Version
- sum = bi.Main.Sum
- }
- return
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
deleted file mode 100644
index 6609e2877c0..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.12
-
-package prometheus
-
-// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
-// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
-func readBuildInfo() (path, version, sum string) {
- return "unknown", "unknown", "unknown"
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
deleted file mode 100644
index 1e839650d4d..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Collector is the interface implemented by anything that can be used by
-// Prometheus to collect metrics. A Collector has to be registered for
-// collection. See Registerer.Register.
-//
-// The stock metrics provided by this package (Gauge, Counter, Summary,
-// Histogram, Untyped) are also Collectors (which only ever collect one metric,
-// namely itself). An implementer of Collector may, however, collect multiple
-// metrics in a coordinated fashion and/or create metrics on the fly. Examples
-// for collectors already implemented in this library are the metric vectors
-// (i.e. collection of multiple instances of the same Metric but with different
-// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
-type Collector interface {
- // Describe sends the super-set of all possible descriptors of metrics
- // collected by this Collector to the provided channel and returns once
- // the last descriptor has been sent. The sent descriptors fulfill the
- // consistency and uniqueness requirements described in the Desc
- // documentation.
- //
- // It is valid if one and the same Collector sends duplicate
- // descriptors. Those duplicates are simply ignored. However, two
- // different Collectors must not send duplicate descriptors.
- //
- // Sending no descriptor at all marks the Collector as “unchecked”,
- // i.e. no checks will be performed at registration time, and the
- // Collector may yield any Metric it sees fit in its Collect method.
- //
- // This method idempotently sends the same descriptors throughout the
- // lifetime of the Collector. It may be called concurrently and
- // therefore must be implemented in a concurrency safe way.
- //
- // If a Collector encounters an error while executing this method, it
- // must send an invalid descriptor (created with NewInvalidDesc) to
- // signal the error to the registry.
- Describe(chan<- *Desc)
- // Collect is called by the Prometheus registry when collecting
- // metrics. The implementation sends each collected metric via the
- // provided channel and returns once the last metric has been sent. The
- // descriptor of each sent metric is one of those returned by Describe
- // (unless the Collector is unchecked, see above). Returned metrics that
- // share the same descriptor must differ in their variable label
- // values.
- //
- // This method may be called concurrently and must therefore be
- // implemented in a concurrency safe way. Blocking occurs at the expense
- // of total performance of rendering all registered metrics. Ideally,
- // Collector implementations support concurrent readers.
- Collect(chan<- Metric)
-}
-
-// DescribeByCollect is a helper to implement the Describe method of a custom
-// Collector. It collects the metrics from the provided Collector and sends
-// their descriptors to the provided channel.
-//
-// If a Collector collects the same metrics throughout its lifetime, its
-// Describe method can simply be implemented as:
-//
-// func (c customCollector) Describe(ch chan<- *Desc) {
-// DescribeByCollect(c, ch)
-// }
-//
-// However, this will not work if the metrics collected change dynamically over
-// the lifetime of the Collector in a way that their combined set of descriptors
-// changes as well. The shortcut implementation will then violate the contract
-// of the Describe method. If a Collector sometimes collects no metrics at all
-// (for example vectors like CounterVec, GaugeVec, etc., which only collect
-// metrics after a metric with a fully specified label set has been accessed),
-// it might even get registered as an unchecked Collector (cf. the Register
-// method of the Registerer interface). Hence, only use this shortcut
-// implementation of Describe if you are certain to fulfill the contract.
-//
-// The Collector example demonstrates a use of DescribeByCollect.
-func DescribeByCollect(c Collector, descs chan<- *Desc) {
- metrics := make(chan Metric)
- go func() {
- c.Collect(metrics)
- close(metrics)
- }()
- for m := range metrics {
- descs <- m.Desc()
- }
-}
-
-// selfCollector implements Collector for a single Metric so that the Metric
-// collects itself. Add it as an anonymous field to a struct that implements
-// Metric, and call init with the Metric itself as an argument.
-type selfCollector struct {
- self Metric
-}
-
-// init provides the selfCollector with a reference to the metric it is supposed
-// to collect. It is usually called within the factory function to create a
-// metric. See example.
-func (c *selfCollector) init(self Metric) {
- c.self = self
-}
-
-// Describe implements Collector.
-func (c *selfCollector) Describe(ch chan<- *Desc) {
- ch <- c.self.Desc()
-}
-
-// Collect implements Collector.
-func (c *selfCollector) Collect(ch chan<- Metric) {
- ch <- c.self
-}
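
The Collector doc comments deleted above still describe the upstream client_golang contract. A minimal sketch of a custom Collector that mirrors an external value as a constant metric, using DescribeByCollect for its Describe method; queueCollector and the length callback are illustrative names, not part of the library:

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector exports the current length of some external queue,
// creating a throw-away constant metric on every scrape.
type queueCollector struct {
	desc   *prometheus.Desc
	length func() float64 // however the caller measures the queue
}

// Describe delegates to DescribeByCollect, which is safe here because
// the set of metrics this Collector produces never changes.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

// Collect builds the metric on the fly from the current value.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.length())
}

func main() {
	prometheus.MustRegister(&queueCollector{
		desc:   prometheus.NewDesc("queue_length", "Current queue length.", nil, nil),
		length: func() float64 { return 42 }, // placeholder measurement
	})
}
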
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
deleted file mode 100644
index 0e1b48c03f1..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ /dev/null
@@ -1,321 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "math"
- "sync/atomic"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// Counter is a Metric that represents a single numerical value that only ever
-// goes up. That implies that it cannot be used to count items whose number can
-// also go down, e.g. the number of currently running goroutines. Those
-// "counters" are represented by Gauges.
-//
-// A Counter is typically used to count requests served, tasks completed, errors
-// occurred, etc.
-//
-// To create Counter instances, use NewCounter.
-type Counter interface {
- Metric
- Collector
-
- // Inc increments the counter by 1. Use Add to increment it by arbitrary
- // non-negative values.
- Inc()
- // Add adds the given value to the counter. It panics if the value is <
- // 0.
- Add(float64)
-}
-
-// ExemplarAdder is implemented by Counters that offer the option of adding a
-// value to the Counter together with an exemplar. Its AddWithExemplar method
-// works like the Add method of the Counter interface but also replaces the
-// currently saved exemplar (if any) with a new one, created from the provided
-// value, the current time as timestamp, and the provided labels. Empty Labels
-// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
-// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
-// of the provided labels are invalid, or if the provided labels contain more
-// than 64 runes in total.
-type ExemplarAdder interface {
- AddWithExemplar(value float64, exemplar Labels)
-}
-
-// CounterOpts is an alias for Opts. See there for doc comments.
-type CounterOpts Opts
-
-// NewCounter creates a new Counter based on the provided CounterOpts.
-//
-// The returned implementation also implements ExemplarAdder. It is safe to
-// perform the corresponding type assertion.
-//
-// The returned implementation tracks the counter value in two separate
-// variables, a float64 and a uint64. The latter is used to track calls of the
-// Inc method and calls of the Add method with a value that can be represented
-// as a uint64. This allows atomic increments of the counter with optimal
-// performance. (It is common to have an Inc call in very hot execution paths.)
-// Both internal tracking values are added up in the Write method. This has to
-// be taken into account when it comes to precision and overflow behavior.
-func NewCounter(opts CounterOpts) Counter {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- )
- result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
- result.init(result) // Init self-collection.
- return result
-}
-
-type counter struct {
- // valBits contains the bits of the represented float64 value, while
- // valInt stores values that are exact integers. Both have to go first
- // in the struct to guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- valBits uint64
- valInt uint64
-
- selfCollector
- desc *Desc
-
- labelPairs []*dto.LabelPair
- exemplar atomic.Value // Containing nil or a *dto.Exemplar.
-
- now func() time.Time // To mock out time.Now() for testing.
-}
-
-func (c *counter) Desc() *Desc {
- return c.desc
-}
-
-func (c *counter) Add(v float64) {
- if v < 0 {
- panic(errors.New("counter cannot decrease in value"))
- }
-
- ival := uint64(v)
- if float64(ival) == v {
- atomic.AddUint64(&c.valInt, ival)
- return
- }
-
- for {
- oldBits := atomic.LoadUint64(&c.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
- return
- }
- }
-}
-
-func (c *counter) AddWithExemplar(v float64, e Labels) {
- c.Add(v)
- c.updateExemplar(v, e)
-}
-
-func (c *counter) Inc() {
- atomic.AddUint64(&c.valInt, 1)
-}
-
-func (c *counter) Write(out *dto.Metric) error {
- fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
- ival := atomic.LoadUint64(&c.valInt)
- val := fval + float64(ival)
-
- var exemplar *dto.Exemplar
- if e := c.exemplar.Load(); e != nil {
- exemplar = e.(*dto.Exemplar)
- }
-
- return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
-}
-
-func (c *counter) updateExemplar(v float64, l Labels) {
- if l == nil {
- return
- }
- e, err := newExemplar(v, c.now(), l)
- if err != nil {
- panic(err)
- }
- c.exemplar.Store(e)
-}
-
-// CounterVec is a Collector that bundles a set of Counters that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. number of HTTP requests, partitioned by response code and
-// method). Create instances with NewCounterVec.
-type CounterVec struct {
- *metricVec
-}
-
-// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
-// partitioned by the given label names.
-func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &CounterVec{
- metricVec: newMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
- }
- result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now}
- result.init(result) // Init self-collection.
- return result
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Counter for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
-// label values is accessed for the first time, a new Counter is created.
-//
-// It is possible to call this method without using the returned Counter to only
-// create the new Counter but leave it at its starting value 0. See also the
-// SummaryVec example.
-//
-// Keeping the Counter for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Counter from the CounterVec. In that case,
-// the Counter will still exist, but it will not be exported anymore, even if a
-// Counter with the same label values is created later.
-//
-// An error is returned if the number of label values is not the same as the
-// number of VariableLabels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
- metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Counter), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Counter for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Counter is created. Implications of
-// creating a Counter without using it and keeping the Counter for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
- metric, err := v.metricVec.getMetricWith(labels)
- if metric != nil {
- return metric.(Counter), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
- c, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return c
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
-func (v *CounterVec) With(labels Labels) Counter {
- c, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return c
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the CounterVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
- vec, err := v.curryWith(labels)
- if vec != nil {
- return &CounterVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
-
-// CounterFunc is a Counter whose value is determined at collect time by calling a
-// provided function.
-//
-// To create CounterFunc instances, use NewCounterFunc.
-type CounterFunc interface {
- Metric
- Collector
-}
-
-// NewCounterFunc creates a new CounterFunc based on the provided
-// CounterOpts. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where a CounterFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe. The function should also honor
-// the contract for a Counter (values only go up, not down), but compliance will
-// not be checked.
-//
-// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
-func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), CounterValue, function)
-}
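
For reference, the CounterVec access patterns documented in the deleted comments look like this against the upstream client_golang API (metric and label names are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "HTTP requests, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpReqs)

	// WithLabelValues panics on a label-count mismatch;
	// GetMetricWithLabelValues returns the error instead.
	httpReqs.WithLabelValues("404", "GET").Inc()

	// Currying pins one label for all subsequent operations,
	// leaving only "code" to be supplied.
	getReqs := httpReqs.MustCurryWith(prometheus.Labels{"method": "GET"})
	getReqs.WithLabelValues("200").Add(42)
}
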
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
deleted file mode 100644
index 2f19f5e1e7e..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "fmt"
- "sort"
- "strings"
-
- "github.com/cespare/xxhash/v2"
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// Desc is the descriptor used by every Prometheus Metric. It is essentially
-// the immutable meta-data of a Metric. The normal Metric implementations
-// included in this package manage their Desc under the hood. Users only have to
-// deal with Desc if they use advanced features like the ExpvarCollector or
-// custom Collectors and Metrics.
-//
-// Descriptors registered with the same registry have to fulfill certain
-// consistency and uniqueness criteria if they share the same fully-qualified
-// name: They must have the same help string and the same label names (aka label
-// dimensions) in each, constLabels and variableLabels, but they must differ in
-// the values of the constLabels.
-//
-// Descriptors that share the same fully-qualified names and the same label
-// values of their constLabels are considered equal.
-//
-// Use NewDesc to create new Desc instances.
-type Desc struct {
- // fqName has been built from Namespace, Subsystem, and Name.
- fqName string
- // help provides some helpful information about this metric.
- help string
- // constLabelPairs contains precalculated DTO label pairs based on
- // the constant labels.
- constLabelPairs []*dto.LabelPair
- // VariableLabels contains names of labels for which the metric
- // maintains variable values.
- variableLabels []string
- // id is a hash of the values of the ConstLabels and fqName. This
- // must be unique among all registered descriptors and can therefore be
- // used as an identifier of the descriptor.
- id uint64
- // dimHash is a hash of the label names (preset and variable) and the
- // Help string. Each Desc with the same fqName must have the same
- // dimHash.
- dimHash uint64
- // err is an error that occurred during construction. It is reported on
- // registration time.
- err error
-}
-
-// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
-// and will be reported on registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName must not be empty.
-//
-// variableLabels only contain the label names. Their label values are variable
-// and therefore not part of the Desc. (They are managed within the Metric.)
-//
-// For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Collector example for a usage pattern.
-func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
- d := &Desc{
- fqName: fqName,
- help: help,
- variableLabels: variableLabels,
- }
- if !model.IsValidMetricName(model.LabelValue(fqName)) {
- d.err = fmt.Errorf("%q is not a valid metric name", fqName)
- return d
- }
- // labelValues contains the label values of const labels (in order of
- // their sorted label names) plus the fqName (at position 0).
- labelValues := make([]string, 1, len(constLabels)+1)
- labelValues[0] = fqName
- labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
- labelNameSet := map[string]struct{}{}
- // First add only the const label names and sort them...
- for labelName := range constLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
- return d
- }
- labelNames = append(labelNames, labelName)
- labelNameSet[labelName] = struct{}{}
- }
- sort.Strings(labelNames)
- // ... so that we can now add const label values in the order of their names.
- for _, labelName := range labelNames {
- labelValues = append(labelValues, constLabels[labelName])
- }
- // Validate the const label values. They can't have a wrong cardinality, so
- // use len(labelValues) as expectedNumberOfValues.
- if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
- d.err = err
- return d
- }
- // Now add the variable label names, but prefix them with something that
- // cannot be in a regular label name. That prevents matching the label
- // dimension with a different mix between preset and variable labels.
- for _, labelName := range variableLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
- return d
- }
- labelNames = append(labelNames, "$"+labelName)
- labelNameSet[labelName] = struct{}{}
- }
- if len(labelNames) != len(labelNameSet) {
- d.err = errors.New("duplicate label names")
- return d
- }
-
- xxh := xxhash.New()
- for _, val := range labelValues {
- xxh.WriteString(val)
- xxh.Write(separatorByteSlice)
- }
- d.id = xxh.Sum64()
- // Sort labelNames so that order doesn't matter for the hash.
- sort.Strings(labelNames)
- // Now hash together (in this order) the help string and the sorted
- // label names.
- xxh.Reset()
- xxh.WriteString(help)
- xxh.Write(separatorByteSlice)
- for _, labelName := range labelNames {
- xxh.WriteString(labelName)
- xxh.Write(separatorByteSlice)
- }
- d.dimHash = xxh.Sum64()
-
- d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
- for n, v := range constLabels {
- d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
- Name: proto.String(n),
- Value: proto.String(v),
- })
- }
- sort.Sort(labelPairSorter(d.constLabelPairs))
- return d
-}
-
-// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
-// provided error set. If a collector returning such a descriptor is registered,
-// registration will fail with the provided error. NewInvalidDesc can be used by
-// a Collector to signal inability to describe itself.
-func NewInvalidDesc(err error) *Desc {
- return &Desc{
- err: err,
- }
-}
-
-func (d *Desc) String() string {
- lpStrings := make([]string, 0, len(d.constLabelPairs))
- for _, lp := range d.constLabelPairs {
- lpStrings = append(
- lpStrings,
- fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
- )
- }
- return fmt.Sprintf(
- "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
- d.fqName,
- d.help,
- strings.Join(lpStrings, ","),
- d.variableLabels,
- )
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
deleted file mode 100644
index 98450125d6a..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package prometheus is the core instrumentation package. It provides metrics
-// primitives to instrument code for monitoring. It also offers a registry for
-// metrics. Sub-packages allow to expose the registered metrics via HTTP
-// (package promhttp) or push them to a Pushgateway (package push). There is
-// also a sub-package promauto, which provides metrics constructors with
-// automatic registration.
-//
-// All exported functions and methods are safe to be used concurrently unless
-// specified otherwise.
-//
-// A Basic Example
-//
-// As a starting point, a very basic usage example:
-//
-// package main
-//
-// import (
-// "log"
-// "net/http"
-//
-// "github.com/prometheus/client_golang/prometheus"
-// "github.com/prometheus/client_golang/prometheus/promhttp"
-// )
-//
-// var (
-// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
-// Name: "cpu_temperature_celsius",
-// Help: "Current temperature of the CPU.",
-// })
-// hdFailures = prometheus.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// },
-// []string{"device"},
-// )
-// )
-//
-// func init() {
-// // Metrics have to be registered to be exposed:
-// prometheus.MustRegister(cpuTemp)
-// prometheus.MustRegister(hdFailures)
-// }
-//
-// func main() {
-// cpuTemp.Set(65.3)
-// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
-//
-// // The Handler function provides a default handler to expose metrics
-// // via an HTTP server. "/metrics" is the usual endpoint for that.
-// http.Handle("/metrics", promhttp.Handler())
-// log.Fatal(http.ListenAndServe(":8080", nil))
-// }
-//
-//
-// This is a complete program that exports two metrics, a Gauge and a Counter,
-// the latter with a label attached to turn it into a (one-dimensional) vector.
-//
-// Metrics
-//
-// The number of exported identifiers in this package might appear a bit
-// overwhelming. However, in addition to the basic plumbing shown in the example
-// above, you only need to understand the different metric types and their
-// vector versions for basic usage. Furthermore, if you are not concerned with
-// fine-grained control of when and how to register metrics with the registry,
-// have a look at the promauto package, which will effectively allow you to
-// ignore registration altogether in simple cases.
-//
-// Above, you have already touched the Counter and the Gauge. There are two more
-// advanced metric types: the Summary and Histogram. A more thorough description
-// of those four metric types can be found in the Prometheus docs:
-// https://prometheus.io/docs/concepts/metric_types/
-//
-// In addition to the fundamental metric types Gauge, Counter, Summary, and
-// Histogram, a very important part of the Prometheus data model is the
-// partitioning of samples along dimensions called labels, which results in
-// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
-// and HistogramVec.
-//
-// While only the fundamental metric types implement the Metric interface, both
-// the metrics and their vector versions implement the Collector interface. A
-// Collector manages the collection of a number of Metrics, but for convenience,
-// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
-// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
-// and HistogramVec are not.
-//
-// To create instances of Metrics and their vector versions, you need a suitable
-// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
-//
-// Custom Collectors and constant Metrics
-//
-// While you could create your own implementations of Metric, most likely you
-// will only ever implement the Collector interface on your own. At a first
-// glance, a custom Collector seems handy to bundle Metrics for common
-// registration (with the prime example of the different metric vectors above,
-// which bundle all the metrics of the same name but with different labels).
-//
-// There is a more involved use case, too: If you already have metrics
-// available, created outside of the Prometheus context, you don't need the
-// interface of the various Metric types. You essentially want to mirror the
-// existing numbers into Prometheus Metrics during collection. An own
-// implementation of the Collector interface is perfect for that. You can create
-// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
-// NewConstSummary (and their respective Must… versions). NewConstMetric is used
-// for all metric types with just a float64 as their value: Counter, Gauge, and
-// a special “type” called Untyped. Use the latter if you are not sure if the
-// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
-// happens in the Collect method. The Describe method has to return separate
-// Desc instances, representative of the “throw-away” metrics to be created
-// later. NewDesc comes in handy to create those Desc instances. Alternatively,
-// you could return no Desc at all, which will mark the Collector “unchecked”.
-// No checks are performed at registration time, but metric consistency will
-// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
-// errors. Thus, with unchecked Collectors, the responsibility to not collect
-// metrics that lead to inconsistencies in the total scrape result lies with the
-// implementer of the Collector. While this is not a desirable state, it is
-// sometimes necessary. The typical use case is a situation where the exact
-// metrics to be returned by a Collector cannot be predicted at registration
-// time, but the implementer has sufficient knowledge of the whole system to
-// guarantee metric consistency.
-//
-// The Collector example illustrates the use case. You can also look at the
-// source code of the processCollector (mirroring process metrics), the
-// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
-// metrics) as examples that are used in this package itself.
-//
-// If you just need to call a function to get a single float value to collect as
-// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
-// shortcuts.
-//
-// Advanced Uses of the Registry
-//
-// While MustRegister is the by far most common way of registering a Collector,
-// sometimes you might want to handle the errors the registration might cause.
-// As suggested by the name, MustRegister panics if an error occurs. With the
-// Register function, the error is returned and can be handled.
-//
-// An error is returned if the registered Collector is incompatible or
-// inconsistent with already registered metrics. The registry aims for
-// consistency of the collected metrics according to the Prometheus data model.
-// Inconsistencies are ideally detected at registration time, not at collect
-// time. The former will usually be detected at start-up time of a program,
-// while the latter will only happen at scrape time, possibly not even on the
-// first scrape if the inconsistency only becomes relevant later. That is the
-// main reason why a Collector and a Metric have to describe themselves to the
-// registry.
-//
-// So far, everything we did operated on the so-called default registry, as it
-// can be found in the global DefaultRegisterer variable. With NewRegistry, you
-// can create a custom registry, or you can even implement the Registerer or
-// Gatherer interfaces yourself. The methods Register and Unregister work in the
-// same way on a custom registry as the global functions Register and Unregister
-// on the default registry.
-//
-// There are a number of uses for custom registries: You can use registries with
-// special properties, see NewPedanticRegistry. You can avoid global state, as
-// it is imposed by the DefaultRegisterer. You can use multiple registries at
-// the same time to expose different metrics in different ways. You can use
-// separate registries for testing purposes.
-//
-// Also note that the DefaultRegisterer comes registered with a Collector for Go
-// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
-// NewProcessCollector). With a custom registry, you are in control and decide
-// yourself about the Collectors to register.
-//
-// HTTP Exposition
-//
-// The Registry implements the Gatherer interface. The caller of the Gather
-// method can then expose the gathered metrics in some way. Usually, the metrics
-// are served via HTTP on the /metrics endpoint. That's happening in the example
-// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
-//
-// Pushing to the Pushgateway
-//
-// Function for pushing to the Pushgateway can be found in the push sub-package.
-//
-// Graphite Bridge
-//
-// Functions and examples to push metrics from a Gatherer to Graphite can be
-// found in the graphite sub-package.
-//
-// Other Means of Exposition
-//
-// More ways of exposing metrics can easily be added by following the approaches
-// of the existing implementations.
-package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
deleted file mode 100644
index 18a99d5faaa..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "encoding/json"
- "expvar"
-)
-
-type expvarCollector struct {
- exports map[string]*Desc
-}
-
-// NewExpvarCollector returns a newly allocated expvar Collector that still has
-// to be registered with a Prometheus registry.
-//
-// An expvar Collector collects metrics from the expvar interface. It provides a
-// quick way to expose numeric values that are already exported via expvar as
-// Prometheus metrics. Note that the data models of expvar and Prometheus are
-// fundamentally different, and that the expvar Collector is inherently slower
-// than native Prometheus metrics. Thus, the expvar Collector is probably great
-// for experiments and prototying, but you should seriously consider a more
-// direct implementation of Prometheus metrics for monitoring production
-// systems.
-//
-// The exports map has the following meaning:
-//
-// The keys in the map correspond to expvar keys, i.e. for every expvar key you
-// want to export as Prometheus metric, you need an entry in the exports
-// map. The descriptor mapped to each key describes how to export the expvar
-// value. It defines the name and the help string of the Prometheus metric
-// proxying the expvar value. The type will always be Untyped.
-//
-// For descriptors without variable labels, the expvar value must be a number or
-// a bool. The number is then directly exported as the Prometheus sample
-// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
-// that are not numbers or bools are silently ignored.
-//
-// If the descriptor has one variable label, the expvar value must be an expvar
-// map. The keys in the expvar map become the various values of the one
-// Prometheus label. The values in the expvar map must be numbers or bools again
-// as above.
-//
-// For descriptors with more than one variable label, the expvar must be a
-// nested expvar map, i.e. where the values of the topmost map are maps again
-// etc. until a depth is reached that corresponds to the number of labels. The
-// leaves of that structure must be numbers or bools as above to serve as the
-// sample values.
-//
-// Anything that does not fit into the scheme above is silently ignored.
-func NewExpvarCollector(exports map[string]*Desc) Collector {
- return &expvarCollector{
- exports: exports,
- }
-}
-
-// Describe implements Collector.
-func (e *expvarCollector) Describe(ch chan<- *Desc) {
- for _, desc := range e.exports {
- ch <- desc
- }
-}
-
-// Collect implements Collector.
-func (e *expvarCollector) Collect(ch chan<- Metric) {
- for name, desc := range e.exports {
- var m Metric
- expVar := expvar.Get(name)
- if expVar == nil {
- continue
- }
- var v interface{}
- labels := make([]string, len(desc.variableLabels))
- if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
- ch <- NewInvalidMetric(desc, err)
- continue
- }
- var processValue func(v interface{}, i int)
- processValue = func(v interface{}, i int) {
- if i >= len(labels) {
- copiedLabels := append(make([]string, 0, len(labels)), labels...)
- switch v := v.(type) {
- case float64:
- m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
- case bool:
- if v {
- m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
- } else {
- m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
- }
- default:
- return
- }
- ch <- m
- return
- }
- vm, ok := v.(map[string]interface{})
- if !ok {
- return
- }
- for lv, val := range vm {
- labels[i] = lv
- processValue(val, i+1)
- }
- }
- processValue(v, 0)
- }
-}
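
A short sketch of the expvar proxying described in the deleted comments, assuming an expvar value already maintained elsewhere in the program:

package main

import (
	"expvar"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// An existing expvar integer; its String() output is a plain
	// number, which the collector can translate directly.
	open := expvar.NewInt("open_connections")
	open.Set(17)

	// No variable labels in the Desc, so the expvar value must be a
	// number or bool; maps would require one label per nesting level.
	c := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"open_connections": prometheus.NewDesc(
			"open_connections", "Currently open connections.", nil, nil),
	})
	prometheus.MustRegister(c)
}
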
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
deleted file mode 100644
index 3d383a735c3..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
- offset64 = 14695981039346656037
- prime64 = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
- return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
- for i := 0; i < len(s); i++ {
- h ^= uint64(s[i])
- h *= prime64
- }
- return h
-}
-
-// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
-func hashAddByte(h uint64, b byte) uint64 {
- h ^= uint64(b)
- h *= prime64
- return h
-}
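
The removed helpers are an inlined FNV-1a. The standalone sketch below reproduces the same constants and loop and checks the result against the standard library's hash/fnv:

package main

import (
	"fmt"
	"hash/fnv"
)

// Same constants as the removed helpers.
const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashAdd folds a string into an fnv64a hash, exactly as the deleted code did.
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	h := hashAdd(offset64, "go_goroutines")

	std := fnv.New64a()
	std.Write([]byte("go_goroutines"))

	fmt.Println(h == std.Sum64()) // true: both implement FNV-1a.
}
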
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
deleted file mode 100644
index d67573f767a..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "math"
- "sync/atomic"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// Gauge is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// A Gauge is typically used for measured values like temperatures or current
-// memory usage, but also "counts" that can go up and down, like the number of
-// running goroutines.
-//
-// To create Gauge instances, use NewGauge.
-type Gauge interface {
- Metric
- Collector
-
- // Set sets the Gauge to an arbitrary value.
- Set(float64)
- // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
- // values.
- Inc()
- // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
- // values.
- Dec()
- // Add adds the given value to the Gauge. (The value can be negative,
- // resulting in a decrease of the Gauge.)
- Add(float64)
- // Sub subtracts the given value from the Gauge. (The value can be
- // negative, resulting in an increase of the Gauge.)
- Sub(float64)
-
- // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
- SetToCurrentTime()
-}
-
-// GaugeOpts is an alias for Opts. See there for doc comments.
-type GaugeOpts Opts
-
-// NewGauge creates a new Gauge based on the provided GaugeOpts.
-//
-// The returned implementation is optimized for a fast Set method. If you have a
-// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
-// the former. For example, the Inc method of the returned Gauge is slower than
-// the Inc method of a Counter returned by NewCounter. This matches the typical
-// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
-// the latter Inc-heavy.
-func NewGauge(opts GaugeOpts) Gauge {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- )
- result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
- result.init(result) // Init self-collection.
- return result
-}
-
-type gauge struct {
- // valBits contains the bits of the represented float64 value. It has
- // to go first in the struct to guarantee alignment for atomic
- // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- valBits uint64
-
- selfCollector
-
- desc *Desc
- labelPairs []*dto.LabelPair
-}
-
-func (g *gauge) Desc() *Desc {
- return g.desc
-}
-
-func (g *gauge) Set(val float64) {
- atomic.StoreUint64(&g.valBits, math.Float64bits(val))
-}
-
-func (g *gauge) SetToCurrentTime() {
- g.Set(float64(time.Now().UnixNano()) / 1e9)
-}
-
-func (g *gauge) Inc() {
- g.Add(1)
-}
-
-func (g *gauge) Dec() {
- g.Add(-1)
-}
-
-func (g *gauge) Add(val float64) {
- for {
- oldBits := atomic.LoadUint64(&g.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
- if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
- return
- }
- }
-}
-
-func (g *gauge) Sub(val float64) {
- g.Add(val * -1)
-}
-
-func (g *gauge) Write(out *dto.Metric) error {
- val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
- return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
-}
-
-// GaugeVec is a Collector that bundles a set of Gauges that all share the same
-// Desc, but have different values for their variable labels. This is used if
-// you want to count the same thing partitioned by various dimensions
-// (e.g. number of operations queued, partitioned by user and operation
-// type). Create instances with NewGaugeVec.
-type GaugeVec struct {
- *metricVec
-}
-
-// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
-// partitioned by the given label names.
-func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &GaugeVec{
- metricVec: newMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
- }
- result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
- result.init(result) // Init self-collection.
- return result
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Gauge for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
-// label values is accessed for the first time, a new Gauge is created.
-//
-// Calling this method without using the returned Gauge merely creates the new
-// Gauge and leaves it at its starting value of 0. See also the SummaryVec
-// example.
-//
-// Keeping the Gauge for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
-// Gauge will still exist, but it will not be exported anymore, even if a
-// Gauge with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of VariableLabels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
- metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Gauge), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Gauge for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Gauge is created. Implications of
-// creating a Gauge without using it and keeping the Gauge for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
- metric, err := v.metricVec.getMetricWith(labels)
- if metric != nil {
- return metric.(Gauge), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
- g, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return g
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
-func (v *GaugeVec) With(labels Labels) Gauge {
- g, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return g
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the GaugeVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
- vec, err := v.curryWith(labels)
- if vec != nil {
- return &GaugeVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
-
-// GaugeFunc is a Gauge whose value is determined at collect time by calling a
-// provided function.
-//
-// To create GaugeFunc instances, use NewGaugeFunc.
-type GaugeFunc interface {
- Metric
- Collector
-}
-
-// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
-// value reported is determined by calling the given function from within the
-// Write method. Take into account that metric collection may happen
-// concurrently. Therefore, it must be safe to call the provided function
-// concurrently.
-//
-// NewGaugeFunc is a good way to create an “info” style metric with a constant
-// value of 1. Example:
-// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
-func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), GaugeValue, function)
-}
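
Typical use of the Gauge and GaugeVec API deleted above, as a short illustrative sketch (the namespace, metric, and label names are made up):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	queueDepth := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "app",
		Name:      "queue_depth",
		Help:      "Items currently queued, partitioned by queue name.",
	}, []string{"queue"})
	prometheus.MustRegister(queueDepth)

	queueDepth.WithLabelValues("ingest").Set(42) // Set is the optimized path.
	queueDepth.WithLabelValues("ingest").Dec()

	// Currying pre-sets the "queue" label for all subsequent operations.
	ingest := queueDepth.MustCurryWith(prometheus.Labels{"queue": "ingest"})
	ingest.WithLabelValues().Inc()
}
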
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
deleted file mode 100644
index ea05cf429f2..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ /dev/null
@@ -1,396 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "runtime"
- "runtime/debug"
- "sync"
- "time"
-)
-
-type goCollector struct {
- goroutinesDesc *Desc
- threadsDesc *Desc
- gcDesc *Desc
- goInfoDesc *Desc
-
- // ms... are memstats related.
- msLast *runtime.MemStats // Previously collected memstats.
- msLastTimestamp time.Time
- msMtx sync.Mutex // Protects msLast and msLastTimestamp.
- msMetrics memStatsMetrics
- msRead func(*runtime.MemStats) // For mocking in tests.
- msMaxWait time.Duration // Wait time for fresh memstats.
- msMaxAge time.Duration // Maximum allowed age of old memstats.
-}
-
-// NewGoCollector returns a collector that exports metrics about the current Go
-// process. This includes memory stats. To collect those, runtime.ReadMemStats
-// is called. This requires “stopping the world”, which usually only happens for
-// garbage collection (GC). Take the following implications into account when
-// deciding whether to use the Go collector:
-//
-// 1. The performance impact of stopping the world grows with how frequently
-// metrics are collected. With Go1.9 or later, the stop-the-world time per
-// metrics collection is very short (~25µs), so the performance impact will
-// only matter in rare cases. With older Go versions, however, the
-// stop-the-world duration depends on the heap size and can be quite
-// significant (~1.7 ms/GiB as per
-// https://go-review.googlesource.com/c/go/+/34937).
-//
-// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
-// metrics collection happens to coincide with GC, it will only complete after
-// GC has finished. Usually, GC is fast enough to not cause problems. However,
-// with a very large heap, GC might take multiple seconds, which is enough to
-// cause scrape timeouts in common setups. To avoid this problem, the Go
-// collector will use the memstats from a previous collection if
-// runtime.ReadMemStats takes more than 1s. However, if there are no previously
-// collected memstats, or they were collected more than 5m ago, the collection
-// will block until runtime.ReadMemStats succeeds. (The problem might be solved
-// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
-// issue.)
-func NewGoCollector() Collector {
- return &goCollector{
- goroutinesDesc: NewDesc(
- "go_goroutines",
- "Number of goroutines that currently exist.",
- nil, nil),
- threadsDesc: NewDesc(
- "go_threads",
- "Number of OS threads created.",
- nil, nil),
- gcDesc: NewDesc(
- "go_gc_duration_seconds",
- "A summary of the pause duration of garbage collection cycles.",
- nil, nil),
- goInfoDesc: NewDesc(
- "go_info",
- "Information about the Go environment.",
- nil, Labels{"version": runtime.Version()}),
- msLast: &runtime.MemStats{},
- msRead: runtime.ReadMemStats,
- msMaxWait: time.Second,
- msMaxAge: 5 * time.Minute,
- msMetrics: memStatsMetrics{
- {
- desc: NewDesc(
- memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated, even if freed.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("sys_bytes"),
- "Number of bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mallocs_total"),
- "Total number of mallocs.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("frees_total"),
- "Total number of frees.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_inuse_bytes"),
- "Number of heap bytes that are in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_released_bytes"),
- "Number of heap bytes released to OS.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_objects"),
- "Number of allocated objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("last_gc_time_seconds"),
- "Number of seconds since 1970 of last garbage collection.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_cpu_fraction"),
- "The fraction of this program's available CPU time used by the GC since the program started.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
- valType: GaugeValue,
- },
- },
- }
-}
-
-func memstatNamespace(s string) string {
- return "go_memstats_" + s
-}
-
-// Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
- ch <- c.goroutinesDesc
- ch <- c.threadsDesc
- ch <- c.gcDesc
- ch <- c.goInfoDesc
- for _, i := range c.msMetrics {
- ch <- i.desc
- }
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
- var (
- ms = &runtime.MemStats{}
- done = make(chan struct{})
- )
- // Start reading memstats first as it might take a while.
- go func() {
- c.msRead(ms)
- c.msMtx.Lock()
- c.msLast = ms
- c.msLastTimestamp = time.Now()
- c.msMtx.Unlock()
- close(done)
- }()
-
- ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
- n, _ := runtime.ThreadCreateProfile(nil)
- ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
-
- var stats debug.GCStats
- stats.PauseQuantiles = make([]time.Duration, 5)
- debug.ReadGCStats(&stats)
-
- quantiles := make(map[float64]float64)
- for idx, pq := range stats.PauseQuantiles[1:] {
- quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
- }
- quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
- ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
-
- ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
-
- timer := time.NewTimer(c.msMaxWait)
- select {
- case <-done: // Our own ReadMemStats succeeded in time. Use it.
- timer.Stop() // Important for high collection frequencies to not pile up timers.
- c.msCollect(ch, ms)
- return
- case <-timer.C: // Time out, use last memstats if possible. Continue below.
- }
- c.msMtx.Lock()
- if time.Since(c.msLastTimestamp) < c.msMaxAge {
- // Last memstats are recent enough. Collect from them under the lock.
- c.msCollect(ch, c.msLast)
- c.msMtx.Unlock()
- return
- }
- // If we are here, the last memstats are too old or don't exist. We have
- // to wait until our own ReadMemStats finally completes. For that to
- // happen, we have to release the lock.
- c.msMtx.Unlock()
- <-done
- c.msCollect(ch, ms)
-}
-
-func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
- for _, i := range c.msMetrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
- }
-}
-
-// memStatsMetrics provides the description, value, and value type for each memstat metric.
-type memStatsMetrics []struct {
- desc *Desc
- eval func(*runtime.MemStats) float64
- valType ValueType
-}
-
-// NewBuildInfoCollector returns a collector collecting a single metric
-// "go_build_info" with the constant value 1 and three labels "path", "version",
-// and "checksum". Their label values contain the main module path, version, and
-// checksum, respectively. The labels will only have meaningful values if the
-// binary is built with Go module support and from source code retrieved from
-// the source repository (rather than the local file system). This is usually
-// accomplished by building from outside of GOPATH, specifying the full address
-// of the main package, e.g. "GO111MODULE=on go run
-// github.com/prometheus/client_golang/examples/random". If built without Go
-// module support, all label values will be "unknown". If built with Go module
-// support but using the source code from the local file system, the "path" will
-// be set appropriately, but "checksum" will be empty and "version" will be
-// "(devel)".
-//
-// This collector uses only the build information for the main module. See
-// https://github.com/povilasv/prommod for an example of a collector for the
-// module dependencies.
-func NewBuildInfoCollector() Collector {
- path, version, sum := readBuildInfo()
- c := &selfCollector{MustNewConstMetric(
- NewDesc(
- "go_build_info",
- "Build information about the main Go module.",
- nil, Labels{"path": path, "version": version, "checksum": sum},
- ),
- GaugeValue, 1)}
- c.init(c.self)
- return c
-}
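
A minimal sketch of registering the removed Go collector on a dedicated registry (the package's default registry already registers one, so a fresh registry keeps the output to just these metrics); the port is arbitrary:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A fresh registry exposing only the Go runtime metrics.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
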
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
deleted file mode 100644
index d4ea301a33c..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ /dev/null
@@ -1,637 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "math"
- "runtime"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// A Histogram counts individual observations from an event or sample stream in
-// configurable buckets. Similar to a summary, it also provides a sum of
-// observations and an observation count.
-//
-// On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile function in the query language.
-//
-// Note that Histograms, in contrast to Summaries, can be aggregated with the
-// Prometheus query language (see the documentation for detailed
-// procedures). However, Histograms require the user to pre-define suitable
-// buckets, and they are in general less accurate. The Observe method of a
-// Histogram has a very low performance overhead in comparison with the Observe
-// method of a Summary.
-//
-// To create Histogram instances, use NewHistogram.
-type Histogram interface {
- Metric
- Collector
-
- // Observe adds a single observation to the histogram.
- Observe(float64)
-}
-
-// bucketLabel is used for the label that defines the upper bound of a
-// bucket of a histogram ("le" -> "less or equal").
-const bucketLabel = "le"
-
-// DefBuckets are the default Histogram buckets. The default buckets are
-// tailored to broadly measure the response time (in seconds) of a network
-// service. Most likely, however, you will be required to define buckets
-// customized to your use case.
-var (
- DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
-
- errBucketLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in histograms", bucketLabel,
- )
-)
-
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is zero or negative.
-func LinearBuckets(start, width float64, count int) []float64 {
- if count < 1 {
- panic("LinearBuckets needs a positive count")
- }
- buckets := make([]float64, count)
- for i := range buckets {
- buckets[i] = start
- start += width
- }
- return buckets
-}
-
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
-// or if 'factor' is less than or equal to 1.
-func ExponentialBuckets(start, factor float64, count int) []float64 {
- if count < 1 {
- panic("ExponentialBuckets needs a positive count")
- }
- if start <= 0 {
- panic("ExponentialBuckets needs a positive start value")
- }
- if factor <= 1 {
- panic("ExponentialBuckets needs a factor greater than 1")
- }
- buckets := make([]float64, count)
- for i := range buckets {
- buckets[i] = start
- start *= factor
- }
- return buckets
-}
-
-// HistogramOpts bundles the options for creating a Histogram metric. It is
-// mandatory to set Name to a non-empty string. All other fields are optional
-// and can safely be left at their zero value, although it is strongly
-// encouraged to set a Help string.
-type HistogramOpts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Histogram (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the Histogram must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this Histogram.
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this metric. Metrics
- // with the same fully-qualified name must have the same label names in
- // their ConstLabels.
- //
- // ConstLabels are only used rarely. In particular, do not use them to
- // attach the same labels to all your metrics. Those use cases are
- // better covered by target labels set by the scraping Prometheus
- // server, or by one specific metric (e.g. a build_info or a
- // machine_role metric). See also
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
- ConstLabels Labels
-
- // Buckets defines the buckets into which observations are counted. Each
- // element in the slice is the upper inclusive bound of a bucket. The
- // values must be sorted in strictly increasing order. There is no need
- // to add a highest bucket with +Inf bound, it will be added
- // implicitly. The default value is DefBuckets.
- Buckets []float64
-}
-
-// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
-// panics if the buckets in HistogramOpts are not in strictly increasing order.
-//
-// The returned implementation also implements ExemplarObserver. It is safe to
-// perform the corresponding type assertion. Exemplars are tracked separately
-// for each bucket.
-func NewHistogram(opts HistogramOpts) Histogram {
- return newHistogram(
- NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ),
- opts,
- )
-}
-
-func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
- if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
- }
-
- for _, n := range desc.variableLabels {
- if n == bucketLabel {
- panic(errBucketLabelNotAllowed)
- }
- }
- for _, lp := range desc.constLabelPairs {
- if lp.GetName() == bucketLabel {
- panic(errBucketLabelNotAllowed)
- }
- }
-
- if len(opts.Buckets) == 0 {
- opts.Buckets = DefBuckets
- }
-
- h := &histogram{
- desc: desc,
- upperBounds: opts.Buckets,
- labelPairs: makeLabelPairs(desc, labelValues),
- counts: [2]*histogramCounts{{}, {}},
- now: time.Now,
- }
- for i, upperBound := range h.upperBounds {
- if i < len(h.upperBounds)-1 {
- if upperBound >= h.upperBounds[i+1] {
- panic(fmt.Errorf(
- "histogram buckets must be in increasing order: %f >= %f",
- upperBound, h.upperBounds[i+1],
- ))
- }
- } else {
- if math.IsInf(upperBound, +1) {
- // The +Inf bucket is implicit. Remove it here.
- h.upperBounds = h.upperBounds[:i]
- }
- }
- }
- // Now we know the final length of h.upperBounds and can make buckets
- // for both counts as well as exemplars:
- h.counts[0].buckets = make([]uint64, len(h.upperBounds))
- h.counts[1].buckets = make([]uint64, len(h.upperBounds))
- h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
-
- h.init(h) // Init self-collection.
- return h
-}
-
-type histogramCounts struct {
- // sumBits contains the bits of the float64 representing the sum of all
- // observations. sumBits and count have to go first in the struct to
- // guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- sumBits uint64
- count uint64
- buckets []uint64
-}
-
-type histogram struct {
- // countAndHotIdx enables lock-free writes with use of atomic updates.
- // The most significant bit is the hot index [0 or 1] of the count field
- // below. Observe calls update the hot one. All remaining bits count the
- // number of Observe calls. Observe starts by incrementing this counter,
- // and finishes by incrementing the count field in the respective
- // histogramCounts, as a marker for completion.
- //
- // Calls of the Write method (which are non-mutating reads from the
- // perspective of the histogram) swap the hot–cold under the writeMtx
- // lock. A cooldown is awaited (while locked) by comparing the number of
- // observations with the initiation count. Once they match, the
- // last observation on the now cool one has completed. All cool fields must
- // be merged into the new hot before releasing writeMtx.
- //
- // Fields with atomic access first! See alignment constraint:
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- countAndHotIdx uint64
-
- selfCollector
- desc *Desc
- writeMtx sync.Mutex // Only used in the Write method.
-
- // Two counts, one is "hot" for lock-free observations, the other is
- // "cold" for writing out a dto.Metric. It has to be an array of
- // pointers to guarantee 64bit alignment of the histogramCounts, see
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- counts [2]*histogramCounts
-
- upperBounds []float64
- labelPairs []*dto.LabelPair
- exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
-
- now func() time.Time // To mock out time.Now() for testing.
-}
-
-func (h *histogram) Desc() *Desc {
- return h.desc
-}
-
-func (h *histogram) Observe(v float64) {
- h.observe(v, h.findBucket(v))
-}
-
-func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
- i := h.findBucket(v)
- h.observe(v, i)
- h.updateExemplar(v, i, e)
-}
-
-func (h *histogram) Write(out *dto.Metric) error {
- // For simplicity, we protect this whole method by a mutex. It is not in
- // the hot path, i.e. Observe is called much more often than Write. The
- // complication of making Write lock-free isn't worth it, if possible at
- // all.
- h.writeMtx.Lock()
- defer h.writeMtx.Unlock()
-
- // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
- // without touching the count bits. See the struct comments for a full
- // description of the algorithm.
- n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
- // count is contained unchanged in the lower 63 bits.
- count := n & ((1 << 63) - 1)
- // The most significant bit tells us which of the two counts is hot. The complement
- // is thus the cold one.
- hotCounts := h.counts[n>>63]
- coldCounts := h.counts[(^n)>>63]
-
- // Await cooldown.
- for count != atomic.LoadUint64(&coldCounts.count) {
- runtime.Gosched() // Let observations get work done.
- }
-
- his := &dto.Histogram{
- Bucket: make([]*dto.Bucket, len(h.upperBounds)),
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
- }
- var cumCount uint64
- for i, upperBound := range h.upperBounds {
- cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
- his.Bucket[i] = &dto.Bucket{
- CumulativeCount: proto.Uint64(cumCount),
- UpperBound: proto.Float64(upperBound),
- }
- if e := h.exemplars[i].Load(); e != nil {
- his.Bucket[i].Exemplar = e.(*dto.Exemplar)
- }
- }
- // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
- if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
- b := &dto.Bucket{
- CumulativeCount: proto.Uint64(count),
- UpperBound: proto.Float64(math.Inf(1)),
- Exemplar: e.(*dto.Exemplar),
- }
- his.Bucket = append(his.Bucket, b)
- }
-
- out.Histogram = his
- out.Label = h.labelPairs
-
- // Finally add all the cold counts to the new hot counts and reset the cold counts.
- atomic.AddUint64(&hotCounts.count, count)
- atomic.StoreUint64(&coldCounts.count, 0)
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- atomic.StoreUint64(&coldCounts.sumBits, 0)
- break
- }
- }
- for i := range h.upperBounds {
- atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
- atomic.StoreUint64(&coldCounts.buckets[i], 0)
- }
- return nil
-}
-
-// findBucket returns the index of the bucket for the provided value, or
-// len(h.upperBounds) for the +Inf bucket.
-func (h *histogram) findBucket(v float64) int {
- // TODO(beorn7): For small numbers of buckets (<30), a linear search is
- // slightly faster than the binary search. If we really care, we could
- // switch from one search strategy to the other depending on the number
- // of buckets.
- //
- // Microbenchmarks (BenchmarkHistogramNoLabels):
- // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
- // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
- // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
- return sort.SearchFloat64s(h.upperBounds, v)
-}
-
-// observe is the implementation for Observe without the findBucket part.
-func (h *histogram) observe(v float64, bucket int) {
- // We increment h.countAndHotIdx so that the counter in the lower
- // 63 bits gets incremented. At the same time, we get the new value
- // back, which we can use to find the currently-hot counts.
- n := atomic.AddUint64(&h.countAndHotIdx, 1)
- hotCounts := h.counts[n>>63]
-
- if bucket < len(h.upperBounds) {
- atomic.AddUint64(&hotCounts.buckets[bucket], 1)
- }
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- break
- }
- }
- // Increment count last as we take it as a signal that the observation
- // is complete.
- atomic.AddUint64(&hotCounts.count, 1)
-}
-
-// updateExemplar replaces the exemplar for the provided bucket. With empty
-// labels, it's a no-op. It panics if any of the labels is invalid.
-func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
- if l == nil {
- return
- }
- e, err := newExemplar(v, h.now(), l)
- if err != nil {
- panic(err)
- }
- h.exemplars[bucket].Store(e)
-}
-
-// HistogramVec is a Collector that bundles a set of Histograms that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. HTTP request latencies, partitioned by status code and method). Create
-// instances with NewHistogramVec.
-type HistogramVec struct {
- *metricVec
-}
-
-// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
-// partitioned by the given label names.
-func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &HistogramVec{
- metricVec: newMetricVec(desc, func(lvs ...string) Metric {
- return newHistogram(desc, opts, lvs...)
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Histogram for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
-// label values is accessed for the first time, a new Histogram is created.
-//
-// Calling this method without using the returned Histogram merely creates the
-// new Histogram and leaves it at its starting value, a Histogram without any
-// observations.
-//
-// Keeping the Histogram for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
-// Histogram will still exist, but it will not be exported anymore, even if a
-// Histogram with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of VariableLabels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
- metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Histogram for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Histogram is created. Implications of
-// creating a Histogram without using it and keeping the Histogram for later use
-// are the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
- metric, err := v.metricVec.getMetricWith(labels)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
- h, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return h
-}
-
-// With works as GetMetricWith but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (v *HistogramVec) With(labels Labels) Observer {
- h, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return h
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the HistogramVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
- vec, err := v.curryWith(labels)
- if vec != nil {
- return &HistogramVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
-
-type constHistogram struct {
- desc *Desc
- count uint64
- sum float64
- buckets map[float64]uint64
- labelPairs []*dto.LabelPair
-}
-
-func (h *constHistogram) Desc() *Desc {
- return h.desc
-}
-
-func (h *constHistogram) Write(out *dto.Metric) error {
- his := &dto.Histogram{}
- buckets := make([]*dto.Bucket, 0, len(h.buckets))
-
- his.SampleCount = proto.Uint64(h.count)
- his.SampleSum = proto.Float64(h.sum)
-
- for upperBound, count := range h.buckets {
- buckets = append(buckets, &dto.Bucket{
- CumulativeCount: proto.Uint64(count),
- UpperBound: proto.Float64(upperBound),
- })
- }
-
- if len(buckets) > 0 {
- sort.Sort(buckSort(buckets))
- }
- his.Bucket = buckets
-
- out.Histogram = his
- out.Label = h.labelPairs
-
- return nil
-}
-
-// NewConstHistogram returns a metric representing a Prometheus histogram with
-// fixed values for the count, sum, and bucket counts. As those parameters
-// cannot be changed, the returned value does not implement the Histogram
-// interface (but only the Metric interface). Users of this package will not
-// have much use for it in regular operations. However, when implementing custom
-// Collectors, it is useful as a throw-away metric that is generated on the fly
-// and sent to Prometheus in the Collect method.
-//
-// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
-// bucket.
-//
-// NewConstHistogram returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc or if Desc is invalid.
-func NewConstHistogram(
- desc *Desc,
- count uint64,
- sum float64,
- buckets map[float64]uint64,
- labelValues ...string,
-) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
- return nil, err
- }
- return &constHistogram{
- desc: desc,
- count: count,
- sum: sum,
- buckets: buckets,
- labelPairs: makeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstHistogram is a version of NewConstHistogram that panics where
-// NewConstHistogram would have returned an error.
-func MustNewConstHistogram(
- desc *Desc,
- count uint64,
- sum float64,
- buckets map[float64]uint64,
- labelValues ...string,
-) Metric {
- m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
-
-type buckSort []*dto.Bucket
-
-func (s buckSort) Len() int {
- return len(s)
-}
-
-func (s buckSort) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s buckSort) Less(i, j int) bool {
- return s[i].GetUpperBound() < s[j].GetUpperBound()
-}
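
A short illustrative sketch of the histogram API removed above, combining NewHistogramVec with the bucket helpers (the metric name and bucket choices are made up):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request latency in seconds.",
		Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), // 1ms up to ~512ms
	}, []string{"method"})
	prometheus.MustRegister(latency)

	latency.WithLabelValues("GET").Observe(0.042)

	fmt.Println(prometheus.LinearBuckets(0, 10, 5)) // [0 10 20 30 40]
}
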
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
deleted file mode 100644
index 351c26e1aed..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "sort"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// metricSorter is a sortable slice of *dto.Metric.
-type metricSorter []*dto.Metric
-
-func (s metricSorter) Len() int {
- return len(s)
-}
-
-func (s metricSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s metricSorter) Less(i, j int) bool {
- if len(s[i].Label) != len(s[j].Label) {
- // This should not happen. The metrics are
- // inconsistent. However, we have to deal with it, as
- // people might use custom collectors or metric family injection
- // to create inconsistent metrics. So let's simply compare the
- // number of labels in this case. That will still yield
- // reproducible sorting.
- return len(s[i].Label) < len(s[j].Label)
- }
- for n, lp := range s[i].Label {
- vi := lp.GetValue()
- vj := s[j].Label[n].GetValue()
- if vi != vj {
- return vi < vj
- }
- }
-
- // We should never arrive here. Multiple metrics with the same
- // label set in the same scrape will lead to undefined ingestion
- // behavior. However, as above, we have to provide stable sorting
- // here, even for inconsistent metrics. So sort equal metrics
- // by their timestamp, with missing timestamps (implying "now")
- // coming last.
- if s[i].TimestampMs == nil {
- return false
- }
- if s[j].TimestampMs == nil {
- return true
- }
- return s[i].GetTimestampMs() < s[j].GetTimestampMs()
-}
-
-// NormalizeMetricFamilies returns a MetricFamily slice with empty
-// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
-// the slice, with the contained Metrics sorted within each MetricFamily.
-func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
- for _, mf := range metricFamiliesByName {
- sort.Sort(metricSorter(mf.Metric))
- }
- names := make([]string, 0, len(metricFamiliesByName))
- for name, mf := range metricFamiliesByName {
- if len(mf.Metric) > 0 {
- names = append(names, name)
- }
- }
- sort.Strings(names)
- result := make([]*dto.MetricFamily, 0, len(names))
- for _, name := range names {
- result = append(result, metricFamiliesByName[name])
- }
- return result
-}
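
The ordering rule implemented by metricSorter can be illustrated with a trimmed-down stand-in type. This sketch assumes equal label counts and skips the length-mismatch fallback:

package main

import (
	"fmt"
	"sort"
)

// metric is a minimal stand-in for dto.Metric, just enough to show the rule:
// sort by label values first, then by timestamp, with nil ("now") last.
type metric struct {
	labelValues []string
	timestampMs *int64
}

func main() {
	ts := int64(1000)
	ms := []metric{
		{labelValues: []string{"b"}},
		{labelValues: []string{"a"}, timestampMs: &ts},
		{labelValues: []string{"a"}},
	}
	sort.Slice(ms, func(i, j int) bool {
		for n, vi := range ms[i].labelValues {
			if vj := ms[j].labelValues[n]; vi != vj {
				return vi < vj
			}
		}
		if ms[i].timestampMs == nil {
			return false
		}
		if ms[j].timestampMs == nil {
			return true
		}
		return *ms[i].timestampMs < *ms[j].timestampMs
	})
	for _, m := range ms {
		fmt.Println(m.labelValues, m.timestampMs != nil)
	}
	// Prints [a] true, then [a] false, then [b] false.
}
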
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
deleted file mode 100644
index 2744443ac22..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "fmt"
- "strings"
- "unicode/utf8"
-
- "github.com/prometheus/common/model"
-)
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use-case is the specification of constant label pairs in Opts or to
-// create a Desc.
-type Labels map[string]string
-
-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-var errInconsistentCardinality = errors.New("inconsistent label cardinality")
-
-func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
- return fmt.Errorf(
- "%s: %q has %d variable labels named %q but %d values %q were provided",
- errInconsistentCardinality, fqName,
- len(labels), labels,
- len(labelValues), labelValues,
- )
-}
-
-func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
- if len(labels) != expectedNumberOfValues {
- return fmt.Errorf(
- "%s: expected %d label values but got %d in %#v",
- errInconsistentCardinality, expectedNumberOfValues,
- len(labels), labels,
- )
- }
-
- for name, val := range labels {
- if !utf8.ValidString(val) {
- return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
- }
- }
-
- return nil
-}
-
-func validateLabelValues(vals []string, expectedNumberOfValues int) error {
- if len(vals) != expectedNumberOfValues {
- return fmt.Errorf(
- "%s: expected %d label values but got %d in %#v",
- errInconsistentCardinality, expectedNumberOfValues,
- len(vals), vals,
- )
- }
-
- for _, val := range vals {
- if !utf8.ValidString(val) {
- return fmt.Errorf("label value %q is not valid UTF-8", val)
- }
- }
-
- return nil
-}
-
-func checkLabelName(l string) bool {
- return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
-}
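
A brief sketch of the Labels type in action, including the inconsistent-cardinality error produced by the validation helpers above (the metric name is illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	hits := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "hits_total",
		Help: "Hits by code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(hits)

	hits.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)

	// A missing label surfaces as an inconsistent-cardinality error.
	if _, err := hits.GetMetricWith(prometheus.Labels{"code": "404"}); err != nil {
		fmt.Println(err)
	}
}
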
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
deleted file mode 100644
index 35bd8bde34c..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "strings"
- "time"
-
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
-
-// A Metric models a single sample value with its meta data being exported to
-// Prometheus. Implementations of Metric in this package are Gauge, Counter,
-// Histogram, Summary, and Untyped.
-type Metric interface {
- // Desc returns the descriptor for the Metric. This method idempotently
- // returns the same descriptor throughout the lifetime of the
- // Metric. The returned descriptor is immutable by contract. A Metric
- // unable to describe itself must return an invalid descriptor (created
- // with NewInvalidDesc).
- Desc() *Desc
- // Write encodes the Metric into a "Metric" Protocol Buffer data
- // transmission object.
- //
- // Metric implementations must observe concurrency safety as reads of
- // this metric may occur at any time, and any blocking occurs at the
- // expense of total performance of rendering all registered
- // metrics. Ideally, Metric implementations should support concurrent
- // readers.
- //
- // While populating dto.Metric, it is the responsibility of the
- // implementation to ensure validity of the Metric protobuf (like valid
- // UTF-8 strings or syntactically valid metric and label names). It is
- // recommended to sort labels lexicographically. Callers of Write should
- // still make sure of sorting if they depend on it.
- Write(*dto.Metric) error
- // TODO(beorn7): The original rationale of passing in a pre-allocated
- // dto.Metric protobuf to save allocations has disappeared. The
- // signature of this method should be changed to "Write() (*dto.Metric,
- // error)".
-}
-
-// Opts bundles the options for creating most Metric types. Each metric
-// implementation XXX has its own XXXOpts type, but in most cases, it is just
-// an alias of this type (which might change when the requirement arises.)
-//
-// It is mandatory to set Name to a non-empty string. All other fields are
-// optional and can safely be left at their zero value, although it is strongly
-// encouraged to set a Help string.
-type Opts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Metric (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the metric must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this metric.
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this metric. Metrics
- // with the same fully-qualified name must have the same label names in
- // their ConstLabels.
- //
- // ConstLabels are only used rarely. In particular, do not use them to
- // attach the same labels to all your metrics. Those use cases are
- // better covered by target labels set by the scraping Prometheus
- // server, or by one specific metric (e.g. a build_info or a
- // machine_role metric). See also
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
- ConstLabels Labels
-}
-
-// BuildFQName joins the given three name components by "_". Empty name
-// components are ignored. If the name parameter itself is empty, an empty
-// string is returned, no matter what. Metric implementations included in this
-// library use this function internally to generate the fully-qualified metric
-// name from the name component in their Opts. Users of the library will only
-// need this function if they implement their own Metric or instantiate a Desc
-// (with NewDesc) directly.
-func BuildFQName(namespace, subsystem, name string) string {
- if name == "" {
- return ""
- }
- switch {
- case namespace != "" && subsystem != "":
- return strings.Join([]string{namespace, subsystem, name}, "_")
- case namespace != "":
- return strings.Join([]string{namespace, name}, "_")
- case subsystem != "":
- return strings.Join([]string{subsystem, name}, "_")
- }
- return name
-}
-
-// labelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers.
-type labelPairSorter []*dto.LabelPair
-
-func (s labelPairSorter) Len() int {
- return len(s)
-}
-
-func (s labelPairSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s labelPairSorter) Less(i, j int) bool {
- return s[i].GetName() < s[j].GetName()
-}
-
-type invalidMetric struct {
- desc *Desc
- err error
-}
-
-// NewInvalidMetric returns a metric whose Write method always returns the
-// provided error. It is useful if a Collector finds itself unable to collect
-// a metric and wishes to report an error to the registry.
-func NewInvalidMetric(desc *Desc, err error) Metric {
- return &invalidMetric{desc, err}
-}
-
-func (m *invalidMetric) Desc() *Desc { return m.desc }
-
-func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
-
-type timestampedMetric struct {
- Metric
- t time.Time
-}
-
-func (m timestampedMetric) Write(pb *dto.Metric) error {
- e := m.Metric.Write(pb)
- pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
- return e
-}
-
-// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
-// way that it has an explicit timestamp set to the provided Time. This is only
-// useful in rare cases as the timestamp of a Prometheus metric should usually
-// be set by the Prometheus server during scraping. Exceptions include mirroring
-// metrics with given timestamps from other metric
-// sources.
-//
-// NewMetricWithTimestamp works best with MustNewConstMetric,
-// MustNewConstHistogram, and MustNewConstSummary, see example.
-//
-// Currently, the exposition formats used by Prometheus are limited to
-// millisecond resolution. Thus, the provided time will be rounded down to the
-// nearest full millisecond value.
-func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
- return timestampedMetric{Metric: m, t: t}
-}
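
For reference, a minimal sketch of how the BuildFQName and NewMetricWithTimestamp helpers removed above are typically used together; the namespace/subsystem values and the sample value 42 are illustrative, not taken from buildah:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// "buildah" + "image" + "pulls_total" -> "buildah_image_pulls_total".
	name := prometheus.BuildFQName("buildah", "image", "pulls_total")
	fmt.Println(name)

	desc := prometheus.NewDesc(name, "Total image pulls.", nil, nil)

	// Attach an explicit timestamp, e.g. when mirroring samples that were
	// originally collected elsewhere (the rare case the doc comment names).
	m := prometheus.NewMetricWithTimestamp(
		time.Now(),
		prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42),
	)
	_ = m
}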
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
deleted file mode 100644
index 44128016fd1..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Observer is the interface that wraps the Observe method, which is used by
-// Histogram and Summary to add observations.
-type Observer interface {
- Observe(float64)
-}
-
-// The ObserverFunc type is an adapter to allow the use of ordinary
-// functions as Observers. If f is a function with the appropriate
-// signature, ObserverFunc(f) is an Observer that calls f.
-//
-// This adapter is usually used in connection with the Timer type, and there are
-// two general use cases:
-//
-// The most common one is to use a Gauge as the Observer for a Timer.
-// See the "Gauge" Timer example.
-//
-// The more advanced use case is to create a function that dynamically decides
-// which Observer to use for observing the duration. See the "Complex" Timer
-// example.
-type ObserverFunc func(float64)
-
-// Observe calls f(value). It implements Observer.
-func (f ObserverFunc) Observe(value float64) {
- f(value)
-}
-
-// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
-type ObserverVec interface {
- GetMetricWith(Labels) (Observer, error)
- GetMetricWithLabelValues(lvs ...string) (Observer, error)
- With(Labels) Observer
- WithLabelValues(...string) Observer
- CurryWith(Labels) (ObserverVec, error)
- MustCurryWith(Labels) ObserverVec
-
- Collector
-}
-
-// ExemplarObserver is implemented by Observers that offer the option of
-// observing a value together with an exemplar. Its ObserveWithExemplar method
-// works like the Observe method of an Observer but also replaces the currently
-// saved exemplar (if any) with a new one, created from the provided value, the
-// current time as timestamp, and the provided Labels. Empty Labels will lead to
-// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
-// left in place. ObserveWithExemplar panics if any of the provided labels are
-// invalid or if the provided labels contain more than 64 runes in total.
-type ExemplarObserver interface {
- ObserveWithExemplar(value float64, exemplar Labels)
-}
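
The "Gauge as Observer" use case named in the ObserverFunc comment above is the canonical Timer pattern; a minimal sketch (the metric name is illustrative):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	g := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "last_run_duration_seconds",
		Help: "Duration of the last run.",
	})

	// ObserverFunc adapts g.Set into an Observer, so ObserveDuration
	// writes the elapsed seconds straight into the gauge.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(g.Set))
	defer timer.ObserveDuration()

	time.Sleep(10 * time.Millisecond) // the work being timed
}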
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
deleted file mode 100644
index 9b809794212..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "os"
-)
-
-type processCollector struct {
- collectFn func(chan<- Metric)
- pidFn func() (int, error)
- reportErrors bool
- cpuTotal *Desc
- openFDs, maxFDs *Desc
- vsize, maxVsize *Desc
- rss *Desc
- startTime *Desc
-}
-
-// ProcessCollectorOpts defines the behavior of a process metrics collector
-// created with NewProcessCollector.
-type ProcessCollectorOpts struct {
- // PidFn returns the PID of the process the collector collects metrics
- // for. It is called upon each collection. By default, the PID of the
-	// current process is used, as determined at construction time by
- // calling os.Getpid().
- PidFn func() (int, error)
- // If non-empty, each of the collected metrics is prefixed by the
- // provided string and an underscore ("_").
- Namespace string
- // If true, any error encountered during collection is reported as an
- // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
- // and the collected metrics will be incomplete. (Possibly, no metrics
- // will be collected at all.) While that's usually not desired, it is
- // appropriate for the common "mix-in" of process metrics, where process
- // metrics are nice to have, but failing to collect them should not
- // disrupt the collection of the remaining metrics.
- ReportErrors bool
-}
-
-// NewProcessCollector returns a collector which exports the current state of
-// process metrics including CPU, memory and file descriptor usage as well as
-// the process start time. The detailed behavior is defined by the provided
-// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
-// collector for the current process with an empty namespace string and no error
-// reporting.
-//
-// The collector only works on operating systems with a Linux-style proc
-// filesystem and on Microsoft Windows. On other operating systems, it will not
-// collect any metrics.
-func NewProcessCollector(opts ProcessCollectorOpts) Collector {
- ns := ""
- if len(opts.Namespace) > 0 {
- ns = opts.Namespace + "_"
- }
-
- c := &processCollector{
- reportErrors: opts.ReportErrors,
- cpuTotal: NewDesc(
- ns+"process_cpu_seconds_total",
- "Total user and system CPU time spent in seconds.",
- nil, nil,
- ),
- openFDs: NewDesc(
- ns+"process_open_fds",
- "Number of open file descriptors.",
- nil, nil,
- ),
- maxFDs: NewDesc(
- ns+"process_max_fds",
- "Maximum number of open file descriptors.",
- nil, nil,
- ),
- vsize: NewDesc(
- ns+"process_virtual_memory_bytes",
- "Virtual memory size in bytes.",
- nil, nil,
- ),
- maxVsize: NewDesc(
- ns+"process_virtual_memory_max_bytes",
- "Maximum amount of virtual memory available in bytes.",
- nil, nil,
- ),
- rss: NewDesc(
- ns+"process_resident_memory_bytes",
- "Resident memory size in bytes.",
- nil, nil,
- ),
- startTime: NewDesc(
- ns+"process_start_time_seconds",
- "Start time of the process since unix epoch in seconds.",
- nil, nil,
- ),
- }
-
- if opts.PidFn == nil {
- pid := os.Getpid()
- c.pidFn = func() (int, error) { return pid, nil }
- } else {
- c.pidFn = opts.PidFn
- }
-
- // Set up process metric collection if supported by the runtime.
- if canCollectProcess() {
- c.collectFn = c.processCollect
- } else {
- c.collectFn = func(ch chan<- Metric) {
- c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
- }
- }
-
- return c
-}
-
-// Describe returns all descriptions of the collector.
-func (c *processCollector) Describe(ch chan<- *Desc) {
- ch <- c.cpuTotal
- ch <- c.openFDs
- ch <- c.maxFDs
- ch <- c.vsize
- ch <- c.maxVsize
- ch <- c.rss
- ch <- c.startTime
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *processCollector) Collect(ch chan<- Metric) {
- c.collectFn(ch)
-}
-
-func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
- if !c.reportErrors {
- return
- }
- if desc == nil {
- desc = NewInvalidDesc(err)
- }
- ch <- NewInvalidMetric(desc, err)
-}
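
A minimal sketch of registering this process collector on a dedicated registry; ReportErrors is enabled here to surface collection failures as invalid metrics rather than dropping them silently:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// The zero value of ProcessCollectorOpts collects for the current
	// process with no namespace prefix and ignores errors.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		ReportErrors: true,
	}))
}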
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
deleted file mode 100644
index 3117461cde7..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package prometheus
-
-import (
- "github.com/prometheus/procfs"
-)
-
-func canCollectProcess() bool {
- _, err := procfs.NewDefaultFS()
- return err == nil
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
- pid, err := c.pidFn()
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- p, err := procfs.NewProc(pid)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- if stat, err := p.Stat(); err == nil {
- ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
- ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
- ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
- if startTime, err := stat.StartTime(); err == nil {
- ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
- } else {
- c.reportError(ch, c.startTime, err)
- }
- } else {
- c.reportError(ch, nil, err)
- }
-
- if fds, err := p.FileDescriptorsLen(); err == nil {
- ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
- } else {
- c.reportError(ch, c.openFDs, err)
- }
-
- if limits, err := p.Limits(); err == nil {
- ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
- ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
- } else {
- c.reportError(ch, nil, err)
- }
-}
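
The non-Windows implementation above is a thin layer over github.com/prometheus/procfs; a standalone sketch of reading the same stat fields directly (field availability depends on the procfs version in use):

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.NewProc(os.Getpid())
	if err != nil {
		fmt.Println("no proc filesystem:", err) // e.g. on macOS
		return
	}
	if stat, err := p.Stat(); err == nil {
		fmt.Printf("cpu=%.2fs vsize=%d rss=%d\n",
			stat.CPUTime(), stat.VirtualMemory(), stat.ResidentMemory())
	}
}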
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
deleted file mode 100644
index f973398df2d..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-func canCollectProcess() bool {
- return true
-}
-
-var (
- modpsapi = syscall.NewLazyDLL("psapi.dll")
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
-
- procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
- procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
-)
-
-type processMemoryCounters struct {
- // System interface description
- // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
-
- // Refer to the Golang internal implementation
- // https://golang.org/src/internal/syscall/windows/psapi_windows.go
- _ uint32
- PageFaultCount uint32
- PeakWorkingSetSize uintptr
- WorkingSetSize uintptr
- QuotaPeakPagedPoolUsage uintptr
- QuotaPagedPoolUsage uintptr
- QuotaPeakNonPagedPoolUsage uintptr
- QuotaNonPagedPoolUsage uintptr
- PagefileUsage uintptr
- PeakPagefileUsage uintptr
- PrivateUsage uintptr
-}
-
-func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
- mem := processMemoryCounters{}
- r1, _, err := procGetProcessMemoryInfo.Call(
- uintptr(handle),
- uintptr(unsafe.Pointer(&mem)),
- uintptr(unsafe.Sizeof(mem)),
- )
- if r1 != 1 {
- return mem, err
- } else {
- return mem, nil
- }
-}
-
-func getProcessHandleCount(handle windows.Handle) (uint32, error) {
- var count uint32
- r1, _, err := procGetProcessHandleCount.Call(
- uintptr(handle),
- uintptr(unsafe.Pointer(&count)),
- )
- if r1 != 1 {
- return 0, err
- } else {
- return count, nil
- }
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
- h, err := windows.GetCurrentProcess()
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- var startTime, exitTime, kernelTime, userTime windows.Filetime
- err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
- ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
- ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
-
- mem, err := getProcessMemoryInfo(h)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
- ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
- ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
-
- handles, err := getProcessHandleCount(h)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
- ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
- ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
-}
-
-func fileTimeToSeconds(ft windows.Filetime) float64 {
- return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
deleted file mode 100644
index 5070e72e28f..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ /dev/null
@@ -1,370 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "bufio"
- "io"
- "net"
- "net/http"
-)
-
-const (
- closeNotifier = 1 << iota
- flusher
- hijacker
- readerFrom
- pusher
-)
-
-type delegator interface {
- http.ResponseWriter
-
- Status() int
- Written() int64
-}
-
-type responseWriterDelegator struct {
- http.ResponseWriter
-
- status int
- written int64
- wroteHeader bool
- observeWriteHeader func(int)
-}
-
-func (r *responseWriterDelegator) Status() int {
- return r.status
-}
-
-func (r *responseWriterDelegator) Written() int64 {
- return r.written
-}
-
-func (r *responseWriterDelegator) WriteHeader(code int) {
- if r.observeWriteHeader != nil && !r.wroteHeader {
-		// Only call observeWriteHeader the first time. It's a bug if
- // WriteHeader is called more than once, but we want to protect
- // against it here. Note that we still delegate the WriteHeader
- // to the original ResponseWriter to not mask the bug from it.
- r.observeWriteHeader(code)
- }
- r.status = code
- r.wroteHeader = true
- r.ResponseWriter.WriteHeader(code)
-}
-
-func (r *responseWriterDelegator) Write(b []byte) (int, error) {
- // If applicable, call WriteHeader here so that observeWriteHeader is
- // handled appropriately.
- if !r.wroteHeader {
- r.WriteHeader(http.StatusOK)
- }
- n, err := r.ResponseWriter.Write(b)
- r.written += int64(n)
- return n, err
-}
-
-type closeNotifierDelegator struct{ *responseWriterDelegator }
-type flusherDelegator struct{ *responseWriterDelegator }
-type hijackerDelegator struct{ *responseWriterDelegator }
-type readerFromDelegator struct{ *responseWriterDelegator }
-type pusherDelegator struct{ *responseWriterDelegator }
-
-func (d closeNotifierDelegator) CloseNotify() <-chan bool {
- //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
- //remove support from client_golang yet.
- return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
-}
-func (d flusherDelegator) Flush() {
- // If applicable, call WriteHeader here so that observeWriteHeader is
- // handled appropriately.
- if !d.wroteHeader {
- d.WriteHeader(http.StatusOK)
- }
- d.ResponseWriter.(http.Flusher).Flush()
-}
-func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- return d.ResponseWriter.(http.Hijacker).Hijack()
-}
-func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
- // If applicable, call WriteHeader here so that observeWriteHeader is
- // handled appropriately.
- if !d.wroteHeader {
- d.WriteHeader(http.StatusOK)
- }
- n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
- d.written += n
- return n, err
-}
-func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
- return d.ResponseWriter.(http.Pusher).Push(target, opts)
-}
-
-var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
-
-func init() {
- // TODO(beorn7): Code generation would help here.
- pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
- return d
- }
- pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
- return closeNotifierDelegator{d}
- }
- pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
- return flusherDelegator{d}
- }
- pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
- return struct {
- *responseWriterDelegator
- http.Flusher
- http.CloseNotifier
- }{d, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
- return hijackerDelegator{d}
- }
- pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
- return struct {
- *responseWriterDelegator
- http.Hijacker
- http.CloseNotifier
- }{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
- return struct {
- *responseWriterDelegator
- http.Hijacker
- http.Flusher
- }{d, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
- return struct {
- *responseWriterDelegator
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
- return readerFromDelegator{d}
- }
- pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.CloseNotifier
- }{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Flusher
- }{d, readerFromDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Flusher
- http.CloseNotifier
- }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- }{d, readerFromDelegator{d}, hijackerDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- http.CloseNotifier
- }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
- return pusherDelegator{d}
- }
- pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Flusher
- }{d, pusherDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- }{d, pusherDelegator{d}, hijackerDelegator{d}}
- }
- pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- http.CloseNotifier
- }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- http.Flusher
- }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
- }
-	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- }{d, pusherDelegator{d}, readerFromDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Flusher
- }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
-}
-
-func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
- d := &responseWriterDelegator{
- ResponseWriter: w,
- observeWriteHeader: observeWriteHeaderFunc,
- }
-
- id := 0
- //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
- //remove support from client_golang yet.
- if _, ok := w.(http.CloseNotifier); ok {
- id += closeNotifier
- }
- if _, ok := w.(http.Flusher); ok {
- id += flusher
- }
- if _, ok := w.(http.Hijacker); ok {
- id += hijacker
- }
- if _, ok := w.(io.ReaderFrom); ok {
- id += readerFrom
- }
- if _, ok := w.(http.Pusher); ok {
- id += pusher
- }
-
- return pickDelegator[id](d)
-}
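
The init block above enumerates all 32 combinations so the returned writer exposes exactly the optional interfaces the wrapped one supports; a hypothetical sketch of the underlying bitmask detection idea (the describe helper is not part of the library):

package main

import (
	"fmt"
	"net/http"
)

// describe mirrors the detection step in newDelegator: each optional
// interface the ResponseWriter implements contributes one bit.
func describe(w http.ResponseWriter) int {
	id := 0
	if _, ok := w.(http.Flusher); ok {
		id += 2 // the "flusher" bit above
	}
	if _, ok := w.(http.Hijacker); ok {
		id += 4 // the "hijacker" bit above
	}
	return id
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "delegator id: %d\n", describe(w))
	})
	// Uncomment to serve: http.ListenAndServe(":8080", nil)
}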
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
deleted file mode 100644
index 5e1c4546ceb..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package promhttp provides tooling around HTTP servers and clients.
-//
-// First, the package allows the creation of http.Handler instances to expose
-// Prometheus metrics via HTTP. promhttp.Handler acts on the
-// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
-// custom registry or anything that implements the Gatherer interface. It also
-// allows the creation of handlers that act differently on errors or allow to
-// log errors.
-//
-// Second, the package provides tooling to instrument instances of http.Handler
-// via middleware. Middleware wrappers follow the naming scheme
-// InstrumentHandlerX, where X describes the intended use of the middleware.
-// See each function's doc comment for specific details.
-//
-// Finally, the package allows for an http.RoundTripper to be instrumented via
-// middleware. Middleware wrappers follow the naming scheme
-// InstrumentRoundTripperX, where X describes the intended use of the
-// middleware. See each function's doc comment for specific details.
-package promhttp
-
-import (
- "compress/gzip"
- "fmt"
- "io"
- "net/http"
- "strings"
- "sync"
- "time"
-
- "github.com/prometheus/common/expfmt"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-const (
- contentTypeHeader = "Content-Type"
- contentEncodingHeader = "Content-Encoding"
- acceptEncodingHeader = "Accept-Encoding"
-)
-
-var gzipPool = sync.Pool{
- New: func() interface{} {
- return gzip.NewWriter(nil)
- },
-}
-
-// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
-// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
-// no error logging, and it applies compression if requested by the client.
-//
-// The returned http.Handler is already instrumented using the
-// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
-// create multiple http.Handlers by separate calls of the Handler function, the
-// metrics used for instrumentation will be shared between them, providing
-// global scrape counts.
-//
-// This function is meant to cover the bulk of basic use cases. If you are doing
-// anything that requires more customization (including using a non-default
-// Gatherer, different instrumentation, and non-default HandlerOpts), use the
-// HandlerFor function. See there for details.
-func Handler() http.Handler {
- return InstrumentMetricHandler(
- prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
- )
-}
-
-// HandlerFor returns an uninstrumented http.Handler for the provided
-// Gatherer. The behavior of the Handler is defined by the provided
-// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
-// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
-// instrumentation. Use the InstrumentMetricHandler function to apply the same
-// kind of instrumentation as it is used by the Handler function.
-func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
- var (
- inFlightSem chan struct{}
- errCnt = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "promhttp_metric_handler_errors_total",
- Help: "Total number of internal errors encountered by the promhttp metric handler.",
- },
- []string{"cause"},
- )
- )
-
- if opts.MaxRequestsInFlight > 0 {
- inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
- }
- if opts.Registry != nil {
-		// Initialize all possibilities that can occur below.
- errCnt.WithLabelValues("gathering")
- errCnt.WithLabelValues("encoding")
- if err := opts.Registry.Register(errCnt); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
- errCnt = are.ExistingCollector.(*prometheus.CounterVec)
- } else {
- panic(err)
- }
- }
- }
-
- h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
- if inFlightSem != nil {
- select {
- case inFlightSem <- struct{}{}: // All good, carry on.
- defer func() { <-inFlightSem }()
- default:
- http.Error(rsp, fmt.Sprintf(
- "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
- ), http.StatusServiceUnavailable)
- return
- }
- }
- mfs, err := reg.Gather()
- if err != nil {
- if opts.ErrorLog != nil {
- opts.ErrorLog.Println("error gathering metrics:", err)
- }
- errCnt.WithLabelValues("gathering").Inc()
- switch opts.ErrorHandling {
- case PanicOnError:
- panic(err)
- case ContinueOnError:
- if len(mfs) == 0 {
- // Still report the error if no metrics have been gathered.
- httpError(rsp, err)
- return
- }
- case HTTPErrorOnError:
- httpError(rsp, err)
- return
- }
- }
-
- var contentType expfmt.Format
- if opts.EnableOpenMetrics {
- contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
- } else {
- contentType = expfmt.Negotiate(req.Header)
- }
- header := rsp.Header()
- header.Set(contentTypeHeader, string(contentType))
-
- w := io.Writer(rsp)
- if !opts.DisableCompression && gzipAccepted(req.Header) {
- header.Set(contentEncodingHeader, "gzip")
- gz := gzipPool.Get().(*gzip.Writer)
- defer gzipPool.Put(gz)
-
- gz.Reset(w)
- defer gz.Close()
-
- w = gz
- }
-
- enc := expfmt.NewEncoder(w, contentType)
-
- // handleError handles the error according to opts.ErrorHandling
- // and returns true if we have to abort after the handling.
- handleError := func(err error) bool {
- if err == nil {
- return false
- }
- if opts.ErrorLog != nil {
- opts.ErrorLog.Println("error encoding and sending metric family:", err)
- }
- errCnt.WithLabelValues("encoding").Inc()
- switch opts.ErrorHandling {
- case PanicOnError:
- panic(err)
- case HTTPErrorOnError:
- // We cannot really send an HTTP error at this
- // point because we most likely have written
- // something to rsp already. But at least we can
- // stop sending.
- return true
- }
- // Do nothing in all other cases, including ContinueOnError.
- return false
- }
-
- for _, mf := range mfs {
- if handleError(enc.Encode(mf)) {
- return
- }
- }
- if closer, ok := enc.(expfmt.Closer); ok {
- // This in particular takes care of the final "# EOF\n" line for OpenMetrics.
- if handleError(closer.Close()) {
- return
- }
- }
- })
-
- if opts.Timeout <= 0 {
- return h
- }
- return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
- "Exceeded configured timeout of %v.\n",
- opts.Timeout,
- ))
-}
-
-// InstrumentMetricHandler is usually used with an http.Handler returned by the
-// HandlerFor function. It instruments the provided http.Handler with two
-// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
-// scrapes partitioned by HTTP status code, and a gauge
-// "promhttp_metric_handler_requests_in_flight" to track the number of
-// simultaneous scrapes. This function idempotently registers collectors for
-// both metrics with the provided Registerer. It panics if the registration
-// fails. The provided metrics are useful to see how many scrapes hit the
-// monitored target (which could be from different Prometheus servers or other
-// scrapers), and how often they overlap (which would result in more than one
-// scrape in flight at the same time). Note that the scrapes-in-flight gauge
-// will contain the scrape by which it is exposed, while the scrape counter will
-// only get incremented after the scrape is complete (as only then the status
-// code is known). For tracking scrape durations, use the
-// "scrape_duration_seconds" gauge created by the Prometheus server upon each
-// scrape.
-func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
- cnt := prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "promhttp_metric_handler_requests_total",
- Help: "Total number of scrapes by HTTP status code.",
- },
- []string{"code"},
- )
- // Initialize the most likely HTTP status codes.
- cnt.WithLabelValues("200")
- cnt.WithLabelValues("500")
- cnt.WithLabelValues("503")
- if err := reg.Register(cnt); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
- cnt = are.ExistingCollector.(*prometheus.CounterVec)
- } else {
- panic(err)
- }
- }
-
- gge := prometheus.NewGauge(prometheus.GaugeOpts{
- Name: "promhttp_metric_handler_requests_in_flight",
- Help: "Current number of scrapes being served.",
- })
- if err := reg.Register(gge); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
- gge = are.ExistingCollector.(prometheus.Gauge)
- } else {
- panic(err)
- }
- }
-
- return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
-}
-
-// HandlerErrorHandling defines how a Handler serving metrics will handle
-// errors.
-type HandlerErrorHandling int
-
-// These constants cause handlers serving metrics to behave as described if
-// errors are encountered.
-const (
- // Serve an HTTP status code 500 upon the first error
- // encountered. Report the error message in the body. Note that HTTP
- // errors cannot be served anymore once the beginning of a regular
- // payload has been sent. Thus, in the (unlikely) case that encoding the
- // payload into the negotiated wire format fails, serving the response
- // will simply be aborted. Set an ErrorLog in HandlerOpts to detect
- // those errors.
- HTTPErrorOnError HandlerErrorHandling = iota
- // Ignore errors and try to serve as many metrics as possible. However,
- // if no metrics can be served, serve an HTTP status code 500 and the
- // last error message in the body. Only use this in deliberate "best
- // effort" metrics collection scenarios. In this case, it is highly
- // recommended to provide other means of detecting errors: By setting an
- // ErrorLog in HandlerOpts, the errors are logged. By providing a
- // Registry in HandlerOpts, the exposed metrics include an error counter
- // "promhttp_metric_handler_errors_total", which can be used for
- // alerts.
- ContinueOnError
- // Panic upon the first error encountered (useful for "crash only" apps).
- PanicOnError
-)
-
-// Logger is the minimal interface HandlerOpts needs for logging. Note that
-// log.Logger from the standard library implements this interface, and it is
-// easy to implement by custom loggers, if they don't do so already anyway.
-type Logger interface {
- Println(v ...interface{})
-}
-
-// HandlerOpts specifies options how to serve metrics via an http.Handler. The
-// zero value of HandlerOpts is a reasonable default.
-type HandlerOpts struct {
- // ErrorLog specifies an optional logger for errors collecting and
- // serving metrics. If nil, errors are not logged at all.
- ErrorLog Logger
- // ErrorHandling defines how errors are handled. Note that errors are
- // logged regardless of the configured ErrorHandling provided ErrorLog
- // is not nil.
- ErrorHandling HandlerErrorHandling
- // If Registry is not nil, it is used to register a metric
- // "promhttp_metric_handler_errors_total", partitioned by "cause". A
- // failed registration causes a panic. Note that this error counter is
- // different from the instrumentation you get from the various
- // InstrumentHandler... helpers. It counts errors that don't necessarily
- // result in a non-2xx HTTP status code. There are two typical cases:
- // (1) Encoding errors that only happen after streaming of the HTTP body
- // has already started (and the status code 200 has been sent). This
- // should only happen with custom collectors. (2) Collection errors with
- // no effect on the HTTP status code because ErrorHandling is set to
- // ContinueOnError.
- Registry prometheus.Registerer
- // If DisableCompression is true, the handler will never compress the
- // response, even if requested by the client.
- DisableCompression bool
- // The number of concurrent HTTP requests is limited to
- // MaxRequestsInFlight. Additional requests are responded to with 503
- // Service Unavailable and a suitable message in the body. If
- // MaxRequestsInFlight is 0 or negative, no limit is applied.
- MaxRequestsInFlight int
- // If handling a request takes longer than Timeout, it is responded to
-	// with 503 Service Unavailable and a suitable message. No timeout is
- // applied if Timeout is 0 or negative. Note that with the current
- // implementation, reaching the timeout simply ends the HTTP requests as
- // described above (and even that only if sending of the body hasn't
- // started yet), while the bulk work of gathering all the metrics keeps
- // running in the background (with the eventual result to be thrown
- // away). Until the implementation is improved, it is recommended to
- // implement a separate timeout in potentially slow Collectors.
- Timeout time.Duration
- // If true, the experimental OpenMetrics encoding is added to the
- // possible options during content negotiation. Note that Prometheus
- // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
- // the only way to transmit exemplars. However, the move to OpenMetrics
- // is not completely transparent. Most notably, the values of "quantile"
- // labels of Summaries and "le" labels of Histograms are formatted with
- // a trailing ".0" if they would otherwise look like integer numbers
- // (which changes the identity of the resulting series on the Prometheus
- // server).
- EnableOpenMetrics bool
-}
-
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
- a := header.Get(acceptEncodingHeader)
- parts := strings.Split(a, ",")
- for _, part := range parts {
- part = strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return true
- }
- }
- return false
-}
-
-// httpError removes any content-encoding header and then calls http.Error with
-// the provided error and http.StatusInternalServerError. Error contents is
-// supposed to be uncompressed plain text. Same as with a plain http.Error, this
-// must not be called if the header or any payload has already been sent.
-func httpError(rsp http.ResponseWriter, err error) {
- rsp.Header().Del(contentEncodingHeader)
- http.Error(
- rsp,
- "An error has occurred while serving metrics:\n\n"+err.Error(),
- http.StatusInternalServerError,
- )
-}
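
A minimal sketch of HandlerFor with a custom registry and non-default HandlerOpts, as the doc comments above describe; the listen address and the choice of Go runtime collector are illustrative:

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling:       promhttp.ContinueOnError,
		Registry:            reg, // also exposes promhttp_metric_handler_errors_total
		MaxRequestsInFlight: 3,
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}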
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
deleted file mode 100644
index 83c49b66a81..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "crypto/tls"
- "net/http"
- "net/http/httptrace"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// The RoundTripperFunc type is an adapter to allow the use of ordinary
-// functions as RoundTrippers. If f is a function with the appropriate
-// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
-type RoundTripperFunc func(req *http.Request) (*http.Response, error)
-
-// RoundTrip implements the RoundTripper interface.
-func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
- return rt(r)
-}
-
-// InstrumentRoundTripperInFlight is a middleware that wraps the provided
-// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
-// requests currently handled by the wrapped http.RoundTripper.
-//
-// See ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
- gauge.Inc()
- defer gauge.Dec()
- return next.RoundTrip(r)
- })
-}
-
-// InstrumentRoundTripperCounter is a middleware that wraps the provided
-// http.RoundTripper to observe the request result with the provided CounterVec.
-// The CounterVec must have zero, one, or two non-const non-curried labels. For
-// those, the only allowed label names are "code" and "method". The function
-// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
-// and/or HTTP method if the respective instance label names are present in the
-// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
-//
-// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
-// is not incremented.
-//
-// See ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
- code, method := checkLabels(counter)
-
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
- resp, err := next.RoundTrip(r)
- if err == nil {
- counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
- }
- return resp, err
- })
-}
-
-// InstrumentRoundTripperDuration is a middleware that wraps the provided
-// http.RoundTripper to observe the request duration with the provided
-// ObserverVec. The ObserverVec must have zero, one, or two non-const
-// non-curried labels. For those, the only allowed label names are "code" and
-// "method". The function panics otherwise. The Observe method of the Observer
-// in the ObserverVec is called with the request duration in
-// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
-// respective instance label names are present in the ObserverVec. For
-// unpartitioned observations, use an ObserverVec with zero labels. Note that
-// partitioning of Histograms is expensive and should be used judiciously.
-//
-// If the wrapped RoundTripper panics or returns a non-nil error, no values are
-// reported.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go1.9+.
-func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
- code, method := checkLabels(obs)
-
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
- start := time.Now()
- resp, err := next.RoundTrip(r)
- if err == nil {
- obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
- }
- return resp, err
- })
-}
-
-// InstrumentTrace is used to offer flexibility in instrumenting the available
-// httptrace.ClientTrace hook functions. Each function is passed a float64
-// representing the time in seconds since the start of the http request. A user
-// may choose to use Histograms with separate buckets, or to implement custom
-// instance labels on a per-function basis.
-type InstrumentTrace struct {
- GotConn func(float64)
- PutIdleConn func(float64)
- GotFirstResponseByte func(float64)
- Got100Continue func(float64)
- DNSStart func(float64)
- DNSDone func(float64)
- ConnectStart func(float64)
- ConnectDone func(float64)
- TLSHandshakeStart func(float64)
- TLSHandshakeDone func(float64)
- WroteHeaders func(float64)
- Wait100Continue func(float64)
- WroteRequest func(float64)
-}
-
-// InstrumentRoundTripperTrace is a middleware that wraps the provided
-// RoundTripper and reports times to hook functions provided in the
-// InstrumentTrace struct. Hook functions that are not present in the provided
-// InstrumentTrace struct are ignored. Times reported to the hook functions are
-// time since the start of the request. Only with Go1.9+, those times are
-// guaranteed to never be negative. (Earlier Go versions are not using a
-// monotonic clock.) Note that partitioning of Histograms is expensive and
-// should be used judiciously.
-//
-// For hook functions that receive an error as an argument, no observations are
-// made in the event of a non-nil error value.
-//
-// See ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
- start := time.Now()
-
- trace := &httptrace.ClientTrace{
- GotConn: func(_ httptrace.GotConnInfo) {
- if it.GotConn != nil {
- it.GotConn(time.Since(start).Seconds())
- }
- },
- PutIdleConn: func(err error) {
- if err != nil {
- return
- }
- if it.PutIdleConn != nil {
- it.PutIdleConn(time.Since(start).Seconds())
- }
- },
- DNSStart: func(_ httptrace.DNSStartInfo) {
- if it.DNSStart != nil {
- it.DNSStart(time.Since(start).Seconds())
- }
- },
- DNSDone: func(_ httptrace.DNSDoneInfo) {
- if it.DNSDone != nil {
- it.DNSDone(time.Since(start).Seconds())
- }
- },
- ConnectStart: func(_, _ string) {
- if it.ConnectStart != nil {
- it.ConnectStart(time.Since(start).Seconds())
- }
- },
- ConnectDone: func(_, _ string, err error) {
- if err != nil {
- return
- }
- if it.ConnectDone != nil {
- it.ConnectDone(time.Since(start).Seconds())
- }
- },
- GotFirstResponseByte: func() {
- if it.GotFirstResponseByte != nil {
- it.GotFirstResponseByte(time.Since(start).Seconds())
- }
- },
- Got100Continue: func() {
- if it.Got100Continue != nil {
- it.Got100Continue(time.Since(start).Seconds())
- }
- },
- TLSHandshakeStart: func() {
- if it.TLSHandshakeStart != nil {
- it.TLSHandshakeStart(time.Since(start).Seconds())
- }
- },
- TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
- if err != nil {
- return
- }
- if it.TLSHandshakeDone != nil {
- it.TLSHandshakeDone(time.Since(start).Seconds())
- }
- },
- WroteHeaders: func() {
- if it.WroteHeaders != nil {
- it.WroteHeaders(time.Since(start).Seconds())
- }
- },
- Wait100Continue: func() {
- if it.Wait100Continue != nil {
- it.Wait100Continue(time.Since(start).Seconds())
- }
- },
- WroteRequest: func(_ httptrace.WroteRequestInfo) {
- if it.WroteRequest != nil {
- it.WroteRequest(time.Since(start).Seconds())
- }
- },
- }
- r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
-
- return next.RoundTrip(r)
- })
-}
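
A minimal sketch of chaining the client middleware above around http.DefaultTransport; the metric names and target URL are illustrative:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "Outgoing requests currently in flight.",
	})
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_requests_total",
			Help: "Outgoing requests by code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(inFlight, counter)

	// Middleware composes outside-in: in-flight tracking wraps counting,
	// which wraps the real transport.
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter, http.DefaultTransport),
		),
	}
	resp, err := client.Get("https://example.com")
	if err == nil {
		resp.Body.Close()
	}
}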
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
deleted file mode 100644
index 9db24380533..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ /dev/null
@@ -1,447 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "errors"
- "net/http"
- "strconv"
- "strings"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// magicString is used for the hacky label test in checkLabels. Remove once fixed.
-const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
-
-// InstrumentHandlerInFlight is a middleware that wraps the provided
-// http.Handler. It sets the provided prometheus.Gauge to the number of
-// requests currently handled by the wrapped http.Handler.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- g.Inc()
- defer g.Dec()
- next.ServeHTTP(w, r)
- })
-}
-
-// InstrumentHandlerDuration is a middleware that wraps the provided
-// http.Handler to observe the request duration with the provided ObserverVec.
-// The ObserverVec must have zero, one, or two non-const non-curried labels. For
-// those, the only allowed label names are "code" and "method". The function
-// panics otherwise. The Observe method of the Observer in the ObserverVec is
-// called with the request duration in seconds. Partitioning happens by HTTP
-// status code and/or HTTP method if the respective instance label names are
-// present in the ObserverVec. For unpartitioned observations, use an
-// ObserverVec with zero labels. Note that partitioning of Histograms is
-// expensive and should be used judiciously.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, no values are reported.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go1.9+.
-func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
- code, method := checkLabels(obs)
-
- if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
-
- obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
- })
- }
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
- next.ServeHTTP(w, r)
- obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
- })
-}
-
-// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
-// to observe the request result with the provided CounterVec. The CounterVec
-// must have zero, one, or two non-const non-curried labels. For those, the only
-// allowed label names are "code" and "method". The function panics
-// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
-// HTTP method if the respective instance label names are present in the
-// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, the Counter is not incremented.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
- code, method := checkLabels(counter)
-
- if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
- counter.With(labels(code, method, r.Method, d.Status())).Inc()
- })
- }
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- next.ServeHTTP(w, r)
- counter.With(labels(code, method, r.Method, 0)).Inc()
- })
-}
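
The middlewares compose, since each returns an http.Handler (or http.HandlerFunc, which satisfies it). A sketch of a full chain, reusing the illustrative inFlight and duration metrics from the sketches above:

```go
// counter partitions by both allowed labels, "code" and "method".
counter := prometheus.NewCounterVec(prometheus.CounterOpts{
	Name: "http_requests_total",
	Help: "Total HTTP requests by status code and method.",
}, []string{"code", "method"})
prometheus.MustRegister(counter)

chain := promhttp.InstrumentHandlerInFlight(inFlight,
	promhttp.InstrumentHandlerDuration(duration,
		promhttp.InstrumentHandlerCounter(counter, myHandler),
	),
)
http.Handle("/instrumented", chain)
```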
-
-// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
-// http.Handler to observe with the provided ObserverVec the request duration
-// until the response headers are written. The ObserverVec must have zero, one,
-// or two non-const non-curried labels. For those, the only allowed label names
-// are "code" and "method". The function panics otherwise. The Observe method of
-// the Observer in the ObserverVec is called with the request duration in
-// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
-// respective instance label names are present in the ObserverVec. For
-// unpartitioned observations, use an ObserverVec with zero labels. Note that
-// partitioning of Histograms is expensive and should be used judiciously.
-//
-// If the wrapped Handler panics before calling WriteHeader, no value is
-// reported.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go 1.9+.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
- code, method := checkLabels(obs)
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
- d := newDelegator(w, func(status int) {
- obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
- })
- next.ServeHTTP(d, r)
- })
-}
-
-// InstrumentHandlerRequestSize is a middleware that wraps the provided
-// http.Handler to observe the request size with the provided ObserverVec. The
-// ObserverVec must have zero, one, or two non-const non-curried labels. For
-// those, the only allowed label names are "code" and "method". The function
-// panics otherwise. The Observe method of the Observer in the ObserverVec is
-// called with the request size in bytes. Partitioning happens by HTTP status
-// code and/or HTTP method if the respective instance label names are present in
-// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
-// labels. Note that partitioning of Histograms is expensive and should be used
-// judiciously.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, no values are reported.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
- code, method := checkLabels(obs)
-
- if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
- size := computeApproximateRequestSize(r)
- obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
- })
- }
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- next.ServeHTTP(w, r)
- size := computeApproximateRequestSize(r)
- obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
- })
-}
-
-// InstrumentHandlerResponseSize is a middleware that wraps the provided
-// http.Handler to observe the response size with the provided ObserverVec. The
-// ObserverVec must have zero, one, or two non-const non-curried labels. For
-// those, the only allowed label names are "code" and "method". The function
-// panics otherwise. The Observe method of the Observer in the ObserverVec is
-// called with the response size in bytes. Partitioning happens by HTTP status
-// code and/or HTTP method if the respective instance label names are present in
-// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
-// labels. Note that partitioning of Histograms is expensive and should be used
-// judiciously.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, no values are reported.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
- code, method := checkLabels(obs)
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
- obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
- })
-}
-
-func checkLabels(c prometheus.Collector) (code bool, method bool) {
- // TODO(beorn7): Remove this hacky way to check for instance labels
- // once Descriptors can have their dimensionality queried.
- var (
- desc *prometheus.Desc
- m prometheus.Metric
- pm dto.Metric
- lvs []string
- )
-
- // Get the Desc from the Collector.
- descc := make(chan *prometheus.Desc, 1)
- c.Describe(descc)
-
- select {
- case desc = <-descc:
- default:
- panic("no description provided by collector")
- }
- select {
- case <-descc:
- panic("more than one description provided by collector")
- default:
- }
-
- close(descc)
-
- // Create a ConstMetric with the Desc. Since we don't know how many
-	// variable labels there are, keep adding label values until it succeeds.
- for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
- m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
- }
-
- // Write out the metric into a proto message and look at the labels.
- // If the value is not the magicString, it is a constLabel, which doesn't interest us.
- // If the label is curried, it doesn't interest us.
- // In all other cases, only "code" or "method" is allowed.
- if err := m.Write(&pm); err != nil {
- panic("error checking metric for labels")
- }
- for _, label := range pm.Label {
- name, value := label.GetName(), label.GetValue()
- if value != magicString || isLabelCurried(c, name) {
- continue
- }
- switch name {
- case "code":
- code = true
- case "method":
- method = true
- default:
- panic("metric partitioned with non-supported labels")
- }
- }
- return
-}
-
-func isLabelCurried(c prometheus.Collector, label string) bool {
- // This is even hackier than the label test above.
- // We essentially try to curry again and see if it works.
- // But for that, we need to type-convert to the two
- // types we use here, ObserverVec or *CounterVec.
- switch v := c.(type) {
- case *prometheus.CounterVec:
- if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
- return false
- }
- case prometheus.ObserverVec:
- if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
- return false
- }
- default:
- panic("unsupported metric vec type")
- }
- return true
-}
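
To illustrate why curried labels are skipped: a curried label is already bound to a value and therefore no longer counts toward the zero/one/two "code"/"method" limit that checkLabels enforces. A sketch with assumed metric and handler names:

```go
// Three labels in total, but "handler" is curried away before the vec is
// handed to the middleware, leaving only "code" and "method" to check.
obs := prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Name: "api_request_duration_seconds",
	Help: "Request latency by handler, code, and method.",
}, []string{"handler", "code", "method"})
prometheus.MustRegister(obs)

h := promhttp.InstrumentHandlerDuration(
	obs.MustCurryWith(prometheus.Labels{"handler": "api"}),
	myHandler,
)
```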
-
-// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
-// unnecessary allocations on each request.
-var emptyLabels = prometheus.Labels{}
-
-func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
- if !(code || method) {
- return emptyLabels
- }
- labels := prometheus.Labels{}
-
- if code {
- labels["code"] = sanitizeCode(status)
- }
- if method {
- labels["method"] = sanitizeMethod(reqMethod)
- }
-
- return labels
-}
-
-func computeApproximateRequestSize(r *http.Request) int {
- s := 0
- if r.URL != nil {
- s += len(r.URL.String())
- }
-
- s += len(r.Method)
- s += len(r.Proto)
- for name, values := range r.Header {
- s += len(name)
- for _, value := range values {
- s += len(value)
- }
- }
- s += len(r.Host)
-
- // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
-
- if r.ContentLength != -1 {
- s += int(r.ContentLength)
- }
- return s
-}
-
-func sanitizeMethod(m string) string {
- switch m {
- case "GET", "get":
- return "get"
- case "PUT", "put":
- return "put"
- case "HEAD", "head":
- return "head"
- case "POST", "post":
- return "post"
- case "DELETE", "delete":
- return "delete"
- case "CONNECT", "connect":
- return "connect"
- case "OPTIONS", "options":
- return "options"
- case "NOTIFY", "notify":
- return "notify"
- default:
- return strings.ToLower(m)
- }
-}
-
-// If the wrapped http.Handler has not set a status code, i.e. the value is
-// currently 0, sanitizeCode will return 200, for consistency with behavior in
-// the stdlib.
-func sanitizeCode(s int) string {
- switch s {
- case 100:
- return "100"
- case 101:
- return "101"
-
- case 200, 0:
- return "200"
- case 201:
- return "201"
- case 202:
- return "202"
- case 203:
- return "203"
- case 204:
- return "204"
- case 205:
- return "205"
- case 206:
- return "206"
-
- case 300:
- return "300"
- case 301:
- return "301"
- case 302:
- return "302"
- case 304:
- return "304"
- case 305:
- return "305"
- case 307:
- return "307"
-
- case 400:
- return "400"
- case 401:
- return "401"
- case 402:
- return "402"
- case 403:
- return "403"
- case 404:
- return "404"
- case 405:
- return "405"
- case 406:
- return "406"
- case 407:
- return "407"
- case 408:
- return "408"
- case 409:
- return "409"
- case 410:
- return "410"
- case 411:
- return "411"
- case 412:
- return "412"
- case 413:
- return "413"
- case 414:
- return "414"
- case 415:
- return "415"
- case 416:
- return "416"
- case 417:
- return "417"
- case 418:
- return "418"
-
- case 500:
- return "500"
- case 501:
- return "501"
- case 502:
- return "502"
- case 503:
- return "503"
- case 504:
- return "504"
- case 505:
- return "505"
-
- case 428:
- return "428"
- case 429:
- return "429"
- case 431:
- return "431"
- case 511:
- return "511"
-
- default:
- return strconv.Itoa(s)
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
deleted file mode 100644
index ba94405af4c..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ /dev/null
@@ -1,948 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "runtime"
- "sort"
- "strings"
- "sync"
- "unicode/utf8"
-
- "github.com/cespare/xxhash/v2"
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/expfmt"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-)
-
-const (
- // Capacity for the channel to collect metrics and descriptors.
- capMetricChan = 1000
- capDescChan = 10
-)
-
-// DefaultRegisterer and DefaultGatherer are the implementations of the
-// Registerer and Gatherer interfaces that a number of convenience functions in
-// this package act on. Initially, both variables point to the same Registry, which
-// has a process collector (currently on Linux only, see NewProcessCollector)
-// and a Go collector (see NewGoCollector, in particular the note about
-// stop-the-world implication with Go versions older than 1.9) already
-// registered. This approach of keeping default instances as global state mirrors
-// the approach of other packages in the Go standard library. Note that there
-// are caveats. Change the variables with caution and only if you understand the
-// consequences. Users who want to avoid global state altogether should not use
-// the convenience functions and act on custom instances instead.
-var (
- defaultRegistry = NewRegistry()
- DefaultRegisterer Registerer = defaultRegistry
- DefaultGatherer Gatherer = defaultRegistry
-)
-
-func init() {
- MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
- MustRegister(NewGoCollector())
-}
-
-// NewRegistry creates a new vanilla Registry without any Collectors
-// pre-registered.
-func NewRegistry() *Registry {
- return &Registry{
- collectorsByID: map[uint64]Collector{},
- descIDs: map[uint64]struct{}{},
- dimHashesByName: map[string]uint64{},
- }
-}
-
-// NewPedanticRegistry returns a registry that checks during collection if each
-// collected Metric is consistent with its reported Desc, and if the Desc has
-// actually been registered with the registry. Unchecked Collectors (those whose
-// Describe method does not yield any descriptors) are excluded from the check.
-//
-// Usually, a Registry will be happy as long as the union of all collected
-// Metrics is consistent and valid even if some metrics are not consistent with
-// their own Desc or a Desc provided by their registered Collector. Well-behaved
-// Collectors and Metrics will only provide consistent Descs. This Registry is
-// useful to test the implementation of Collectors and Metrics.
-func NewPedanticRegistry() *Registry {
- r := NewRegistry()
- r.pedanticChecksEnabled = true
- return r
-}
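
A sketch of the custom-registry route mentioned above, avoiding the global default state entirely (metric name and mount path are illustrative):

```go
reg := prometheus.NewRegistry() // or NewPedanticRegistry() in tests
reqs := prometheus.NewCounter(prometheus.CounterOpts{
	Name: "app_requests_total",
	Help: "Total requests handled by the app.",
})
reg.MustRegister(reqs)
// Expose only what this registry gathers.
http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
```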
-
-// Registerer is the interface for the part of a registry in charge of
-// registering and unregistering. Users of custom registries should use
-// Registerer as the type for registration purposes (rather than the Registry
-// type directly). In that way, they are free to use a custom Registerer
-// implementation (e.g. for testing purposes).
-type Registerer interface {
- // Register registers a new Collector to be included in metrics
- // collection. It returns an error if the descriptors provided by the
- // Collector are invalid or if they — in combination with descriptors of
- // already registered Collectors — do not fulfill the consistency and
- // uniqueness criteria described in the documentation of metric.Desc.
- //
- // If the provided Collector is equal to a Collector already registered
- // (which includes the case of re-registering the same Collector), the
- // returned error is an instance of AlreadyRegisteredError, which
- // contains the previously registered Collector.
- //
- // A Collector whose Describe method does not yield any Desc is treated
- // as unchecked. Registration will always succeed. No check for
- // re-registering (see previous paragraph) is performed. Thus, the
- // caller is responsible for not double-registering the same unchecked
- // Collector, and for providing a Collector that will not cause
- // inconsistent metrics on collection. (This would lead to scrape
- // errors.)
- Register(Collector) error
- // MustRegister works like Register but registers any number of
- // Collectors and panics upon the first registration that causes an
- // error.
- MustRegister(...Collector)
- // Unregister unregisters the Collector that equals the Collector passed
- // in as an argument. (Two Collectors are considered equal if their
- // Describe method yields the same set of descriptors.) The function
- // returns whether a Collector was unregistered. Note that an unchecked
- // Collector cannot be unregistered (as its Describe method does not
- // yield any descriptor).
- //
- // Note that even after unregistering, it will not be possible to
- // register a new Collector that is inconsistent with the unregistered
- // Collector, e.g. a Collector collecting metrics with the same name but
- // a different help string. The rationale here is that the same registry
- // instance must only collect consistent metrics throughout its
- // lifetime.
- Unregister(Collector) bool
-}
-
-// Gatherer is the interface for the part of a registry in charge of gathering
-// the collected metrics into a number of MetricFamilies. The Gatherer interface
-// comes with the same general implication as described for the Registerer
-// interface.
-type Gatherer interface {
- // Gather calls the Collect method of the registered Collectors and then
- // gathers the collected metrics into a lexicographically sorted slice
- // of uniquely named MetricFamily protobufs. Gather ensures that the
- // returned slice is valid and self-consistent so that it can be used
- // for valid exposition. As an exception to the strict consistency
- // requirements described for metric.Desc, Gather will tolerate
- // different sets of label names for metrics of the same metric family.
- //
- // Even if an error occurs, Gather attempts to gather as many metrics as
- // possible. Hence, if a non-nil error is returned, the returned
- // MetricFamily slice could be nil (in case of a fatal error that
- // prevented any meaningful metric collection) or contain a number of
- // MetricFamily protobufs, some of which might be incomplete, and some
- // might be missing altogether. The returned error (which might be a
- // MultiError) explains the details. Note that this is mostly useful for
- // debugging purposes. If the gathered protobufs are to be used for
- // exposition in actual monitoring, it is almost always better to not
- // expose an incomplete result and instead disregard the returned
- // MetricFamily protobufs in case the returned error is non-nil.
- Gather() ([]*dto.MetricFamily, error)
-}
-
-// Register registers the provided Collector with the DefaultRegisterer.
-//
-// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
-// details.
-func Register(c Collector) error {
- return DefaultRegisterer.Register(c)
-}
-
-// MustRegister registers the provided Collectors with the DefaultRegisterer and
-// panics if any error occurs.
-//
-// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
-// there for more details.
-func MustRegister(cs ...Collector) {
- DefaultRegisterer.MustRegister(cs...)
-}
-
-// Unregister removes the registration of the provided Collector from the
-// DefaultRegisterer.
-//
-// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
-// more details.
-func Unregister(c Collector) bool {
- return DefaultRegisterer.Unregister(c)
-}
-
-// GathererFunc turns a function into a Gatherer.
-type GathererFunc func() ([]*dto.MetricFamily, error)
-
-// Gather implements Gatherer.
-func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
- return gf()
-}
-
-// AlreadyRegisteredError is returned by the Register method if the Collector to
-// be registered has already been registered before, or a different Collector
-// that collects the same metrics has been registered before. Registration fails
-// in that case, but you can detect from the kind of error what has
-// happened. The error contains fields for the existing Collector and the
-// (rejected) new Collector that equals the existing one. This can be used to
-// find out if an equal Collector has been registered before and switch over to
-// using the old one, as demonstrated in the example.
-type AlreadyRegisteredError struct {
- ExistingCollector, NewCollector Collector
-}
-
-func (err AlreadyRegisteredError) Error() string {
- return "duplicate metrics collector registration attempted"
-}
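
The "switch over to the old one" pattern that the doc comment refers to looks roughly like this (counter name illustrative):

```go
c := prometheus.NewCounter(prometheus.CounterOpts{
	Name: "my_events_total",
	Help: "Total events seen.",
})
if err := prometheus.Register(c); err != nil {
	if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
		// Reuse the collector that was registered first.
		c = are.ExistingCollector.(prometheus.Counter)
	} else {
		panic(err)
	}
}
c.Inc()
```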
-
-// MultiError is a slice of errors implementing the error interface. It is used
-// by a Gatherer to report multiple errors during MetricFamily gathering.
-type MultiError []error
-
-func (errs MultiError) Error() string {
- if len(errs) == 0 {
- return ""
- }
- buf := &bytes.Buffer{}
- fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
- for _, err := range errs {
- fmt.Fprintf(buf, "\n* %s", err)
- }
- return buf.String()
-}
-
-// Append appends the provided error if it is not nil.
-func (errs *MultiError) Append(err error) {
- if err != nil {
- *errs = append(*errs, err)
- }
-}
-
-// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
-// contained error as error if len(errs) is 1. In all other cases, it returns
-// the MultiError directly. This is helpful for returning a MultiError in a way
-// that only uses the MultiError if needed.
-func (errs MultiError) MaybeUnwrap() error {
- switch len(errs) {
- case 0:
- return nil
- case 1:
- return errs[0]
- default:
- return errs
- }
-}
-
-// Registry registers Prometheus collectors, collects their metrics, and gathers
-// them into MetricFamilies for exposition. It implements both Registerer and
-// Gatherer. The zero value is not usable. Create instances with NewRegistry or
-// NewPedanticRegistry.
-type Registry struct {
- mtx sync.RWMutex
- collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
- descIDs map[uint64]struct{}
- dimHashesByName map[string]uint64
- uncheckedCollectors []Collector
- pedanticChecksEnabled bool
-}
-
-// Register implements Registerer.
-func (r *Registry) Register(c Collector) error {
- var (
- descChan = make(chan *Desc, capDescChan)
- newDescIDs = map[uint64]struct{}{}
- newDimHashesByName = map[string]uint64{}
- collectorID uint64 // All desc IDs XOR'd together.
- duplicateDescErr error
- )
- go func() {
- c.Describe(descChan)
- close(descChan)
- }()
- r.mtx.Lock()
- defer func() {
- // Drain channel in case of premature return to not leak a goroutine.
- for range descChan {
- }
- r.mtx.Unlock()
- }()
- // Conduct various tests...
- for desc := range descChan {
-
- // Is the descriptor valid at all?
- if desc.err != nil {
- return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
- }
-
- // Is the descID unique?
- // (In other words: Is the fqName + constLabel combination unique?)
- if _, exists := r.descIDs[desc.id]; exists {
- duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
- }
- // If it is not a duplicate desc in this collector, XOR it to
- // the collectorID. (We allow duplicate descs within the same
- // collector, but their existence must be a no-op.)
- if _, exists := newDescIDs[desc.id]; !exists {
- newDescIDs[desc.id] = struct{}{}
- collectorID ^= desc.id
- }
-
- // Are all the label names and the help string consistent with
- // previous descriptors of the same name?
- // First check existing descriptors...
- if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
- }
- } else {
- // ...then check the new descriptors already seen.
- if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
- }
- } else {
- newDimHashesByName[desc.fqName] = desc.dimHash
- }
- }
- }
- // A Collector yielding no Desc at all is considered unchecked.
- if len(newDescIDs) == 0 {
- r.uncheckedCollectors = append(r.uncheckedCollectors, c)
- return nil
- }
- if existing, exists := r.collectorsByID[collectorID]; exists {
- switch e := existing.(type) {
- case *wrappingCollector:
- return AlreadyRegisteredError{
- ExistingCollector: e.unwrapRecursively(),
- NewCollector: c,
- }
- default:
- return AlreadyRegisteredError{
- ExistingCollector: e,
- NewCollector: c,
- }
- }
- }
- // If the collectorID is new, but at least one of the descs existed
- // before, we are in trouble.
- if duplicateDescErr != nil {
- return duplicateDescErr
- }
-
- // Only after all tests have passed, actually register.
- r.collectorsByID[collectorID] = c
- for hash := range newDescIDs {
- r.descIDs[hash] = struct{}{}
- }
- for name, dimHash := range newDimHashesByName {
- r.dimHashesByName[name] = dimHash
- }
- return nil
-}
-
-// Unregister implements Registerer.
-func (r *Registry) Unregister(c Collector) bool {
- var (
- descChan = make(chan *Desc, capDescChan)
- descIDs = map[uint64]struct{}{}
- collectorID uint64 // All desc IDs XOR'd together.
- )
- go func() {
- c.Describe(descChan)
- close(descChan)
- }()
- for desc := range descChan {
- if _, exists := descIDs[desc.id]; !exists {
- collectorID ^= desc.id
- descIDs[desc.id] = struct{}{}
- }
- }
-
- r.mtx.RLock()
- if _, exists := r.collectorsByID[collectorID]; !exists {
- r.mtx.RUnlock()
- return false
- }
- r.mtx.RUnlock()
-
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- delete(r.collectorsByID, collectorID)
- for id := range descIDs {
- delete(r.descIDs, id)
- }
- // dimHashesByName is left untouched as those must be consistent
- // throughout the lifetime of a program.
- return true
-}
-
-// MustRegister implements Registerer.
-func (r *Registry) MustRegister(cs ...Collector) {
- for _, c := range cs {
- if err := r.Register(c); err != nil {
- panic(err)
- }
- }
-}
-
-// Gather implements Gatherer.
-func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
- var (
- checkedMetricChan = make(chan Metric, capMetricChan)
- uncheckedMetricChan = make(chan Metric, capMetricChan)
- metricHashes = map[uint64]struct{}{}
- wg sync.WaitGroup
- errs MultiError // The collected errors to return in the end.
- registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
- )
-
- r.mtx.RLock()
- goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
- metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
- checkedCollectors := make(chan Collector, len(r.collectorsByID))
- uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
- for _, collector := range r.collectorsByID {
- checkedCollectors <- collector
- }
- for _, collector := range r.uncheckedCollectors {
- uncheckedCollectors <- collector
- }
- // In case pedantic checks are enabled, we have to copy the map before
- // giving up the RLock.
- if r.pedanticChecksEnabled {
- registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
- for id := range r.descIDs {
- registeredDescIDs[id] = struct{}{}
- }
- }
- r.mtx.RUnlock()
-
- wg.Add(goroutineBudget)
-
- collectWorker := func() {
- for {
- select {
- case collector := <-checkedCollectors:
- collector.Collect(checkedMetricChan)
- case collector := <-uncheckedCollectors:
- collector.Collect(uncheckedMetricChan)
- default:
- return
- }
- wg.Done()
- }
- }
-
- // Start the first worker now to make sure at least one is running.
- go collectWorker()
- goroutineBudget--
-
- // Close checkedMetricChan and uncheckedMetricChan once all collectors
- // are collected.
- go func() {
- wg.Wait()
- close(checkedMetricChan)
- close(uncheckedMetricChan)
- }()
-
- // Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
- defer func() {
- if checkedMetricChan != nil {
- for range checkedMetricChan {
- }
- }
- if uncheckedMetricChan != nil {
- for range uncheckedMetricChan {
- }
- }
- }()
-
- // Copy the channel references so we can nil them out later to remove
- // them from the select statements below.
- cmc := checkedMetricChan
- umc := uncheckedMetricChan
-
- for {
- select {
- case metric, ok := <-cmc:
- if !ok {
- cmc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- registeredDescIDs,
- ))
- case metric, ok := <-umc:
- if !ok {
- umc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- nil,
- ))
- default:
- if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
- // All collectors are already being worked on or
-				// we have already started as many goroutines as
-				// there are collectors. Do the same as above,
- // just without the default.
- select {
- case metric, ok := <-cmc:
- if !ok {
- cmc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- registeredDescIDs,
- ))
- case metric, ok := <-umc:
- if !ok {
- umc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- nil,
- ))
- }
- break
- }
- // Start more workers.
- go collectWorker()
- goroutineBudget--
- runtime.Gosched()
- }
-		// Once both checkedMetricChan and uncheckedMetricChan are closed
- // and drained, the contraption above will nil out cmc and umc,
- // and then we can leave the collect loop here.
- if cmc == nil && umc == nil {
- break
- }
- }
- return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
-}
-
-// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
-// Prometheus text format, and writes it to a temporary file. Upon success, the
-// temporary file is renamed to the provided filename.
-//
-// This is intended for use with the textfile collector of the node exporter.
-// Note that the node exporter expects the filename to be suffixed with ".prom".
-func WriteToTextfile(filename string, g Gatherer) error {
- tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
- if err != nil {
- return err
- }
- defer os.Remove(tmp.Name())
-
- mfs, err := g.Gather()
- if err != nil {
- return err
- }
- for _, mf := range mfs {
- if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
- return err
- }
- }
- if err := tmp.Close(); err != nil {
- return err
- }
-
- if err := os.Chmod(tmp.Name(), 0644); err != nil {
- return err
- }
- return os.Rename(tmp.Name(), filename)
-}
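
A short usage sketch (the path is illustrative; note the ".prom" suffix the node exporter expects):

```go
if err := prometheus.WriteToTextfile(
	"/var/lib/node_exporter/textfile/app.prom", prometheus.DefaultGatherer,
); err != nil {
	log.Fatal(err) // assumes the standard library "log" package is imported
}
```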
-
-// processMetric is an internal helper method only used by the Gather method.
-func processMetric(
- metric Metric,
- metricFamiliesByName map[string]*dto.MetricFamily,
- metricHashes map[uint64]struct{},
- registeredDescIDs map[uint64]struct{},
-) error {
- desc := metric.Desc()
- // Wrapped metrics collected by an unchecked Collector can have an
- // invalid Desc.
- if desc.err != nil {
- return desc.err
- }
- dtoMetric := &dto.Metric{}
- if err := metric.Write(dtoMetric); err != nil {
- return fmt.Errorf("error collecting metric %v: %s", desc, err)
- }
- metricFamily, ok := metricFamiliesByName[desc.fqName]
- if ok { // Existing name.
- if metricFamily.GetHelp() != desc.help {
- return fmt.Errorf(
- "collected metric %s %s has help %q but should have %q",
- desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
- )
- }
- // TODO(beorn7): Simplify switch once Desc has type.
- switch metricFamily.GetType() {
- case dto.MetricType_COUNTER:
- if dtoMetric.Counter == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Counter",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_GAUGE:
- if dtoMetric.Gauge == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Gauge",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_SUMMARY:
- if dtoMetric.Summary == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Summary",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_UNTYPED:
- if dtoMetric.Untyped == nil {
- return fmt.Errorf(
- "collected metric %s %s should be Untyped",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_HISTOGRAM:
- if dtoMetric.Histogram == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Histogram",
- desc.fqName, dtoMetric,
- )
- }
- default:
- panic("encountered MetricFamily with invalid type")
- }
- } else { // New name.
- metricFamily = &dto.MetricFamily{}
- metricFamily.Name = proto.String(desc.fqName)
- metricFamily.Help = proto.String(desc.help)
- // TODO(beorn7): Simplify switch once Desc has type.
- switch {
- case dtoMetric.Gauge != nil:
- metricFamily.Type = dto.MetricType_GAUGE.Enum()
- case dtoMetric.Counter != nil:
- metricFamily.Type = dto.MetricType_COUNTER.Enum()
- case dtoMetric.Summary != nil:
- metricFamily.Type = dto.MetricType_SUMMARY.Enum()
- case dtoMetric.Untyped != nil:
- metricFamily.Type = dto.MetricType_UNTYPED.Enum()
- case dtoMetric.Histogram != nil:
- metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
- default:
- return fmt.Errorf("empty metric collected: %s", dtoMetric)
- }
- if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
- return err
- }
- metricFamiliesByName[desc.fqName] = metricFamily
- }
- if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
- return err
- }
- if registeredDescIDs != nil {
- // Is the desc registered at all?
- if _, exist := registeredDescIDs[desc.id]; !exist {
- return fmt.Errorf(
- "collected metric %s %s with unregistered descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
- return err
- }
- }
- metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
- return nil
-}
-
-// Gatherers is a slice of Gatherer instances that implements the Gatherer
-// interface itself. Its Gather method calls Gather on all Gatherers in the
-// slice in order and returns the merged results. Errors returned from the
-// Gather calls are all returned in a flattened MultiError. Duplicate and
-// inconsistent Metrics are skipped (first occurrence in slice order wins) and
-// reported in the returned error.
-//
-// Gatherers can be used to merge the Gather results from multiple
-// Registries. It also provides a way to directly inject existing MetricFamily
-// protobufs into the gathering by creating a custom Gatherer with a Gather
-// method that simply returns the existing MetricFamily protobufs. Note that no
-// registration is involved (in contrast to Collector registration), so
-// obviously registration-time checks cannot happen. Any inconsistencies between
-// the gathered MetricFamilies are reported as errors by the Gather method, and
-// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
-// (e.g. syntactically invalid metric or label names) will go undetected.
-type Gatherers []Gatherer
-
-// Gather implements Gatherer.
-func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
- var (
- metricFamiliesByName = map[string]*dto.MetricFamily{}
- metricHashes = map[uint64]struct{}{}
- errs MultiError // The collected errors to return in the end.
- )
-
- for i, g := range gs {
- mfs, err := g.Gather()
- if err != nil {
- if multiErr, ok := err.(MultiError); ok {
- for _, err := range multiErr {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
- }
- } else {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
- }
- }
- for _, mf := range mfs {
- existingMF, exists := metricFamiliesByName[mf.GetName()]
- if exists {
- if existingMF.GetHelp() != mf.GetHelp() {
- errs = append(errs, fmt.Errorf(
- "gathered metric family %s has help %q but should have %q",
- mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
- ))
- continue
- }
- if existingMF.GetType() != mf.GetType() {
- errs = append(errs, fmt.Errorf(
- "gathered metric family %s has type %s but should have %s",
- mf.GetName(), mf.GetType(), existingMF.GetType(),
- ))
- continue
- }
- } else {
- existingMF = &dto.MetricFamily{}
- existingMF.Name = mf.Name
- existingMF.Help = mf.Help
- existingMF.Type = mf.Type
- if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
- errs = append(errs, err)
- continue
- }
- metricFamiliesByName[mf.GetName()] = existingMF
- }
- for _, m := range mf.Metric {
- if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
- errs = append(errs, err)
- continue
- }
- existingMF.Metric = append(existingMF.Metric, m)
- }
- }
- }
- return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
-}
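
A sketch of merging gather results from the default registry and a custom one, as described above:

```go
reg := prometheus.NewRegistry()
// ... register custom collectors with reg ...
merged := prometheus.Gatherers{prometheus.DefaultGatherer, reg}
http.Handle("/metrics", promhttp.HandlerFor(merged, promhttp.HandlerOpts{}))
```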
-
-// checkSuffixCollisions checks for collisions with the “magic” suffixes the
-// Prometheus text format and the internal metric representation of the
-// Prometheus server add while flattening Summaries and Histograms.
-func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
- var (
- newName = mf.GetName()
- newType = mf.GetType()
- newNameWithoutSuffix = ""
- )
- switch {
- case strings.HasSuffix(newName, "_count"):
- newNameWithoutSuffix = newName[:len(newName)-6]
- case strings.HasSuffix(newName, "_sum"):
- newNameWithoutSuffix = newName[:len(newName)-4]
- case strings.HasSuffix(newName, "_bucket"):
- newNameWithoutSuffix = newName[:len(newName)-7]
- }
- if newNameWithoutSuffix != "" {
- if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
- switch existingMF.GetType() {
- case dto.MetricType_SUMMARY:
- if !strings.HasSuffix(newName, "_bucket") {
- return fmt.Errorf(
- "collected metric named %q collides with previously collected summary named %q",
- newName, newNameWithoutSuffix,
- )
- }
- case dto.MetricType_HISTOGRAM:
- return fmt.Errorf(
- "collected metric named %q collides with previously collected histogram named %q",
- newName, newNameWithoutSuffix,
- )
- }
- }
- }
- if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
- if _, ok := mfs[newName+"_count"]; ok {
- return fmt.Errorf(
- "collected histogram or summary named %q collides with previously collected metric named %q",
- newName, newName+"_count",
- )
- }
- if _, ok := mfs[newName+"_sum"]; ok {
- return fmt.Errorf(
- "collected histogram or summary named %q collides with previously collected metric named %q",
- newName, newName+"_sum",
- )
- }
- }
- if newType == dto.MetricType_HISTOGRAM {
- if _, ok := mfs[newName+"_bucket"]; ok {
- return fmt.Errorf(
- "collected histogram named %q collides with previously collected metric named %q",
- newName, newName+"_bucket",
- )
- }
- }
- return nil
-}
-
-// checkMetricConsistency checks if the provided Metric is consistent with the
-// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
-// name. If the resulting hash is already in the provided metricHashes, an error
-// is returned. If not, it is added to metricHashes.
-func checkMetricConsistency(
- metricFamily *dto.MetricFamily,
- dtoMetric *dto.Metric,
- metricHashes map[uint64]struct{},
-) error {
- name := metricFamily.GetName()
-
- // Type consistency with metric family.
- if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
- metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
- metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
- metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
- metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
- return fmt.Errorf(
- "collected metric %q { %s} is not a %s",
- name, dtoMetric, metricFamily.GetType(),
- )
- }
-
- previousLabelName := ""
- for _, labelPair := range dtoMetric.GetLabel() {
- labelName := labelPair.GetName()
- if labelName == previousLabelName {
- return fmt.Errorf(
- "collected metric %q { %s} has two or more labels with the same name: %s",
- name, dtoMetric, labelName,
- )
- }
- if !checkLabelName(labelName) {
- return fmt.Errorf(
- "collected metric %q { %s} has a label with an invalid name: %s",
- name, dtoMetric, labelName,
- )
- }
- if dtoMetric.Summary != nil && labelName == quantileLabel {
- return fmt.Errorf(
- "collected metric %q { %s} must not have an explicit %q label",
- name, dtoMetric, quantileLabel,
- )
- }
- if !utf8.ValidString(labelPair.GetValue()) {
- return fmt.Errorf(
- "collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
- name, dtoMetric, labelName, labelPair.GetValue())
- }
- previousLabelName = labelName
- }
-
- // Is the metric unique (i.e. no other metric with the same name and the same labels)?
- h := xxhash.New()
- h.WriteString(name)
- h.Write(separatorByteSlice)
- // Make sure label pairs are sorted. We depend on it for the consistency
- // check.
- if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
- // We cannot sort dtoMetric.Label in place as it is immutable by contract.
- copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
- copy(copiedLabels, dtoMetric.Label)
- sort.Sort(labelPairSorter(copiedLabels))
- dtoMetric.Label = copiedLabels
- }
- for _, lp := range dtoMetric.Label {
- h.WriteString(lp.GetName())
- h.Write(separatorByteSlice)
- h.WriteString(lp.GetValue())
- h.Write(separatorByteSlice)
- }
- hSum := h.Sum64()
- if _, exists := metricHashes[hSum]; exists {
- return fmt.Errorf(
- "collected metric %q { %s} was collected before with the same name and label values",
- name, dtoMetric,
- )
- }
- metricHashes[hSum] = struct{}{}
- return nil
-}
-
-func checkDescConsistency(
- metricFamily *dto.MetricFamily,
- dtoMetric *dto.Metric,
- desc *Desc,
-) error {
- // Desc help consistency with metric family help.
- if metricFamily.GetHelp() != desc.help {
- return fmt.Errorf(
- "collected metric %s %s has help %q but should have %q",
- metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
- )
- }
-
- // Is the desc consistent with the content of the metric?
- lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
- copy(lpsFromDesc, desc.constLabelPairs)
- for _, l := range desc.variableLabels {
- lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
- Name: proto.String(l),
- })
- }
- if len(lpsFromDesc) != len(dtoMetric.Label) {
- return fmt.Errorf(
- "labels in collected metric %s %s are inconsistent with descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- sort.Sort(labelPairSorter(lpsFromDesc))
- for i, lpFromDesc := range lpsFromDesc {
- lpFromMetric := dtoMetric.Label[i]
- if lpFromDesc.GetName() != lpFromMetric.GetName() ||
- lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
- return fmt.Errorf(
- "labels in collected metric %s %s are inconsistent with descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- }
- return nil
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
deleted file mode 100644
index f3c1440d1c6..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ /dev/null
@@ -1,737 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "math"
- "runtime"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/beorn7/perks/quantile"
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// quantileLabel is used for the label that defines the quantile in a
-// summary.
-const quantileLabel = "quantile"
-
-// A Summary captures individual observations from an event or sample stream and
-// summarizes them in a manner similar to traditional summary statistics: 1. sum
-// of observations, 2. observation count, 3. rank estimations.
-//
-// A typical use-case is the observation of request latencies. By default, a
-// Summary provides the median, the 90th and the 99th percentile of the latency
-// as rank estimations. However, the default behavior will change in the
-// upcoming v1.0.0 of the library. There will be no rank estimations at all by
-// default. For a sane transition, it is recommended to set the desired rank
-// estimations explicitly.
-//
-// Note that the rank estimations cannot be aggregated in a meaningful way with
-// the Prometheus query language (i.e. you cannot average or add them). If you
-// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
-// queries served across all instances of a service), consider the Histogram
-// metric type. See the Prometheus documentation for more details.
-//
-// To create Summary instances, use NewSummary.
-type Summary interface {
- Metric
- Collector
-
- // Observe adds a single observation to the summary.
- Observe(float64)
-}
-
-var errQuantileLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in summaries", quantileLabel,
-)
-
-// Default values for SummaryOpts.
-const (
- // DefMaxAge is the default duration for which observations stay
- // relevant.
- DefMaxAge time.Duration = 10 * time.Minute
- // DefAgeBuckets is the default number of buckets used to calculate the
- // age of observations.
- DefAgeBuckets = 5
- // DefBufCap is the standard buffer size for collecting Summary observations.
- DefBufCap = 500
-)
-
-// SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name to a non-empty string. While all other fields are
-// optional and can safely be left at their zero value, it is recommended to set
-// a help string and to explicitly set the Objectives field to the desired value
-// as the default value will change in the upcoming v1.0.0 of the library.
-type SummaryOpts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Summary (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the Summary must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this Summary.
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this metric. Metrics
- // with the same fully-qualified name must have the same label names in
- // their ConstLabels.
- //
- // Due to the way a Summary is represented in the Prometheus text format
- // and how it is handled by the Prometheus server internally, “quantile”
- // is an illegal label name. Construction of a Summary or SummaryVec
- // will panic if this label name is used in ConstLabels.
- //
- // ConstLabels are only used rarely. In particular, do not use them to
- // attach the same labels to all your metrics. Those use cases are
- // better covered by target labels set by the scraping Prometheus
- // server, or by one specific metric (e.g. a build_info or a
- // machine_role metric). See also
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
- ConstLabels Labels
-
- // Objectives defines the quantile rank estimates with their respective
- // absolute error. If Objectives[q] = e, then the value reported for q
- // will be the φ-quantile value for some φ between q-e and q+e. The
- // default value is an empty map, resulting in a summary without
- // quantiles.
- Objectives map[float64]float64
-
- // MaxAge defines the duration for which an observation stays relevant
- // for the summary. Must be positive. The default value is DefMaxAge.
- MaxAge time.Duration
-
- // AgeBuckets is the number of buckets used to exclude observations that
- // are older than MaxAge from the summary. A higher number has a
- // resource penalty, so only increase it if the higher resolution is
- // really required. For very high observation rates, you might want to
- // reduce the number of age buckets. With only one age bucket, you will
- // effectively see a complete reset of the summary each time MaxAge has
- // passed. The default value is DefAgeBuckets.
- AgeBuckets uint32
-
- // BufCap defines the default sample stream buffer size. The default
- // value of DefBufCap should suffice for most uses. If there is a need
- // to increase the value, a multiple of 500 is recommended (because that
-	// is the internal buffer size of the underlying package
-	// "github.com/beorn7/perks/quantile").
- BufCap uint32
-}
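
Given the note above about the changing Objectives default, a sketch with explicit quantile objectives (metric name and error margins are illustrative):

```go
latency := prometheus.NewSummary(prometheus.SummaryOpts{
	Name: "rpc_duration_seconds",
	Help: "RPC latency distribution.",
	// Median, 90th, and 99th percentile with absolute error margins.
	Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	MaxAge:     5 * time.Minute, // assumes "time" is imported
})
prometheus.MustRegister(latency)
latency.Observe(0.42)
```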
-
-// There is a problem with the sliding-window decay algorithm: the Merge method
-// of perks/quantile is actually not working as advertised - and it might be
-// unfixable, as the underlying algorithm is apparently not capable of merging
-// summaries in the first place. To avoid using Merge, we are currently adding
-// observations to _each_ age bucket, i.e. the effort to add a sample is
-// essentially multiplied by the number of age buckets. When rotating age
-// buckets, we empty the previous head stream. On scrape time, we simply take
-// the quantiles from the head stream (no merging required). Result: More effort
-// on observation time, less effort on scrape time, which is exactly the
-// opposite of what we try to accomplish, but at least the results are correct.
-//
-// The quite elegant previous contraption to merge the age buckets efficiently
-// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
-// can't be used anymore.
-
-// NewSummary creates a new Summary based on the provided SummaryOpts.
-func NewSummary(opts SummaryOpts) Summary {
- return newSummary(
- NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ),
- opts,
- )
-}
-
-func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
- if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
- }
-
- for _, n := range desc.variableLabels {
- if n == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
- for _, lp := range desc.constLabelPairs {
- if lp.GetName() == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
-
- if opts.Objectives == nil {
- opts.Objectives = map[float64]float64{}
- }
-
- if opts.MaxAge < 0 {
- panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
- }
- if opts.MaxAge == 0 {
- opts.MaxAge = DefMaxAge
- }
-
- if opts.AgeBuckets == 0 {
- opts.AgeBuckets = DefAgeBuckets
- }
-
- if opts.BufCap == 0 {
- opts.BufCap = DefBufCap
- }
-
- if len(opts.Objectives) == 0 {
- // Use the lock-free implementation of a Summary without objectives.
- s := &noObjectivesSummary{
- desc: desc,
- labelPairs: makeLabelPairs(desc, labelValues),
- counts: [2]*summaryCounts{{}, {}},
- }
- s.init(s) // Init self-collection.
- return s
- }
-
- s := &summary{
- desc: desc,
-
- objectives: opts.Objectives,
- sortedObjectives: make([]float64, 0, len(opts.Objectives)),
-
- labelPairs: makeLabelPairs(desc, labelValues),
-
- hotBuf: make([]float64, 0, opts.BufCap),
- coldBuf: make([]float64, 0, opts.BufCap),
- streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
- }
- s.headStreamExpTime = time.Now().Add(s.streamDuration)
- s.hotBufExpTime = s.headStreamExpTime
-
- for i := uint32(0); i < opts.AgeBuckets; i++ {
- s.streams = append(s.streams, s.newStream())
- }
- s.headStream = s.streams[0]
-
- for qu := range s.objectives {
- s.sortedObjectives = append(s.sortedObjectives, qu)
- }
- sort.Float64s(s.sortedObjectives)
-
- s.init(s) // Init self-collection.
- return s
-}
-
-type summary struct {
- selfCollector
-
- bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
- mtx sync.Mutex // Protects every other moving part.
- // Lock bufMtx before mtx if both are needed.
-
- desc *Desc
-
- objectives map[float64]float64
- sortedObjectives []float64
-
- labelPairs []*dto.LabelPair
-
- sum float64
- cnt uint64
-
- hotBuf, coldBuf []float64
-
- streams []*quantile.Stream
- streamDuration time.Duration
- headStream *quantile.Stream
- headStreamIdx int
- headStreamExpTime, hotBufExpTime time.Time
-}
-
-func (s *summary) Desc() *Desc {
- return s.desc
-}
-
-func (s *summary) Observe(v float64) {
- s.bufMtx.Lock()
- defer s.bufMtx.Unlock()
-
- now := time.Now()
- if now.After(s.hotBufExpTime) {
- s.asyncFlush(now)
- }
- s.hotBuf = append(s.hotBuf, v)
- if len(s.hotBuf) == cap(s.hotBuf) {
- s.asyncFlush(now)
- }
-}
-
-func (s *summary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
- qs := make([]*dto.Quantile, 0, len(s.objectives))
-
- s.bufMtx.Lock()
- s.mtx.Lock()
- // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
- s.swapBufs(time.Now())
- s.bufMtx.Unlock()
-
- s.flushColdBuf()
- sum.SampleCount = proto.Uint64(s.cnt)
- sum.SampleSum = proto.Float64(s.sum)
-
- for _, rank := range s.sortedObjectives {
- var q float64
- if s.headStream.Count() == 0 {
- q = math.NaN()
- } else {
- q = s.headStream.Query(rank)
- }
- qs = append(qs, &dto.Quantile{
- Quantile: proto.Float64(rank),
- Value: proto.Float64(q),
- })
- }
-
- s.mtx.Unlock()
-
- if len(qs) > 0 {
- sort.Sort(quantSort(qs))
- }
- sum.Quantile = qs
-
- out.Summary = sum
- out.Label = s.labelPairs
- return nil
-}
-
-func (s *summary) newStream() *quantile.Stream {
- return quantile.NewTargeted(s.objectives)
-}
-
-// asyncFlush needs bufMtx locked.
-func (s *summary) asyncFlush(now time.Time) {
- s.mtx.Lock()
- s.swapBufs(now)
-
- // Unblock the original goroutine that was responsible for the mutation
- // that triggered the compaction. But hold onto the global non-buffer
- // state mutex until the operation finishes.
- go func() {
- s.flushColdBuf()
- s.mtx.Unlock()
- }()
-}
-
-// rotateStreams needs mtx AND bufMtx locked.
-func (s *summary) maybeRotateStreams() {
- for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
- s.headStream.Reset()
- s.headStreamIdx++
- if s.headStreamIdx >= len(s.streams) {
- s.headStreamIdx = 0
- }
- s.headStream = s.streams[s.headStreamIdx]
- s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
- }
-}
-
-// flushColdBuf needs mtx locked.
-func (s *summary) flushColdBuf() {
- for _, v := range s.coldBuf {
- for _, stream := range s.streams {
- stream.Insert(v)
- }
- s.cnt++
- s.sum += v
- }
- s.coldBuf = s.coldBuf[0:0]
- s.maybeRotateStreams()
-}
-
-// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
-func (s *summary) swapBufs(now time.Time) {
- if len(s.coldBuf) != 0 {
- panic("coldBuf is not empty")
- }
- s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
- // hotBuf is now empty and gets new expiration set.
- for now.After(s.hotBufExpTime) {
- s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
- }
-}
-
-type summaryCounts struct {
- // sumBits contains the bits of the float64 representing the sum of all
- // observations. sumBits and count have to go first in the struct to
- // guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- sumBits uint64
- count uint64
-}
-
-type noObjectivesSummary struct {
- // countAndHotIdx enables lock-free writes with use of atomic updates.
- // The most significant bit is the hot index [0 or 1] of the count field
- // below. Observe calls update the hot one. All remaining bits count the
- // number of Observe calls. Observe starts by incrementing this counter,
- // and finish by incrementing the count field in the respective
- // summaryCounts, as a marker for completion.
- //
- // Calls of the Write method (which are non-mutating reads from the
-	// perspective of the summary) swap the hot and cold counts under the
-	// writeMtx lock. A cooldown is awaited (while locked) by comparing the
-	// number of observations with the initiation count. Once they match, the
- // last observation on the now cool one has completed. All cool fields must
- // be merged into the new hot before releasing writeMtx.
-
- // Fields with atomic access first! See alignment constraint:
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- countAndHotIdx uint64
-
- selfCollector
- desc *Desc
- writeMtx sync.Mutex // Only used in the Write method.
-
- // Two counts, one is "hot" for lock-free observations, the other is
- // "cold" for writing out a dto.Metric. It has to be an array of
-	// pointers to guarantee 64bit alignment of the summaryCounts, see
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- counts [2]*summaryCounts
-
- labelPairs []*dto.LabelPair
-}
-
-func (s *noObjectivesSummary) Desc() *Desc {
- return s.desc
-}
-
-func (s *noObjectivesSummary) Observe(v float64) {
-	// We increment s.countAndHotIdx so that the counter in the lower
- // 63 bits gets incremented. At the same time, we get the new value
- // back, which we can use to find the currently-hot counts.
- n := atomic.AddUint64(&s.countAndHotIdx, 1)
- hotCounts := s.counts[n>>63]
-
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- break
- }
- }
- // Increment count last as we take it as a signal that the observation
- // is complete.
- atomic.AddUint64(&hotCounts.count, 1)
-}
-
-func (s *noObjectivesSummary) Write(out *dto.Metric) error {
- // For simplicity, we protect this whole method by a mutex. It is not in
- // the hot path, i.e. Observe is called much more often than Write. The
- // complication of making Write lock-free isn't worth it, if possible at
- // all.
- s.writeMtx.Lock()
- defer s.writeMtx.Unlock()
-
- // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
- // without touching the count bits. See the struct comments for a full
- // description of the algorithm.
- n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
- // count is contained unchanged in the lower 63 bits.
- count := n & ((1 << 63) - 1)
- // The most significant bit tells us which counts is hot. The complement
- // is thus the cold one.
- hotCounts := s.counts[n>>63]
- coldCounts := s.counts[(^n)>>63]
-
- // Await cooldown.
- for count != atomic.LoadUint64(&coldCounts.count) {
- runtime.Gosched() // Let observations get work done.
- }
-
- sum := &dto.Summary{
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
- }
-
- out.Summary = sum
- out.Label = s.labelPairs
-
- // Finally add all the cold counts to the new hot counts and reset the cold counts.
- atomic.AddUint64(&hotCounts.count, count)
- atomic.StoreUint64(&coldCounts.count, 0)
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- atomic.StoreUint64(&coldCounts.sumBits, 0)
- break
- }
- }
- return nil
-}
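
The single countAndHotIdx word does double duty: its most significant bit selects the hot summaryCounts while the lower 63 bits count initiated observations. A self-contained sketch of the same hot/cold protocol for a bare sum/count pair, using illustrative names rather than the library's types:

package main

import (
	"fmt"
	"math"
	"runtime"
	"sync/atomic"
)

type counts struct {
	sumBits uint64 // bits of a float64 sum, updated via CAS
	count   uint64 // number of completed observations
}

type hotCold struct {
	countAndHotIdx uint64 // MSB: hot index; lower 63 bits: started observations
	counts         [2]*counts
}

func (h *hotCold) Observe(v float64) {
	n := atomic.AddUint64(&h.countAndHotIdx, 1)
	hot := h.counts[n>>63]
	for {
		oldBits := atomic.LoadUint64(&hot.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&hot.sumBits, oldBits, newBits) {
			break
		}
	}
	// Incrementing count last marks the observation as complete.
	atomic.AddUint64(&hot.count, 1)
}

// Snapshot flips the hot bit, waits until all observations started on the
// now-cold side have completed, reads it, and merges it back into the hot side.
func (h *hotCold) Snapshot() (count uint64, sum float64) {
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	count = n & ((1 << 63) - 1)
	hot, cold := h.counts[n>>63], h.counts[(^n)>>63]
	for count != atomic.LoadUint64(&cold.count) {
		runtime.Gosched() // await cooldown
	}
	sum = math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
	atomic.AddUint64(&hot.count, count)
	atomic.StoreUint64(&cold.count, 0)
	for {
		oldBits := atomic.LoadUint64(&hot.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum)
		if atomic.CompareAndSwapUint64(&hot.sumBits, oldBits, newBits) {
			atomic.StoreUint64(&cold.sumBits, 0)
			break
		}
	}
	return count, sum
}

func main() {
	h := &hotCold{counts: [2]*counts{{}, {}}}
	h.Observe(1.5)
	h.Observe(2.5)
	fmt.Println(h.Snapshot()) // 2 4
}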
-
-type quantSort []*dto.Quantile
-
-func (s quantSort) Len() int {
- return len(s)
-}
-
-func (s quantSort) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s quantSort) Less(i, j int) bool {
- return s[i].GetQuantile() < s[j].GetQuantile()
-}
-
-// SummaryVec is a Collector that bundles a set of Summaries that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. HTTP request latencies, partitioned by status code and method). Create
-// instances with NewSummaryVec.
-type SummaryVec struct {
- *metricVec
-}
-
-// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
-// partitioned by the given label names.
-//
-// Due to the way a Summary is represented in the Prometheus text format and how
-// it is handled by the Prometheus server internally, “quantile” is an illegal
-// label name. NewSummaryVec will panic if this label name is used.
-func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
- for _, ln := range labelNames {
- if ln == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &SummaryVec{
- metricVec: newMetricVec(desc, func(lvs ...string) Metric {
- return newSummary(desc, opts, lvs...)
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Summary for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
-// label values is accessed for the first time, a new Summary is created.
-//
-// It is possible to call this method without using the returned Summary to only
-// create the new Summary but leave it at its starting value, a Summary without
-// any observations.
-//
-// Keeping the Summary for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Summary from the SummaryVec. In that case,
-// the Summary will still exist, but it will not be exported anymore, even if a
-// Summary with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of VariableLabels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
- metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Summary for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Summary is created. Implications of
-// creating a Summary without using it and keeping the Summary for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
- metric, err := v.metricVec.getMetricWith(labels)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
- s, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return s
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (v *SummaryVec) With(labels Labels) Observer {
- s, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return s
-}
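
For reference, a typical use of the accessors documented above; the metric name, objectives, and labels are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name:       "http_request_duration_seconds",
		Help:       "HTTP request latencies.",
		Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
	}, []string{"code", "method"})
	prometheus.MustRegister(latency)

	// Panicking shortcuts; use GetMetricWithLabelValues to handle the error.
	latency.WithLabelValues("404", "GET").Observe(0.042)
	latency.With(prometheus.Labels{"code": "200", "method": "POST"}).Observe(0.021)

	if _, err := latency.GetMetricWithLabelValues("200"); err != nil {
		fmt.Println(err) // wrong number of label values
	}
}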
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the SummaryVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
- vec, err := v.curryWith(labels)
- if vec != nil {
- return &SummaryVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
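
A short usage sketch of currying, pre-binding one label so call sites supply only the rest (names illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Request latencies.",
	}, []string{"service", "method"})
	prometheus.MustRegister(latency)

	// Bind "service" once; the curried vector only needs "method".
	apiLatency := latency.MustCurryWith(prometheus.Labels{"service": "api"})
	apiLatency.WithLabelValues("GET").Observe(0.012)

	// Both vectors share the same underlying metrics.
	latency.WithLabelValues("api", "GET").Observe(0.034)
}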
-
-type constSummary struct {
- desc *Desc
- count uint64
- sum float64
- quantiles map[float64]float64
- labelPairs []*dto.LabelPair
-}
-
-func (s *constSummary) Desc() *Desc {
- return s.desc
-}
-
-func (s *constSummary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
- qs := make([]*dto.Quantile, 0, len(s.quantiles))
-
- sum.SampleCount = proto.Uint64(s.count)
- sum.SampleSum = proto.Float64(s.sum)
-
- for rank, q := range s.quantiles {
- qs = append(qs, &dto.Quantile{
- Quantile: proto.Float64(rank),
- Value: proto.Float64(q),
- })
- }
-
- if len(qs) > 0 {
- sort.Sort(quantSort(qs))
- }
- sum.Quantile = qs
-
- out.Summary = sum
- out.Label = s.labelPairs
-
- return nil
-}
-
-// NewConstSummary returns a metric representing a Prometheus summary with fixed
-// values for the count, sum, and quantiles. As those parameters cannot be
-// changed, the returned value does not implement the Summary interface (but
-// only the Metric interface). Users of this package will not have much use for
-// it in regular operations. However, when implementing custom Collectors, it is
-// useful as a throw-away metric that is generated on the fly to send it to
-// Prometheus in the Collect method.
-//
-// quantiles maps ranks to quantile values. For example, a median latency of
-// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-// map[float64]float64{0.5: 0.23, 0.99: 0.56}
-//
-// NewConstSummary returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc or if Desc is invalid.
-func NewConstSummary(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- labelValues ...string,
-) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
- return nil, err
- }
- return &constSummary{
- desc: desc,
- count: count,
- sum: sum,
- quantiles: quantiles,
- labelPairs: makeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstSummary is a version of NewConstSummary that panics where
-// NewConstSummary would have returned an error.
-func MustNewConstSummary(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- labelValues ...string,
-) Metric {
- m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
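
A minimal custom-Collector sketch using the const summary as described above; the cacheStats source and all names are hypothetical:

package main

import "github.com/prometheus/client_golang/prometheus"

// cacheStats stands in for an external system whose stats we mirror.
type cacheStats struct {
	count uint64
	sum   float64
}

type cacheCollector struct {
	desc  *prometheus.Desc
	stats func() cacheStats
}

func (c *cacheCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *cacheCollector) Collect(ch chan<- prometheus.Metric) {
	s := c.stats()
	// Throw-away metric generated on the fly, as described above.
	ch <- prometheus.MustNewConstSummary(
		c.desc, s.count, s.sum,
		map[float64]float64{0.5: 0.23, 0.99: 0.56}, // quantile rank -> value
		"main", // value for the "cache" label
	)
}

func main() {
	prometheus.MustRegister(&cacheCollector{
		desc: prometheus.NewDesc("cache_op_duration_seconds",
			"Cache operation latencies.", []string{"cache"}, nil),
		stats: func() cacheStats { return cacheStats{count: 100, sum: 42.5} },
	})
}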
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
deleted file mode 100644
index 8d5f1052337..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import "time"
-
-// Timer is a helper type to time functions. Use NewTimer to create new
-// instances.
-type Timer struct {
- begin time.Time
- observer Observer
-}
-
-// NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. Timer is usually used to time a function call in the
-// following way:
-// func TimeMe() {
-// timer := NewTimer(myHistogram)
-// defer timer.ObserveDuration()
-// // Do actual work.
-// }
-func NewTimer(o Observer) *Timer {
- return &Timer{
- begin: time.Now(),
- observer: o,
- }
-}
-
-// ObserveDuration records the duration passed since the Timer was created with
-// NewTimer. It calls the Observe method of the Observer provided during
-// construction with the duration in seconds as an argument. The observed
-// duration is also returned. ObserveDuration is usually called with a defer
-// statement.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go 1.9+.
-func (t *Timer) ObserveDuration() time.Duration {
- d := time.Since(t.begin)
- if t.observer != nil {
- t.observer.Observe(d.Seconds())
- }
- return d
-}
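
A usage sketch beyond the doc-comment example: since ObserveDuration also returns the elapsed time, it can be inspected at the call site (metric name and threshold illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var dbLatency = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "db_query_duration_seconds",
	Help:    "Database query latencies.",
	Buckets: prometheus.DefBuckets,
})

func query() {
	timer := prometheus.NewTimer(dbLatency)
	defer func() {
		d := timer.ObserveDuration() // records and returns the duration
		if d > time.Second {
			fmt.Println("slow query:", d)
		}
	}()
	time.Sleep(10 * time.Millisecond) // stand-in for real work
}

func main() {
	prometheus.MustRegister(dbLatency)
	query()
}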
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
deleted file mode 100644
index 0f9ce63f409..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// UntypedOpts is an alias for Opts. See there for doc comments.
-type UntypedOpts Opts
-
-// UntypedFunc works like GaugeFunc but the collected metric is of type
-// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
-// type.
-//
-// To create UntypedFunc instances, use NewUntypedFunc.
-type UntypedFunc interface {
- Metric
- Collector
-}
-
-// NewUntypedFunc creates a new UntypedFunc based on the provided
-// UntypedOpts. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where an UntypedFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe.
-func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), UntypedValue, function)
-}
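
A usage sketch mirroring an external value of unknown type; the variable being mirrored is illustrative:

package main

import (
	"sync/atomic"

	"github.com/prometheus/client_golang/prometheus"
)

var externalValue int64 // imagine this mirrors a third-party system

func main() {
	uf := prometheus.NewUntypedFunc(prometheus.UntypedOpts{
		Name: "external_widget_value",
		Help: "Value mirrored from an external system of unknown type.",
	}, func() float64 {
		// Must be concurrency-safe: collection may call it from any goroutine.
		return float64(atomic.LoadInt64(&externalValue))
	})
	prometheus.MustRegister(uf)
	atomic.StoreInt64(&externalValue, 7)
}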
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
deleted file mode 100644
index 6206928cc67..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "sort"
- "time"
- "unicode/utf8"
-
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// ValueType is an enumeration of metric types that represent a simple value.
-type ValueType int
-
-// Possible values for the ValueType enum. Use UntypedValue to mark a metric
-// with an unknown type.
-const (
- _ ValueType = iota
- CounterValue
- GaugeValue
- UntypedValue
-)
-
-// valueFunc is a generic metric for simple values retrieved on collect time
-// from a function. It implements Metric and Collector. Its effective type is
-// determined by ValueType. This is a low-level building block used by the
-// library to back the implementations of CounterFunc, GaugeFunc, and
-// UntypedFunc.
-type valueFunc struct {
- selfCollector
-
- desc *Desc
- valType ValueType
- function func() float64
- labelPairs []*dto.LabelPair
-}
-
-// newValueFunc returns a newly allocated valueFunc with the given Desc and
-// ValueType. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where a valueFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe.
-func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
- result := &valueFunc{
- desc: desc,
- valType: valueType,
- function: function,
- labelPairs: makeLabelPairs(desc, nil),
- }
- result.init(result)
- return result
-}
-
-func (v *valueFunc) Desc() *Desc {
- return v.desc
-}
-
-func (v *valueFunc) Write(out *dto.Metric) error {
- return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
-}
-
-// NewConstMetric returns a metric with one fixed value that cannot be
-// changed. Users of this package will not have much use for it in regular
-// operations. However, when implementing custom Collectors, it is useful as a
-// throw-away metric that is generated on the fly to send it to Prometheus in
-// the Collect method. NewConstMetric returns an error if the length of
-// labelValues is not consistent with the variable labels in Desc or if Desc is
-// invalid.
-func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
- return nil, err
- }
- return &constMetric{
- desc: desc,
- valType: valueType,
- val: value,
- labelPairs: makeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstMetric is a version of NewConstMetric that panics where
-// NewConstMetric would have returned an error.
-func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
- m, err := NewConstMetric(desc, valueType, value, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
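
A minimal custom-Collector sketch around NewConstMetric, complementing the const-summary example above; the boot counter is a stand-in for any value read at collect time:

package main

import "github.com/prometheus/client_golang/prometheus"

var bootsDesc = prometheus.NewDesc(
	"machine_boots_total", "Boot count read from an external source.",
	nil, nil,
)

type bootCollector struct{}

func (bootCollector) Describe(ch chan<- *prometheus.Desc) { ch <- bootsDesc }

func (bootCollector) Collect(ch chan<- prometheus.Metric) {
	boots := 17.0 // stand-in for a value read at collect time
	ch <- prometheus.MustNewConstMetric(bootsDesc, prometheus.CounterValue, boots)
}

func main() {
	prometheus.MustRegister(bootCollector{})
}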
-
-type constMetric struct {
- desc *Desc
- valType ValueType
- val float64
- labelPairs []*dto.LabelPair
-}
-
-func (m *constMetric) Desc() *Desc {
- return m.desc
-}
-
-func (m *constMetric) Write(out *dto.Metric) error {
- return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
-}
-
-func populateMetric(
- t ValueType,
- v float64,
- labelPairs []*dto.LabelPair,
- e *dto.Exemplar,
- m *dto.Metric,
-) error {
- m.Label = labelPairs
- switch t {
- case CounterValue:
- m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
- case GaugeValue:
- m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
- case UntypedValue:
- m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
- default:
- return fmt.Errorf("encountered unknown type %v", t)
- }
- return nil
-}
-
-func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
- totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
- if totalLen == 0 {
- // Super fast path.
- return nil
- }
- if len(desc.variableLabels) == 0 {
- // Moderately fast path.
- return desc.constLabelPairs
- }
- labelPairs := make([]*dto.LabelPair, 0, totalLen)
- for i, n := range desc.variableLabels {
- labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(n),
- Value: proto.String(labelValues[i]),
- })
- }
- labelPairs = append(labelPairs, desc.constLabelPairs...)
- sort.Sort(labelPairSorter(labelPairs))
- return labelPairs
-}
-
-// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
-const ExemplarMaxRunes = 64
-
-// newExemplar creates a new dto.Exemplar from the provided values. An error is
-// returned if any of the label names or values are invalid or if the total
-// number of runes in the label names and values exceeds ExemplarMaxRunes.
-func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
- e := &dto.Exemplar{}
- e.Value = proto.Float64(value)
- tsProto, err := ptypes.TimestampProto(ts)
- if err != nil {
- return nil, err
- }
- e.Timestamp = tsProto
- labelPairs := make([]*dto.LabelPair, 0, len(l))
- var runes int
- for name, value := range l {
- if !checkLabelName(name) {
- return nil, fmt.Errorf("exemplar label name %q is invalid", name)
- }
- runes += utf8.RuneCountInString(name)
- if !utf8.ValidString(value) {
- return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
- }
- runes += utf8.RuneCountInString(value)
- labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(name),
- Value: proto.String(value),
- })
- }
- if runes > ExemplarMaxRunes {
- return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
- }
- e.Label = labelPairs
- return e, nil
-}
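
The rune budget is easy to check in isolation. A standalone sketch of the same validation (not the unexported newExemplar itself; the helper name is illustrative):

package main

import (
	"fmt"
	"unicode/utf8"
)

const exemplarMaxRunes = 64 // mirrors ExemplarMaxRunes

// exemplarLabelsFit reports whether the combined rune count of all label
// names and values stays within the exemplar budget.
func exemplarLabelsFit(labels map[string]string) bool {
	runes := 0
	for name, value := range labels {
		runes += utf8.RuneCountInString(name) + utf8.RuneCountInString(value)
	}
	return runes <= exemplarMaxRunes
}

func main() {
	fmt.Println(exemplarLabelsFit(map[string]string{"trace_id": "abc123"})) // true
	fmt.Println(exemplarLabelsFit(map[string]string{
		"trace_id": "0123456789012345678901234567890123456789012345678901234567890",
	})) // false: 8 + 61 = 69 runes
}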
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
deleted file mode 100644
index d53848dc481..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ /dev/null
@@ -1,484 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "sync"
-
- "github.com/prometheus/common/model"
-)
-
-// metricVec is a Collector to bundle metrics of the same name that differ in
-// their label values. metricVec is not used directly (and therefore
-// unexported). It is used as a building block for implementations of vectors of
-// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
-// It also handles label currying.
-type metricVec struct {
- *metricMap
-
- curry []curriedLabelValue
-
- // hashAdd and hashAddByte can be replaced for testing collision handling.
- hashAdd func(h uint64, s string) uint64
- hashAddByte func(h uint64, b byte) uint64
-}
-
-// newMetricVec returns an initialized metricVec.
-func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
- return &metricVec{
- metricMap: &metricMap{
- metrics: map[uint64][]metricWithLabelValues{},
- desc: desc,
- newMetric: newMetric,
- },
- hashAdd: hashAdd,
- hashAddByte: hashAddByte,
- }
-}
-
-// DeleteLabelValues removes the metric where the variable labels are the same
-// as those passed in as labels (same order as the VariableLabels in Desc). It
-// returns true if a metric was deleted.
-//
-// It is not an error if the number of label values is not the same as the
-// number of VariableLabels in Desc. However, such inconsistent label count can
-// never match an actual metric, so the method will always return false in that
-// case.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider Delete(Labels) as an
-// alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the CounterVec example.
-func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
- h, err := m.hashLabelValues(lvs)
- if err != nil {
- return false
- }
-
- return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
-}
-
-// Delete deletes the metric where the variable labels are the same as those
-// passed in as labels. It returns true if a metric was deleted.
-//
-// It is not an error if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc. However, such inconsistent Labels
-// can never match an actual metric, so the method will always return false in
-// that case.
-//
-// This method is used for the same purpose as DeleteLabelValues(...string). See
-// there for pros and cons of the two methods.
-func (m *metricVec) Delete(labels Labels) bool {
- h, err := m.hashLabels(labels)
- if err != nil {
- return false
- }
-
- return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
-}
-
-// Without explicit forwarding of Describe, Collect, Reset, those methods won't
-// show up in GoDoc.
-
-// Describe implements Collector.
-func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
-
-// Collect implements Collector.
-func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
-
-// Reset deletes all metrics in this vector.
-func (m *metricVec) Reset() { m.metricMap.Reset() }
-
-func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
- var (
- newCurry []curriedLabelValue
- oldCurry = m.curry
- iCurry int
- )
- for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
- if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
- if ok {
- return nil, fmt.Errorf("label name %q is already curried", label)
- }
- newCurry = append(newCurry, oldCurry[iCurry])
- iCurry++
- } else {
- if !ok {
- continue // Label stays uncurried.
- }
- newCurry = append(newCurry, curriedLabelValue{i, val})
- }
- }
- if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
- return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
- }
-
- return &metricVec{
- metricMap: m.metricMap,
- curry: newCurry,
- hashAdd: m.hashAdd,
- hashAddByte: m.hashAddByte,
- }, nil
-}
-
-func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
- h, err := m.hashLabelValues(lvs)
- if err != nil {
- return nil, err
- }
-
- return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
-}
-
-func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
- h, err := m.hashLabels(labels)
- if err != nil {
- return nil, err
- }
-
- return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
-}
-
-func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
- if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
- return 0, err
- }
-
- var (
- h = hashNew()
- curry = m.curry
- iVals, iCurry int
- )
- for i := 0; i < len(m.desc.variableLabels); i++ {
- if iCurry < len(curry) && curry[iCurry].index == i {
- h = m.hashAdd(h, curry[iCurry].value)
- iCurry++
- } else {
- h = m.hashAdd(h, vals[iVals])
- iVals++
- }
- h = m.hashAddByte(h, model.SeparatorByte)
- }
- return h, nil
-}
-
-func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
- if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
- return 0, err
- }
-
- var (
- h = hashNew()
- curry = m.curry
- iCurry int
- )
- for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
- if iCurry < len(curry) && curry[iCurry].index == i {
- if ok {
- return 0, fmt.Errorf("label name %q is already curried", label)
- }
- h = m.hashAdd(h, curry[iCurry].value)
- iCurry++
- } else {
- if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", label)
- }
- h = m.hashAdd(h, val)
- }
- h = m.hashAddByte(h, model.SeparatorByte)
- }
- return h, nil
-}
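
Both hash helpers build on unexported FNV-1a primitives and insert a separator byte after every value. A standalone sketch showing why the separator matters; the constants mirror the library's fnv.go and model.SeparatorByte (0xff):

package main

import "fmt"

// FNV-1a constants matching prometheus/client_golang's internal fnv.go.
const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

func hashNew() uint64 { return offset64 }

func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func hashAddByte(h uint64, b byte) uint64 {
	h ^= uint64(b)
	h *= prime64
	return h
}

func main() {
	// Separator bytes keep ("ab","c") and ("a","bc") from hashing identically.
	h1 := hashAddByte(hashAdd(hashAddByte(hashAdd(hashNew(), "ab"), 0xff), "c"), 0xff)
	h2 := hashAddByte(hashAdd(hashAddByte(hashAdd(hashNew(), "a"), 0xff), "bc"), 0xff)
	fmt.Println(h1 == h2) // false
}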
-
-// metricWithLabelValues provides the metric and its label values for
-// disambiguation on hash collision.
-type metricWithLabelValues struct {
- values []string
- metric Metric
-}
-
-// curriedLabelValue holds the curried value for a label at the given index.
-type curriedLabelValue struct {
- index int
- value string
-}
-
-// metricMap is a helper for metricVec and shared between differently curried
-// metricVecs.
-type metricMap struct {
- mtx sync.RWMutex // Protects metrics.
- metrics map[uint64][]metricWithLabelValues
- desc *Desc
- newMetric func(labelValues ...string) Metric
-}
-
-// Describe implements Collector. It will send exactly one Desc to the provided
-// channel.
-func (m *metricMap) Describe(ch chan<- *Desc) {
- ch <- m.desc
-}
-
-// Collect implements Collector.
-func (m *metricMap) Collect(ch chan<- Metric) {
- m.mtx.RLock()
- defer m.mtx.RUnlock()
-
- for _, metrics := range m.metrics {
- for _, metric := range metrics {
- ch <- metric.metric
- }
- }
-}
-
-// Reset deletes all metrics in this vector.
-func (m *metricMap) Reset() {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- for h := range m.metrics {
- delete(m.metrics, h)
- }
-}
-
-// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
-// there are multiple matches in the bucket, use lvs to select a metric and
-// remove only that metric.
-func (m *metricMap) deleteByHashWithLabelValues(
- h uint64, lvs []string, curry []curriedLabelValue,
-) bool {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- metrics, ok := m.metrics[h]
- if !ok {
- return false
- }
-
- i := findMetricWithLabelValues(metrics, lvs, curry)
- if i >= len(metrics) {
- return false
- }
-
- if len(metrics) > 1 {
- m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
- } else {
- delete(m.metrics, h)
- }
- return true
-}
-
-// deleteByHashWithLabels removes the metric from the hash bucket h. If there
-// are multiple matches in the bucket, use labels to select a metric and remove
-// only that metric.
-func (m *metricMap) deleteByHashWithLabels(
- h uint64, labels Labels, curry []curriedLabelValue,
-) bool {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- metrics, ok := m.metrics[h]
- if !ok {
- return false
- }
- i := findMetricWithLabels(m.desc, metrics, labels, curry)
- if i >= len(metrics) {
- return false
- }
-
- if len(metrics) > 1 {
- m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
- } else {
- delete(m.metrics, h)
- }
- return true
-}
-
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
-// or creates it and returns the new one.
-//
-// This function holds the mutex.
-func (m *metricMap) getOrCreateMetricWithLabelValues(
- hash uint64, lvs []string, curry []curriedLabelValue,
-) Metric {
- m.mtx.RLock()
- metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
- m.mtx.RUnlock()
- if ok {
- return metric
- }
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
- metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
- if !ok {
- inlinedLVs := inlineLabelValues(lvs, curry)
- metric = m.newMetric(inlinedLVs...)
- m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
- }
- return metric
-}
-
-// getOrCreateMetricWithLabels retrieves the metric by hash and labels or
-// creates it and returns the new one.
-//
-// This function holds the mutex.
-func (m *metricMap) getOrCreateMetricWithLabels(
- hash uint64, labels Labels, curry []curriedLabelValue,
-) Metric {
- m.mtx.RLock()
- metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
- m.mtx.RUnlock()
- if ok {
- return metric
- }
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
- metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
- if !ok {
- lvs := extractLabelValues(m.desc, labels, curry)
- metric = m.newMetric(lvs...)
- m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
- }
- return metric
-}
-
-// getMetricWithHashAndLabelValues gets a metric while handling possible
-// collisions in the hash space. Must be called while holding the read mutex.
-func (m *metricMap) getMetricWithHashAndLabelValues(
- h uint64, lvs []string, curry []curriedLabelValue,
-) (Metric, bool) {
- metrics, ok := m.metrics[h]
- if ok {
- if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
- return metrics[i].metric, true
- }
- }
- return nil, false
-}
-
-// getMetricWithHashAndLabels gets a metric while handling possible collisions in
-// the hash space. Must be called while holding the read mutex.
-func (m *metricMap) getMetricWithHashAndLabels(
- h uint64, labels Labels, curry []curriedLabelValue,
-) (Metric, bool) {
- metrics, ok := m.metrics[h]
- if ok {
- if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
- return metrics[i].metric, true
- }
- }
- return nil, false
-}
-
-// findMetricWithLabelValues returns the index of the matching metric or
-// len(metrics) if not found.
-func findMetricWithLabelValues(
- metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
-) int {
- for i, metric := range metrics {
- if matchLabelValues(metric.values, lvs, curry) {
- return i
- }
- }
- return len(metrics)
-}
-
-// findMetricWithLabels returns the index of the matching metric or len(metrics)
-// if not found.
-func findMetricWithLabels(
- desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
-) int {
- for i, metric := range metrics {
- if matchLabels(desc, metric.values, labels, curry) {
- return i
- }
- }
- return len(metrics)
-}
-
-func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
- if len(values) != len(lvs)+len(curry) {
- return false
- }
- var iLVs, iCurry int
- for i, v := range values {
- if iCurry < len(curry) && curry[iCurry].index == i {
- if v != curry[iCurry].value {
- return false
- }
- iCurry++
- continue
- }
- if v != lvs[iLVs] {
- return false
- }
- iLVs++
- }
- return true
-}
-
-func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
- if len(values) != len(labels)+len(curry) {
- return false
- }
- iCurry := 0
- for i, k := range desc.variableLabels {
- if iCurry < len(curry) && curry[iCurry].index == i {
- if values[i] != curry[iCurry].value {
- return false
- }
- iCurry++
- continue
- }
- if values[i] != labels[k] {
- return false
- }
- }
- return true
-}
-
-func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
- labelValues := make([]string, len(labels)+len(curry))
- iCurry := 0
- for i, k := range desc.variableLabels {
- if iCurry < len(curry) && curry[iCurry].index == i {
- labelValues[i] = curry[iCurry].value
- iCurry++
- continue
- }
- labelValues[i] = labels[k]
- }
- return labelValues
-}
-
-func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
- labelValues := make([]string, len(lvs)+len(curry))
- var iCurry, iLVs int
- for i := range labelValues {
- if iCurry < len(curry) && curry[iCurry].index == i {
- labelValues[i] = curry[iCurry].value
- iCurry++
- continue
- }
- labelValues[i] = lvs[iLVs]
- iLVs++
- }
- return labelValues
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
deleted file mode 100644
index 438aa5e9247..00000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "sort"
-
- //lint:ignore SA1019 Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// WrapRegistererWith returns a Registerer wrapping the provided
-// Registerer. Collectors registered with the returned Registerer will be
-// registered with the wrapped Registerer in a modified way. The modified
-// Collector adds the provided Labels to all Metrics it collects (as
-// ConstLabels). The Metrics collected by the unmodified Collector must not
-// duplicate any of those labels. Wrapping a nil value is valid, resulting
-// in a no-op Registerer.
-//
-// WrapRegistererWith provides a way to add fixed labels to a subset of
-// Collectors. It should not be used to add fixed labels to all metrics exposed.
-//
-// Conflicts between Collectors registered through the original Registerer with
-// Collectors registered through the wrapping Registerer will still be
-// detected. Any AlreadyRegisteredError returned by the Register method of
-// either Registerer will contain the ExistingCollector in the form it was
-// provided to the respective registry.
-//
-// The Collector example demonstrates a use of WrapRegistererWith.
-func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
- return &wrappingRegisterer{
- wrappedRegisterer: reg,
- labels: labels,
- }
-}
-
-// WrapRegistererWithPrefix returns a Registerer wrapping the provided
-// Registerer. Collectors registered with the returned Registerer will be
-// registered with the wrapped Registerer in a modified way. The modified
-// Collector adds the provided prefix to the name of all Metrics it collects.
-// Wrapping a nil value is valid, resulting in a no-op Registerer.
-//
-// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
-// a sub-system. To make this work, register metrics of the sub-system with the
-// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
-// to use the same prefix for all metrics exposed. In particular, do not prefix
-// metric names that are standardized across applications, as that would break
-// horizontal monitoring, for example the metrics provided by the Go collector
-// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
-// fact, those metrics are already prefixed with “go_” or “process_”,
-// respectively.)
-//
-// Conflicts between Collectors registered through the original Registerer with
-// Collectors registered through the wrapping Registerer will still be
-// detected. Any AlreadyRegisteredError returned by the Register method of
-// either Registerer will contain the ExistingCollector in the form it was
-// provided to the respective registry.
-func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
- return &wrappingRegisterer{
- wrappedRegisterer: reg,
- prefix: prefix,
- }
-}
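
A usage sketch combining both wrappers to scope a sub-system's metrics; the prefix and label are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Everything registered below gets the "subsystem" label and "myapp_" prefix.
	wrapped := prometheus.WrapRegistererWithPrefix("myapp_",
		prometheus.WrapRegistererWith(prometheus.Labels{"subsystem": "cache"}, reg),
	)

	hits := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "hits_total",
		Help: "Cache hits.",
	})
	wrapped.MustRegister(hits) // exposed as myapp_hits_total{subsystem="cache"}
	hits.Inc()
}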
-
-type wrappingRegisterer struct {
- wrappedRegisterer Registerer
- prefix string
- labels Labels
-}
-
-func (r *wrappingRegisterer) Register(c Collector) error {
- if r.wrappedRegisterer == nil {
- return nil
- }
- return r.wrappedRegisterer.Register(&wrappingCollector{
- wrappedCollector: c,
- prefix: r.prefix,
- labels: r.labels,
- })
-}
-
-func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
- if r.wrappedRegisterer == nil {
- return
- }
- for _, c := range cs {
- if err := r.Register(c); err != nil {
- panic(err)
- }
- }
-}
-
-func (r *wrappingRegisterer) Unregister(c Collector) bool {
- if r.wrappedRegisterer == nil {
- return false
- }
- return r.wrappedRegisterer.Unregister(&wrappingCollector{
- wrappedCollector: c,
- prefix: r.prefix,
- labels: r.labels,
- })
-}
-
-type wrappingCollector struct {
- wrappedCollector Collector
- prefix string
- labels Labels
-}
-
-func (c *wrappingCollector) Collect(ch chan<- Metric) {
- wrappedCh := make(chan Metric)
- go func() {
- c.wrappedCollector.Collect(wrappedCh)
- close(wrappedCh)
- }()
- for m := range wrappedCh {
- ch <- &wrappingMetric{
- wrappedMetric: m,
- prefix: c.prefix,
- labels: c.labels,
- }
- }
-}
-
-func (c *wrappingCollector) Describe(ch chan<- *Desc) {
- wrappedCh := make(chan *Desc)
- go func() {
- c.wrappedCollector.Describe(wrappedCh)
- close(wrappedCh)
- }()
- for desc := range wrappedCh {
- ch <- wrapDesc(desc, c.prefix, c.labels)
- }
-}
-
-func (c *wrappingCollector) unwrapRecursively() Collector {
- switch wc := c.wrappedCollector.(type) {
- case *wrappingCollector:
- return wc.unwrapRecursively()
- default:
- return wc
- }
-}
-
-type wrappingMetric struct {
- wrappedMetric Metric
- prefix string
- labels Labels
-}
-
-func (m *wrappingMetric) Desc() *Desc {
- return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
-}
-
-func (m *wrappingMetric) Write(out *dto.Metric) error {
- if err := m.wrappedMetric.Write(out); err != nil {
- return err
- }
- if len(m.labels) == 0 {
- // No wrapping labels.
- return nil
- }
- for ln, lv := range m.labels {
- out.Label = append(out.Label, &dto.LabelPair{
- Name: proto.String(ln),
- Value: proto.String(lv),
- })
- }
- sort.Sort(labelPairSorter(out.Label))
- return nil
-}
-
-func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
- constLabels := Labels{}
- for _, lp := range desc.constLabelPairs {
- constLabels[*lp.Name] = *lp.Value
- }
- for ln, lv := range labels {
- if _, alreadyUsed := constLabels[ln]; alreadyUsed {
- return &Desc{
- fqName: desc.fqName,
- help: desc.help,
- variableLabels: desc.variableLabels,
- constLabelPairs: desc.constLabelPairs,
- err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
- }
- }
- constLabels[ln] = lv
- }
- // NewDesc will do remaining validations.
- newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
-	// Propagate errors if there were any. This will override any error
-	// created by NewDesc above, i.e. earlier errors take precedence.
- if desc.err != nil {
- newDesc.err = desc.err
- }
- return newDesc
-}
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
deleted file mode 100644
index 20110e410e5..00000000000
--- a/vendor/github.com/prometheus/client_model/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Data model artifacts for Prometheus.
-Copyright 2012-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
deleted file mode 100644
index 2f4930d9dd3..00000000000
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ /dev/null
@@ -1,723 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: metrics.proto
-
-package io_prometheus_client
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type MetricType int32
-
-const (
- MetricType_COUNTER MetricType = 0
- MetricType_GAUGE MetricType = 1
- MetricType_SUMMARY MetricType = 2
- MetricType_UNTYPED MetricType = 3
- MetricType_HISTOGRAM MetricType = 4
-)
-
-var MetricType_name = map[int32]string{
- 0: "COUNTER",
- 1: "GAUGE",
- 2: "SUMMARY",
- 3: "UNTYPED",
- 4: "HISTOGRAM",
-}
-
-var MetricType_value = map[string]int32{
- "COUNTER": 0,
- "GAUGE": 1,
- "SUMMARY": 2,
- "UNTYPED": 3,
- "HISTOGRAM": 4,
-}
-
-func (x MetricType) Enum() *MetricType {
- p := new(MetricType)
- *p = x
- return p
-}
-
-func (x MetricType) String() string {
- return proto.EnumName(MetricType_name, int32(x))
-}
-
-func (x *MetricType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
- if err != nil {
- return err
- }
- *x = MetricType(value)
- return nil
-}
-
-func (MetricType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{0}
-}
-
-type LabelPair struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LabelPair) Reset() { *m = LabelPair{} }
-func (m *LabelPair) String() string { return proto.CompactTextString(m) }
-func (*LabelPair) ProtoMessage() {}
-func (*LabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{0}
-}
-
-func (m *LabelPair) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelPair.Unmarshal(m, b)
-}
-func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
-}
-func (m *LabelPair) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelPair.Merge(m, src)
-}
-func (m *LabelPair) XXX_Size() int {
- return xxx_messageInfo_LabelPair.Size(m)
-}
-func (m *LabelPair) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelPair.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LabelPair proto.InternalMessageInfo
-
-func (m *LabelPair) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *LabelPair) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-type Gauge struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Gauge) Reset() { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage() {}
-func (*Gauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{1}
-}
-
-func (m *Gauge) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Gauge.Unmarshal(m, b)
-}
-func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
-}
-func (m *Gauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Gauge.Merge(m, src)
-}
-func (m *Gauge) XXX_Size() int {
- return xxx_messageInfo_Gauge.Size(m)
-}
-func (m *Gauge) XXX_DiscardUnknown() {
- xxx_messageInfo_Gauge.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Gauge proto.InternalMessageInfo
-
-func (m *Gauge) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Counter struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Counter) Reset() { *m = Counter{} }
-func (m *Counter) String() string { return proto.CompactTextString(m) }
-func (*Counter) ProtoMessage() {}
-func (*Counter) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{2}
-}
-
-func (m *Counter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Counter.Unmarshal(m, b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
-}
-func (m *Counter) XXX_Size() int {
- return xxx_messageInfo_Counter.Size(m)
-}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Counter proto.InternalMessageInfo
-
-func (m *Counter) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-func (m *Counter) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
- }
- return nil
-}
-
-type Quantile struct {
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Quantile) Reset() { *m = Quantile{} }
-func (m *Quantile) String() string { return proto.CompactTextString(m) }
-func (*Quantile) ProtoMessage() {}
-func (*Quantile) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{3}
-}
-
-func (m *Quantile) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Quantile.Unmarshal(m, b)
-}
-func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
-}
-func (m *Quantile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Quantile.Merge(m, src)
-}
-func (m *Quantile) XXX_Size() int {
- return xxx_messageInfo_Quantile.Size(m)
-}
-func (m *Quantile) XXX_DiscardUnknown() {
- xxx_messageInfo_Quantile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Quantile proto.InternalMessageInfo
-
-func (m *Quantile) GetQuantile() float64 {
- if m != nil && m.Quantile != nil {
- return *m.Quantile
- }
- return 0
-}
-
-func (m *Quantile) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Summary struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Summary) Reset() { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage() {}
-func (*Summary) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{4}
-}
-
-func (m *Summary) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Summary.Unmarshal(m, b)
-}
-func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
-}
-func (m *Summary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Summary.Merge(m, src)
-}
-func (m *Summary) XXX_Size() int {
- return xxx_messageInfo_Summary.Size(m)
-}
-func (m *Summary) XXX_DiscardUnknown() {
- xxx_messageInfo_Summary.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Summary proto.InternalMessageInfo
-
-func (m *Summary) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
- }
- return 0
-}
-
-func (m *Summary) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
- }
- return 0
-}
-
-func (m *Summary) GetQuantile() []*Quantile {
- if m != nil {
- return m.Quantile
- }
- return nil
-}
-
-type Untyped struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Untyped) Reset() { *m = Untyped{} }
-func (m *Untyped) String() string { return proto.CompactTextString(m) }
-func (*Untyped) ProtoMessage() {}
-func (*Untyped) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{5}
-}
-
-func (m *Untyped) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Untyped.Unmarshal(m, b)
-}
-func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
-}
-func (m *Untyped) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Untyped.Merge(m, src)
-}
-func (m *Untyped) XXX_Size() int {
- return xxx_messageInfo_Untyped.Size(m)
-}
-func (m *Untyped) XXX_DiscardUnknown() {
- xxx_messageInfo_Untyped.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Untyped proto.InternalMessageInfo
-
-func (m *Untyped) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Histogram struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage() {}
-func (*Histogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{6}
-}
-
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Histogram.Unmarshal(m, b)
-}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
-}
-func (m *Histogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Histogram.Merge(m, src)
-}
-func (m *Histogram) XXX_Size() int {
- return xxx_messageInfo_Histogram.Size(m)
-}
-func (m *Histogram) XXX_DiscardUnknown() {
- xxx_messageInfo_Histogram.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
-
-func (m *Histogram) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
- }
- return 0
-}
-
-func (m *Histogram) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
- }
- return 0
-}
-
-func (m *Histogram) GetBucket() []*Bucket {
- if m != nil {
- return m.Bucket
- }
- return nil
-}
-
-type Bucket struct {
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Bucket) Reset() { *m = Bucket{} }
-func (m *Bucket) String() string { return proto.CompactTextString(m) }
-func (*Bucket) ProtoMessage() {}
-func (*Bucket) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{7}
-}
-
-func (m *Bucket) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Bucket.Unmarshal(m, b)
-}
-func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
-}
-func (m *Bucket) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Bucket.Merge(m, src)
-}
-func (m *Bucket) XXX_Size() int {
- return xxx_messageInfo_Bucket.Size(m)
-}
-func (m *Bucket) XXX_DiscardUnknown() {
- xxx_messageInfo_Bucket.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Bucket proto.InternalMessageInfo
-
-func (m *Bucket) GetCumulativeCount() uint64 {
- if m != nil && m.CumulativeCount != nil {
- return *m.CumulativeCount
- }
- return 0
-}
-
-func (m *Bucket) GetUpperBound() float64 {
- if m != nil && m.UpperBound != nil {
- return *m.UpperBound
- }
- return 0
-}
-
-func (m *Bucket) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
- }
- return nil
-}
-
-type Exemplar struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Exemplar) Reset() { *m = Exemplar{} }
-func (m *Exemplar) String() string { return proto.CompactTextString(m) }
-func (*Exemplar) ProtoMessage() {}
-func (*Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{8}
-}
-
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Exemplar.Unmarshal(m, b)
-}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
-}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exemplar.Merge(m, src)
-}
-func (m *Exemplar) XXX_Size() int {
- return xxx_messageInfo_Exemplar.Size(m)
-}
-func (m *Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_Exemplar.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
-
-func (m *Exemplar) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
- }
- return nil
-}
-
-func (m *Exemplar) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
- }
- return nil
-}
-
-type Metric struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{9}
-}
-
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metric.Unmarshal(m, b)
-}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return xxx_messageInfo_Metric.Size(m)
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metric proto.InternalMessageInfo
-
-func (m *Metric) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
- }
- return nil
-}
-
-func (m *Metric) GetGauge() *Gauge {
- if m != nil {
- return m.Gauge
- }
- return nil
-}
-
-func (m *Metric) GetCounter() *Counter {
- if m != nil {
- return m.Counter
- }
- return nil
-}
-
-func (m *Metric) GetSummary() *Summary {
- if m != nil {
- return m.Summary
- }
- return nil
-}
-
-func (m *Metric) GetUntyped() *Untyped {
- if m != nil {
- return m.Untyped
- }
- return nil
-}
-
-func (m *Metric) GetHistogram() *Histogram {
- if m != nil {
- return m.Histogram
- }
- return nil
-}
-
-func (m *Metric) GetTimestampMs() int64 {
- if m != nil && m.TimestampMs != nil {
- return *m.TimestampMs
- }
- return 0
-}
-
-type MetricFamily struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MetricFamily) Reset() { *m = MetricFamily{} }
-func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
-func (*MetricFamily) ProtoMessage() {}
-func (*MetricFamily) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{10}
-}
-
-func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
-}
-func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
-}
-func (m *MetricFamily) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricFamily.Merge(m, src)
-}
-func (m *MetricFamily) XXX_Size() int {
- return xxx_messageInfo_MetricFamily.Size(m)
-}
-func (m *MetricFamily) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricFamily.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
-
-func (m *MetricFamily) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *MetricFamily) GetHelp() string {
- if m != nil && m.Help != nil {
- return *m.Help
- }
- return ""
-}
-
-func (m *MetricFamily) GetType() MetricType {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return MetricType_COUNTER
-}
-
-func (m *MetricFamily) GetMetric() []*Metric {
- if m != nil {
- return m.Metric
- }
- return nil
-}
-
-func init() {
- proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
- proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
- proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
- proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
- proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
- proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
- proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
- proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
- proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
- proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
- proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
- proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
-}
-
-func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
-
-var fileDescriptor_6039342a2ba47b72 = []byte{
- // 665 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
- 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
- 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
- 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
- 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
- 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
- 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
- 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
- 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
- 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
- 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
- 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
- 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
- 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
- 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
- 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
- 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
- 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
- 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
- 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
- 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
- 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
- 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
- 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
- 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
- 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
- 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
- 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
- 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
- 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
- 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
- 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
- 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
- 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
- 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
- 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
- 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
- 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
- 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
- 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
- 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
-}
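
For context on the generated API this diff drops from the vendor tree: every accessor above is nil-safe, returning the zero value when the message or the optional field is unset. A minimal sketch (not part of the change; metric name illustrative) showing why callers can chain getters without nil checks:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"), // illustrative metric name
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}
	// Getters dereference optional fields and fall back to zero values.
	fmt.Println(mf.GetName(), mf.GetType(), mf.Metric[0].GetCounter().GetValue())

	// They are also safe on nil receivers, so chained access never panics.
	var none *dto.MetricFamily
	fmt.Printf("%q\n", none.GetName()) // ""
}
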
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
deleted file mode 100644
index 261eeb9e9f8..00000000000
--- a/vendor/github.com/prometheus/common/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
deleted file mode 100644
index 636a2c1a5e8..00000000000
--- a/vendor/github.com/prometheus/common/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Common libraries shared by Prometheus Go components.
-Copyright 2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
deleted file mode 100644
index c092723e84a..00000000000
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ /dev/null
@@ -1,429 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "fmt"
- "io"
- "math"
- "mime"
- "net/http"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/matttproud/golang_protobuf_extensions/pbutil"
- "github.com/prometheus/common/model"
-)
-
-// Decoder types decode an input stream into metric families.
-type Decoder interface {
- Decode(*dto.MetricFamily) error
-}
-
-// DecodeOptions contains options used by the Decoder and in sample extraction.
-type DecodeOptions struct {
- // Timestamp is added to each value from the stream that has no explicit timestamp set.
- Timestamp model.Time
-}
-
-// ResponseFormat extracts the correct format from an HTTP response header.
-// If no matching format can be found FormatUnknown is returned.
-func ResponseFormat(h http.Header) Format {
- ct := h.Get(hdrContentType)
-
- mediatype, params, err := mime.ParseMediaType(ct)
- if err != nil {
- return FmtUnknown
- }
-
- const textType = "text/plain"
-
- switch mediatype {
- case ProtoType:
- if p, ok := params["proto"]; ok && p != ProtoProtocol {
- return FmtUnknown
- }
- if e, ok := params["encoding"]; ok && e != "delimited" {
- return FmtUnknown
- }
- return FmtProtoDelim
-
- case textType:
- if v, ok := params["version"]; ok && v != TextVersion {
- return FmtUnknown
- }
- return FmtText
- }
-
- return FmtUnknown
-}
-
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
-func NewDecoder(r io.Reader, format Format) Decoder {
- switch format {
- case FmtProtoDelim:
- return &protoDecoder{r: r}
- }
- return &textDecoder{r: r}
-}
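
ResponseFormat and NewDecoder were designed to be used together when consuming a scrape response: negotiate the wire format from the Content-Type header, then stream metric families until the decoder reports io.EOF. A hedged sketch, assuming a hypothetical exporter on localhost:9100:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	resp, err := http.Get("http://localhost:9100/metrics") // hypothetical scrape target
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Pick the decoder matching the response's Content-Type, then stream
	// metric families until the decoder reports io.EOF.
	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Println(mf.GetName(), mf.GetType())
	}
}
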
-
-// protoDecoder implements the Decoder interface for protocol buffers.
-type protoDecoder struct {
- r io.Reader
-}
-
-// Decode implements the Decoder interface.
-func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
- _, err := pbutil.ReadDelimited(d.r, v)
- if err != nil {
- return err
- }
- if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
- return fmt.Errorf("invalid metric name %q", v.GetName())
- }
- for _, m := range v.GetMetric() {
- if m == nil {
- continue
- }
- for _, l := range m.GetLabel() {
- if l == nil {
- continue
- }
- if !model.LabelValue(l.GetValue()).IsValid() {
- return fmt.Errorf("invalid label value %q", l.GetValue())
- }
- if !model.LabelName(l.GetName()).IsValid() {
- return fmt.Errorf("invalid label name %q", l.GetName())
- }
- }
- }
- return nil
-}
-
-// textDecoder implements the Decoder interface for the text protocol.
-type textDecoder struct {
- r io.Reader
- p TextParser
- fams []*dto.MetricFamily
-}
-
-// Decode implements the Decoder interface.
-func (d *textDecoder) Decode(v *dto.MetricFamily) error {
- // TODO(fabxc): Wrap this as a line reader to make streaming safer.
- if len(d.fams) == 0 {
- // No cached metric families, read everything and parse metrics.
- fams, err := d.p.TextToMetricFamilies(d.r)
- if err != nil {
- return err
- }
- if len(fams) == 0 {
- return io.EOF
- }
- d.fams = make([]*dto.MetricFamily, 0, len(fams))
- for _, f := range fams {
- d.fams = append(d.fams, f)
- }
- }
-
- *v = *d.fams[0]
- d.fams = d.fams[1:]
-
- return nil
-}
-
-// SampleDecoder wraps a Decoder to extract samples from the metric families
-// decoded by the wrapped Decoder.
-type SampleDecoder struct {
- Dec Decoder
- Opts *DecodeOptions
-
- f dto.MetricFamily
-}
-
-// Decode calls the Decode method of the wrapped Decoder and then extracts the
-// samples from the decoded MetricFamily into the provided model.Vector.
-func (sd *SampleDecoder) Decode(s *model.Vector) error {
- err := sd.Dec.Decode(&sd.f)
- if err != nil {
- return err
- }
- *s, err = extractSamples(&sd.f, sd.Opts)
- return err
-}
-
-// ExtractSamples builds a slice of samples from the provided metric
-// families. If an error occurs during sample extraction, it continues to
-// extract from the remaining metric families. The returned error is the last
-// error that has occurred.
-func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
- var (
- all model.Vector
- lastErr error
- )
- for _, f := range fams {
- some, err := extractSamples(f, o)
- if err != nil {
- lastErr = err
- continue
- }
- all = append(all, some...)
- }
- return all, lastErr
-}
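
SampleDecoder and ExtractSamples bridge the DTO layer to model.Vector samples. A minimal sketch of the SampleDecoder path (the input literal is illustrative):

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	in := strings.NewReader(`up{job="node"} 1` + "\n")
	dec := &expfmt.SampleDecoder{
		Dec: expfmt.NewDecoder(in, expfmt.FmtText),
		// Samples without an explicit timestamp get this one (see DecodeOptions).
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}
	var v model.Vector
	if err := dec.Decode(&v); err != nil {
		log.Fatal(err)
	}
	for _, s := range v {
		fmt.Println(s.Metric, s.Value, s.Timestamp)
	}
}
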
-
-func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
- switch f.GetType() {
- case dto.MetricType_COUNTER:
- return extractCounter(o, f), nil
- case dto.MetricType_GAUGE:
- return extractGauge(o, f), nil
- case dto.MetricType_SUMMARY:
- return extractSummary(o, f), nil
- case dto.MetricType_UNTYPED:
- return extractUntyped(o, f), nil
- case dto.MetricType_HISTOGRAM:
- return extractHistogram(o, f), nil
- }
- return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
-}
-
-func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Counter == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Counter.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Gauge == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Gauge.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Untyped == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Untyped.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Summary == nil {
- continue
- }
-
- timestamp := o.Timestamp
- if m.TimestampMs != nil {
- timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- }
-
- for _, q := range m.Summary.Quantile {
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- // BUG(matt): Update other names to "quantile".
- lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(q.GetValue()),
- Timestamp: timestamp,
- })
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Summary.GetSampleSum()),
- Timestamp: timestamp,
- })
-
- lset = make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Summary.GetSampleCount()),
- Timestamp: timestamp,
- })
- }
-
- return samples
-}
-
-func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Histogram == nil {
- continue
- }
-
- timestamp := o.Timestamp
- if m.TimestampMs != nil {
- timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- }
-
- infSeen := false
-
- for _, q := range m.Histogram.Bucket {
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-
- if math.IsInf(q.GetUpperBound(), +1) {
- infSeen = true
- }
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(q.GetCumulativeCount()),
- Timestamp: timestamp,
- })
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Histogram.GetSampleSum()),
- Timestamp: timestamp,
- })
-
- lset = make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-
- count := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Histogram.GetSampleCount()),
- Timestamp: timestamp,
- }
- samples = append(samples, count)
-
- if !infSeen {
- // Append an infinity bucket sample.
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: count.Value,
- Timestamp: timestamp,
- })
- }
- }
-
- return samples
-}
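
extractHistogram synthesizes a `+Inf` bucket sample equal to the sample count whenever the input family lacks one, so downstream consumers always see a complete cumulative histogram. A small sketch demonstrating the synthesized sample (all values illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// A histogram family with no explicit +Inf bucket.
	mf := &dto.MetricFamily{
		Name: proto.String("req_seconds"),
		Type: dto.MetricType_HISTOGRAM.Enum(),
		Metric: []*dto.Metric{{
			Histogram: &dto.Histogram{
				SampleCount: proto.Uint64(7),
				SampleSum:   proto.Float64(3.5),
				Bucket: []*dto.Bucket{
					{UpperBound: proto.Float64(0.5), CumulativeCount: proto.Uint64(3)},
				},
			},
		}},
	}
	samples, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{}, mf)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range samples {
		fmt.Println(s.Metric, s.Value)
	}
	// Output includes req_seconds_bucket{le="+Inf"} 7 even though no such
	// bucket was supplied; its value equals the sample count.
}
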
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
deleted file mode 100644
index bd4e3474546..00000000000
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "fmt"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/matttproud/golang_protobuf_extensions/pbutil"
- "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// Encoder types encode metric families into an underlying wire protocol.
-type Encoder interface {
- Encode(*dto.MetricFamily) error
-}
-
-// Closer is implemented by Encoders that need to be closed to finalize
-// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
-//
-// Note that all Encoder implementations returned from this package implement
-// Closer, too, even if the Close call is a no-op. This happens in preparation
-// for adding a Close method to the Encoder interface directly in a (mildly
-// breaking) release in the future.
-type Closer interface {
- Close() error
-}
-
-type encoderCloser struct {
- encode func(*dto.MetricFamily) error
- close func() error
-}
-
-func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
- return ec.encode(v)
-}
-
-func (ec encoderCloser) Close() error {
- return ec.close()
-}
-
-// Negotiate returns the Content-Type based on the given Accept header. If no
-// appropriate accepted type is found, FmtText is returned (which is the
-// Prometheus text format). This function will never negotiate FmtOpenMetrics,
-// as the support is still experimental. To include the option to negotiate
-// FmtOpenMetrics, use NegotiateOpenMetrics.
-func Negotiate(h http.Header) Format {
- for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
- ver := ac.Params["version"]
- if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
- switch ac.Params["encoding"] {
- case "delimited":
- return FmtProtoDelim
- case "text":
- return FmtProtoText
- case "compact-text":
- return FmtProtoCompact
- }
- }
- if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText
- }
- }
- return FmtText
-}
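
Negotiate walks the Accept header in preference order and only ever returns a protobuf variant or FmtText. A short sketch of both outcomes:

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

func main() {
	h := http.Header{}
	h.Set("Accept", "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited")
	fmt.Println(expfmt.Negotiate(h) == expfmt.FmtProtoDelim) // true

	// OpenMetrics is never negotiated by this function; it falls back to text.
	h.Set("Accept", "application/openmetrics-text; version=0.0.1")
	fmt.Println(expfmt.Negotiate(h) == expfmt.FmtText) // true
}
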
-
-// NegotiateIncludingOpenMetrics works like Negotiate but includes
-// FmtOpenMetrics as an option for the result. Note that this function is
-// temporary and will disappear once FmtOpenMetrics is fully supported and
-// can, as such, be negotiated by the normal Negotiate function.
-func NegotiateIncludingOpenMetrics(h http.Header) Format {
- for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
- ver := ac.Params["version"]
- if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
- switch ac.Params["encoding"] {
- case "delimited":
- return FmtProtoDelim
- case "text":
- return FmtProtoText
- case "compact-text":
- return FmtProtoCompact
- }
- }
- if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText
- }
- if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
- return FmtOpenMetrics
- }
- }
- return FmtText
-}
-
-// NewEncoder returns a new encoder based on content type negotiation. All
-// Encoder implementations returned by NewEncoder also implement Closer, and
-// callers should always call the Close method. It is currently only required
-// for FmtOpenMetrics, but a future (breaking) release will add the Close method
-// to the Encoder interface directly. The current version of the Encoder
-// interface is kept for backwards compatibility.
-func NewEncoder(w io.Writer, format Format) Encoder {
- switch format {
- case FmtProtoDelim:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := pbutil.WriteDelimited(w, v)
- return err
- },
- close: func() error { return nil },
- }
- case FmtProtoCompact:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, v.String())
- return err
- },
- close: func() error { return nil },
- }
- case FmtProtoText:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
- return err
- },
- close: func() error { return nil },
- }
- case FmtText:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToText(w, v)
- return err
- },
- close: func() error { return nil },
- }
- case FmtOpenMetrics:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToOpenMetrics(w, v)
- return err
- },
- close: func() error {
- _, err := FinalizeOpenMetrics(w)
- return err
- },
- }
- }
- panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
-}
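
Putting NewEncoder together with the Close contract described above: encoding is format-agnostic for the caller, but Close must always be attempted. A hedged sketch (family contents illustrative):

package main

import (
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name:   proto.String("queue_depth"), // illustrative
		Type:   dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{Gauge: &dto.Gauge{Value: proto.Float64(7)}}},
	}
	enc := expfmt.NewEncoder(os.Stdout, expfmt.FmtText)
	if err := enc.Encode(mf); err != nil {
		log.Fatal(err)
	}
	// Always attempt Close: it is a no-op for FmtText but mandatory once
	// FmtOpenMetrics is in play (it writes the trailing "# EOF").
	if c, ok := enc.(expfmt.Closer); ok {
		if err := c.Close(); err != nil {
			log.Fatal(err)
		}
	}
}
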
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
deleted file mode 100644
index 0f176fa64f2..00000000000
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package expfmt contains tools for reading and writing Prometheus metrics.
-package expfmt
-
-// Format specifies the HTTP content type of the different wire protocols.
-type Format string
-
-// Constants to assemble the Content-Type values for the different wire protocols.
-const (
- TextVersion = "0.0.4"
- ProtoType = `application/vnd.google.protobuf`
- ProtoProtocol = `io.prometheus.client.MetricFamily`
- ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
- OpenMetricsType = `application/openmetrics-text`
- OpenMetricsVersion = "0.0.1"
-
- // The Content-Type values for the different wire protocols.
- FmtUnknown Format = ``
- FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
- FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
- FmtProtoText Format = ProtoFmt + ` encoding=text`
- FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
- FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
-)
-
-const (
- hdrContentType = "Content-Type"
- hdrAccept = "Accept"
-)
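
These Format values are complete Content-Type header values, ready to be set on a response. A quick sketch printing them:

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
)

func main() {
	fmt.Println(string(expfmt.FmtText))        // text/plain; version=0.0.4; charset=utf-8
	fmt.Println(string(expfmt.FmtProtoDelim))  // application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited
	fmt.Println(string(expfmt.FmtOpenMetrics)) // application/openmetrics-text; version=0.0.1; charset=utf-8
}
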
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
deleted file mode 100644
index dc2eedeefca..00000000000
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Build only when actually fuzzing
-// +build gofuzz
-
-package expfmt
-
-import "bytes"
-
-// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
-//
-// go-fuzz-build github.com/prometheus/common/expfmt
-// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
-//
-// Further input samples should go in the folder fuzz/corpus.
-func Fuzz(in []byte) int {
- parser := TextParser{}
- _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
-
- if err != nil {
- return 0
- }
-
- return 1
-}
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
deleted file mode 100644
index 8a9313a3bee..00000000000
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ /dev/null
@@ -1,527 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "math"
- "strconv"
- "strings"
-
- "github.com/golang/protobuf/ptypes"
- "github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
-// OpenMetrics text format and writes the resulting lines to 'out'. It returns
-// the number of bytes written and any error encountered. The output will have
-// the same order as the input; no further sorting is performed. Furthermore,
-// this function assumes the input is already sanitized and does not perform any
-// sanity checks. If the input contains duplicate metrics or invalid metric or
-// label names, the conversion will result in invalid text format output.
-//
-// This function fulfills the type 'expfmt.encoder'.
-//
-// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
-// on individual metric families, it is the responsibility of the caller to
-// append this line to 'out' once all metric families have been written.
-// Conveniently, this can be done by calling FinalizeOpenMetrics.
-//
-// The output should be fully OpenMetrics compliant. However, there are a few
-// missing features and peculiarities to avoid complications when switching from
-// Prometheus to OpenMetrics or vice versa:
-//
-// - Counters are expected to have the `_total` suffix in their metric name. In
-// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
-// line. A counter with a missing `_total` suffix is not an error. However,
-// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
-// output.
-//
-// - No support for the following (optional) features: `# UNIT` line, `_created`
-// line, info type, stateset type, gaugehistogram type.
-//
-// - The size of exemplar labels is not checked (i.e. it's possible to create
-// exemplars that are larger than allowed by the OpenMetrics specification).
-//
-// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
-// with a `NaN` value.)
-func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
- name := in.GetName()
- if name == "" {
- return 0, fmt.Errorf("MetricFamily has no name: %s", in)
- }
-
- // Try the interface upgrade. If it doesn't work, we'll use a
- // bufio.Writer from the sync.Pool.
- w, ok := out.(enhancedWriter)
- if !ok {
- b := bufPool.Get().(*bufio.Writer)
- b.Reset(out)
- w = b
- defer func() {
- bErr := b.Flush()
- if err == nil {
- err = bErr
- }
- bufPool.Put(b)
- }()
- }
-
- var (
- n int
- metricType = in.GetType()
- shortName = name
- )
- if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
- shortName = name[:len(name)-6]
- }
-
- // Comments, first HELP, then TYPE.
- if in.Help != nil {
- n, err = w.WriteString("# HELP ")
- written += n
- if err != nil {
- return
- }
- n, err = w.WriteString(shortName)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return
- }
- n, err = writeEscapedString(w, *in.Help, true)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return
- }
- }
- n, err = w.WriteString("# TYPE ")
- written += n
- if err != nil {
- return
- }
- n, err = w.WriteString(shortName)
- written += n
- if err != nil {
- return
- }
- switch metricType {
- case dto.MetricType_COUNTER:
- if strings.HasSuffix(name, "_total") {
- n, err = w.WriteString(" counter\n")
- } else {
- n, err = w.WriteString(" unknown\n")
- }
- case dto.MetricType_GAUGE:
- n, err = w.WriteString(" gauge\n")
- case dto.MetricType_SUMMARY:
- n, err = w.WriteString(" summary\n")
- case dto.MetricType_UNTYPED:
- n, err = w.WriteString(" unknown\n")
- case dto.MetricType_HISTOGRAM:
- n, err = w.WriteString(" histogram\n")
- default:
- return written, fmt.Errorf("unknown metric type %s", metricType.String())
- }
- written += n
- if err != nil {
- return
- }
-
- // Finally the samples, one line for each.
- for _, metric := range in.Metric {
- switch metricType {
- case dto.MetricType_COUNTER:
- if metric.Counter == nil {
- return written, fmt.Errorf(
- "expected counter in metric %s %s", name, metric,
- )
- }
- // Note that we have ensured above that either the name
-			// ends in `_total` or that the rendered type is
-			// `unknown`. Therefore, no `_total` suffix needs to be added here.
- n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
- metric.Counter.GetValue(), 0, false,
- metric.Counter.Exemplar,
- )
- case dto.MetricType_GAUGE:
- if metric.Gauge == nil {
- return written, fmt.Errorf(
- "expected gauge in metric %s %s", name, metric,
- )
- }
- n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
- metric.Gauge.GetValue(), 0, false,
- nil,
- )
- case dto.MetricType_UNTYPED:
- if metric.Untyped == nil {
- return written, fmt.Errorf(
- "expected untyped in metric %s %s", name, metric,
- )
- }
- n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
- metric.Untyped.GetValue(), 0, false,
- nil,
- )
- case dto.MetricType_SUMMARY:
- if metric.Summary == nil {
- return written, fmt.Errorf(
- "expected summary in metric %s %s", name, metric,
- )
- }
- for _, q := range metric.Summary.Quantile {
- n, err = writeOpenMetricsSample(
- w, name, "", metric,
- model.QuantileLabel, q.GetQuantile(),
- q.GetValue(), 0, false,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeOpenMetricsSample(
- w, name, "_sum", metric, "", 0,
- metric.Summary.GetSampleSum(), 0, false,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeOpenMetricsSample(
- w, name, "_count", metric, "", 0,
- 0, metric.Summary.GetSampleCount(), true,
- nil,
- )
- case dto.MetricType_HISTOGRAM:
- if metric.Histogram == nil {
- return written, fmt.Errorf(
- "expected histogram in metric %s %s", name, metric,
- )
- }
- infSeen := false
- for _, b := range metric.Histogram.Bucket {
- n, err = writeOpenMetricsSample(
- w, name, "_bucket", metric,
- model.BucketLabel, b.GetUpperBound(),
- 0, b.GetCumulativeCount(), true,
- b.Exemplar,
- )
- written += n
- if err != nil {
- return
- }
- if math.IsInf(b.GetUpperBound(), +1) {
- infSeen = true
- }
- }
- if !infSeen {
- n, err = writeOpenMetricsSample(
- w, name, "_bucket", metric,
- model.BucketLabel, math.Inf(+1),
- 0, metric.Histogram.GetSampleCount(), true,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeOpenMetricsSample(
- w, name, "_sum", metric, "", 0,
- metric.Histogram.GetSampleSum(), 0, false,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeOpenMetricsSample(
- w, name, "_count", metric, "", 0,
- 0, metric.Histogram.GetSampleCount(), true,
- nil,
- )
- default:
- return written, fmt.Errorf(
- "unexpected type in metric %s %s", name, metric,
- )
- }
- written += n
- if err != nil {
- return
- }
- }
- return
-}
-
-// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
-func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
- return w.Write([]byte("# EOF\n"))
-}
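
A minimal end-to-end sketch of the OpenMetrics path described above: convert one family, then terminate the exposition via FinalizeOpenMetrics (family contents illustrative):

package main

import (
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"), // `_total` suffix keeps the counter type
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf); err != nil {
		log.Fatal(err)
	}
	// The caller, not the converter, terminates the exposition.
	if _, err := expfmt.FinalizeOpenMetrics(os.Stdout); err != nil {
		log.Fatal(err)
	}
	// Prints:
	// # TYPE http_requests counter
	// http_requests_total 42.0
	// # EOF
}
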
-
-// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
-// w, given the metric name, the metric proto message itself, optionally an
-// additional label name with a float64 value (use empty string as label name if
-// not required), the value (as either a float64 or a uint64, determined by
-// useIntValue), and optionally an exemplar (use nil if not required). The
-// function returns the number of bytes written and any error encountered.
-func writeOpenMetricsSample(
- w enhancedWriter,
- name, suffix string,
- metric *dto.Metric,
- additionalLabelName string, additionalLabelValue float64,
- floatValue float64, intValue uint64, useIntValue bool,
- exemplar *dto.Exemplar,
-) (int, error) {
- var written int
- n, err := w.WriteString(name)
- written += n
- if err != nil {
- return written, err
- }
- if suffix != "" {
- n, err = w.WriteString(suffix)
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err = writeOpenMetricsLabelPairs(
- w, metric.Label, additionalLabelName, additionalLabelValue,
- )
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- if useIntValue {
- n, err = writeUint(w, intValue)
- } else {
- n, err = writeOpenMetricsFloat(w, floatValue)
- }
- written += n
- if err != nil {
- return written, err
- }
- if metric.TimestampMs != nil {
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- // TODO(beorn7): Format this directly without converting to a float first.
- n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
- written += n
- if err != nil {
- return written, err
- }
- }
- if exemplar != nil {
- n, err = writeExemplar(w, exemplar)
- written += n
- if err != nil {
- return written, err
- }
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeOpenMetricsLabelPairs works like writeLabelPairs but formats the float
-// in OpenMetrics style.
-func writeOpenMetricsLabelPairs(
- w enhancedWriter,
- in []*dto.LabelPair,
- additionalLabelName string, additionalLabelValue float64,
-) (int, error) {
- if len(in) == 0 && additionalLabelName == "" {
- return 0, nil
- }
- var (
- written int
- separator byte = '{'
- )
- for _, lp := range in {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := w.WriteString(lp.GetName())
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeEscapedString(w, lp.GetValue(), true)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- separator = ','
- }
- if additionalLabelName != "" {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := w.WriteString(additionalLabelName)
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeOpenMetricsFloat(w, additionalLabelValue)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- }
- err := w.WriteByte('}')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
-// function returns the number of bytes written and any error encountered.
-func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
- written := 0
- n, err := w.WriteString(" # ")
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- n, err = writeOpenMetricsFloat(w, e.GetValue())
- written += n
- if err != nil {
- return written, err
- }
- if e.Timestamp != nil {
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- ts, err := ptypes.Timestamp((*e).Timestamp)
- if err != nil {
- return written, err
- }
- // TODO(beorn7): Format this directly from components of ts to
- // avoid overflow/underflow and precision issues of the float
- // conversion.
- n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
- written += n
- if err != nil {
- return written, err
- }
- }
- return written, nil
-}
-
-// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
-// number would otherwise contain neither a "." nor an "e".
-func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
- switch {
- case f == 1:
- return w.WriteString("1.0")
- case f == 0:
- return w.WriteString("0.0")
- case f == -1:
- return w.WriteString("-1.0")
- case math.IsNaN(f):
- return w.WriteString("NaN")
- case math.IsInf(f, +1):
- return w.WriteString("+Inf")
- case math.IsInf(f, -1):
- return w.WriteString("-Inf")
- default:
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
- if !bytes.ContainsAny(*bp, "e.") {
- *bp = append(*bp, '.', '0')
- }
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
- }
-}
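
The rule above is easy to miss: OpenMetrics floats must look like floats. A standalone reproduction of the core formatting rule, for illustration only (the real function short-circuits 0 and ±1 and handles NaN/±Inf specially, and pools its buffers):

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// openMetricsFloat mirrors the default branch above: append ".0" when the
// 'g' rendering contains neither '.' nor 'e'.
func openMetricsFloat(f float64) string {
	b := strconv.AppendFloat(nil, f, 'g', -1, 64)
	if !bytes.ContainsAny(b, "e.") {
		b = append(b, '.', '0')
	}
	return string(b)
}

func main() {
	fmt.Println(openMetricsFloat(42))   // 42.0
	fmt.Println(openMetricsFloat(0.25)) // 0.25
	fmt.Println(openMetricsFloat(1e21)) // 1e+21
}
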
-
-// writeUint is like writeInt just for uint64.
-func writeUint(w enhancedWriter, u uint64) (int, error) {
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendUint((*bp)[:0], u, 10)
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
-}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
deleted file mode 100644
index 5ba503b0654..00000000000
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ /dev/null
@@ -1,465 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "strconv"
- "strings"
- "sync"
-
- "github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
-// implements it.
-type enhancedWriter interface {
- io.Writer
- WriteRune(r rune) (n int, err error)
- WriteString(s string) (n int, err error)
- WriteByte(c byte) error
-}
-
-const (
- initialNumBufSize = 24
-)
-
-var (
- bufPool = sync.Pool{
- New: func() interface{} {
- return bufio.NewWriter(ioutil.Discard)
- },
- }
- numBufPool = sync.Pool{
- New: func() interface{} {
- b := make([]byte, 0, initialNumBufSize)
- return &b
- },
- }
-)
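
The two pools above avoid per-call allocations: bufio.Writers are reused via Reset, number buffers via truncation. A generic sketch of the same writer-pooling pattern (the helper name is hypothetical):

package main

import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"sync"
)

// writerPool hands out reusable bufio.Writers, matching the bufPool idiom above.
var writerPool = sync.Pool{
	New: func() interface{} { return bufio.NewWriter(ioutil.Discard) },
}

// withPooledWriter borrows a writer, points it at out, and flushes before
// returning it to the pool.
func withPooledWriter(out io.Writer, fn func(*bufio.Writer) error) error {
	w := writerPool.Get().(*bufio.Writer)
	w.Reset(out)
	defer writerPool.Put(w)
	if err := fn(w); err != nil {
		return err
	}
	return w.Flush()
}

func main() {
	_ = withPooledWriter(os.Stdout, func(w *bufio.Writer) error {
		_, err := fmt.Fprintln(w, "pooled write")
		return err
	})
}
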
-
-// MetricFamilyToText converts a MetricFamily proto message into text format and
-// writes the resulting lines to 'out'. It returns the number of bytes written
-// and any error encountered. The output will have the same order as the input;
-// no further sorting is performed. Furthermore, this function assumes the input
-// is already sanitized and does not perform any sanity checks. If the input
-// contains duplicate metrics or invalid metric or label names, the conversion
-// will result in invalid text format output.
-//
-// This method fulfills the type 'prometheus.encoder'.
-func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
- // Fail-fast checks.
- if len(in.Metric) == 0 {
- return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
- }
- name := in.GetName()
- if name == "" {
- return 0, fmt.Errorf("MetricFamily has no name: %s", in)
- }
-
- // Try the interface upgrade. If it doesn't work, we'll use a
- // bufio.Writer from the sync.Pool.
- w, ok := out.(enhancedWriter)
- if !ok {
- b := bufPool.Get().(*bufio.Writer)
- b.Reset(out)
- w = b
- defer func() {
- bErr := b.Flush()
- if err == nil {
- err = bErr
- }
- bufPool.Put(b)
- }()
- }
-
- var n int
-
- // Comments, first HELP, then TYPE.
- if in.Help != nil {
- n, err = w.WriteString("# HELP ")
- written += n
- if err != nil {
- return
- }
- n, err = w.WriteString(name)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return
- }
- n, err = writeEscapedString(w, *in.Help, false)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return
- }
- }
- n, err = w.WriteString("# TYPE ")
- written += n
- if err != nil {
- return
- }
- n, err = w.WriteString(name)
- written += n
- if err != nil {
- return
- }
- metricType := in.GetType()
- switch metricType {
- case dto.MetricType_COUNTER:
- n, err = w.WriteString(" counter\n")
- case dto.MetricType_GAUGE:
- n, err = w.WriteString(" gauge\n")
- case dto.MetricType_SUMMARY:
- n, err = w.WriteString(" summary\n")
- case dto.MetricType_UNTYPED:
- n, err = w.WriteString(" untyped\n")
- case dto.MetricType_HISTOGRAM:
- n, err = w.WriteString(" histogram\n")
- default:
- return written, fmt.Errorf("unknown metric type %s", metricType.String())
- }
- written += n
- if err != nil {
- return
- }
-
- // Finally the samples, one line for each.
- for _, metric := range in.Metric {
- switch metricType {
- case dto.MetricType_COUNTER:
- if metric.Counter == nil {
- return written, fmt.Errorf(
- "expected counter in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- w, name, "", metric, "", 0,
- metric.Counter.GetValue(),
- )
- case dto.MetricType_GAUGE:
- if metric.Gauge == nil {
- return written, fmt.Errorf(
- "expected gauge in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- w, name, "", metric, "", 0,
- metric.Gauge.GetValue(),
- )
- case dto.MetricType_UNTYPED:
- if metric.Untyped == nil {
- return written, fmt.Errorf(
- "expected untyped in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- w, name, "", metric, "", 0,
- metric.Untyped.GetValue(),
- )
- case dto.MetricType_SUMMARY:
- if metric.Summary == nil {
- return written, fmt.Errorf(
- "expected summary in metric %s %s", name, metric,
- )
- }
- for _, q := range metric.Summary.Quantile {
- n, err = writeSample(
- w, name, "", metric,
- model.QuantileLabel, q.GetQuantile(),
- q.GetValue(),
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeSample(
- w, name, "_sum", metric, "", 0,
- metric.Summary.GetSampleSum(),
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeSample(
- w, name, "_count", metric, "", 0,
- float64(metric.Summary.GetSampleCount()),
- )
- case dto.MetricType_HISTOGRAM:
- if metric.Histogram == nil {
- return written, fmt.Errorf(
- "expected histogram in metric %s %s", name, metric,
- )
- }
- infSeen := false
- for _, b := range metric.Histogram.Bucket {
- n, err = writeSample(
- w, name, "_bucket", metric,
- model.BucketLabel, b.GetUpperBound(),
- float64(b.GetCumulativeCount()),
- )
- written += n
- if err != nil {
- return
- }
- if math.IsInf(b.GetUpperBound(), +1) {
- infSeen = true
- }
- }
- if !infSeen {
- n, err = writeSample(
- w, name, "_bucket", metric,
- model.BucketLabel, math.Inf(+1),
- float64(metric.Histogram.GetSampleCount()),
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeSample(
- w, name, "_sum", metric, "", 0,
- metric.Histogram.GetSampleSum(),
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeSample(
- w, name, "_count", metric, "", 0,
- float64(metric.Histogram.GetSampleCount()),
- )
- default:
- return written, fmt.Errorf(
- "unexpected type in metric %s %s", name, metric,
- )
- }
- written += n
- if err != nil {
- return
- }
- }
- return
-}
-
-// writeSample writes a single sample in text format to w, given the metric
-// name, the metric proto message itself, optionally an additional label name
-// with a float64 value (use empty string as label name if not required), and
-// the value. The function returns the number of bytes written and any error
-// encountered.
-func writeSample(
- w enhancedWriter,
- name, suffix string,
- metric *dto.Metric,
- additionalLabelName string, additionalLabelValue float64,
- value float64,
-) (int, error) {
- var written int
- n, err := w.WriteString(name)
- written += n
- if err != nil {
- return written, err
- }
- if suffix != "" {
- n, err = w.WriteString(suffix)
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err = writeLabelPairs(
- w, metric.Label, additionalLabelName, additionalLabelValue,
- )
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- n, err = writeFloat(w, value)
- written += n
- if err != nil {
- return written, err
- }
- if metric.TimestampMs != nil {
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- n, err = writeInt(w, *metric.TimestampMs)
- written += n
- if err != nil {
- return written, err
- }
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeLabelPairs converts a slice of LabelPair proto messages plus the
-// explicitly given additional label pair into text formatted as required by the
-// text format and writes it to 'w'. An empty slice in combination with an empty
-// string 'additionalLabelName' results in nothing being written. Otherwise, the
-// label pairs are written, escaped as required by the text format, and enclosed
-// in '{...}'. The function returns the number of bytes written and any error
-// encountered.
-func writeLabelPairs(
- w enhancedWriter,
- in []*dto.LabelPair,
- additionalLabelName string, additionalLabelValue float64,
-) (int, error) {
- if len(in) == 0 && additionalLabelName == "" {
- return 0, nil
- }
- var (
- written int
- separator byte = '{'
- )
- for _, lp := range in {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := w.WriteString(lp.GetName())
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeEscapedString(w, lp.GetValue(), true)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- separator = ','
- }
- if additionalLabelName != "" {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := w.WriteString(additionalLabelName)
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeFloat(w, additionalLabelValue)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- }
- err := w.WriteByte('}')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
-// includeDoubleQuote is true - '"' by '\"'.
-var (
- escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`)
- quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
-)
-
-func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
- if includeDoubleQuote {
- return quotedEscaper.WriteString(w, v)
- }
- return escaper.WriteString(w, v)
-}
-
-// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
-// a few common cases for increased efficiency. For non-hardcoded cases, it uses
-// strconv.AppendFloat to avoid allocations, similar to writeInt.
-func writeFloat(w enhancedWriter, f float64) (int, error) {
- switch {
- case f == 1:
- return 1, w.WriteByte('1')
- case f == 0:
- return 1, w.WriteByte('0')
- case f == -1:
- return w.WriteString("-1")
- case math.IsNaN(f):
- return w.WriteString("NaN")
- case math.IsInf(f, +1):
- return w.WriteString("+Inf")
- case math.IsInf(f, -1):
- return w.WriteString("-Inf")
- default:
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
- }
-}
-
-// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
-// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
-// allocations.
-func writeInt(w enhancedWriter, i int64) (int, error) {
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendInt((*bp)[:0], i, 10)
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
-}
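
For reference, the encoder deleted above was driven through MetricFamilyToText. A minimal usage sketch against the pre-bump vendored API; the metric name and values are illustrative:

```go
package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(1027)},
		}},
	}
	// Prints the HELP/TYPE comments followed by:
	// http_requests_total{code="200"} 1027
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}
```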
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
deleted file mode 100644
index 342e5940d0f..00000000000
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ /dev/null
@@ -1,764 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "math"
- "strconv"
- "strings"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-)
-
-// A stateFn is a function that represents a state in a state machine. By
-// executing it, the state is progressed to the next state. The stateFn returns
-// another stateFn, which represents the new state. The end state is represented
-// by nil.
-type stateFn func() stateFn
-
-// ParseError signals errors while parsing the simple and flat text-based
-// exchange format.
-type ParseError struct {
- Line int
- Msg string
-}
-
-// Error implements the error interface.
-func (e ParseError) Error() string {
- return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
-}
-
-// TextParser is used to parse the simple and flat text-based exchange format. Its
-// zero value is ready to use.
-type TextParser struct {
- metricFamiliesByName map[string]*dto.MetricFamily
- buf *bufio.Reader // Where the parsed input is read through.
- err error // Most recent error.
- lineCount int // Tracks the line count for error messages.
- currentByte byte // The most recent byte read.
- currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
- currentMF *dto.MetricFamily
- currentMetric *dto.Metric
- currentLabelPair *dto.LabelPair
-
- // The remaining member variables are only used for summaries/histograms.
- currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
- // Summary specific.
- summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
- currentQuantile float64
- // Histogram specific.
- histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
- currentBucket float64
-	// These tell us whether the currently processed line ends in '_count' or
-	// '_sum' respectively and belongs to a summary/histogram, representing the
-	// sample count and sum of that summary/histogram.
- currentIsSummaryCount, currentIsSummarySum bool
- currentIsHistogramCount, currentIsHistogramSum bool
-}
-
-// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
-// format and creates MetricFamily proto messages. It returns the MetricFamily
-// proto messages in a map where the metric names are the keys, along with any
-// error encountered.
-//
-// If the input contains duplicate metrics (i.e. lines with the same metric name
-// and exactly the same label set), the resulting MetricFamily will contain
-// duplicate Metric proto messages. Similar is true for duplicate label
-// names. Checks for duplicates have to be performed separately, if required.
-// Also note that neither the metrics within each MetricFamily are sorted nor
-// the label pairs within each Metric. Sorting is not required for the most
-// frequent use of this method, which is sample ingestion in the Prometheus
-// server. However, for presentation purposes, you might want to sort the
-// metrics, and in some cases, you must sort the labels, e.g. for consumption by
-// the metric family injection hook of the Prometheus registry.
-//
-// Summaries and histograms are rather special beasts. You would probably not
-// use them in the simple text format anyway. This method can deal with
-// summaries and histograms if they are presented in exactly the way the
-// text.Create function creates them.
-//
-// This method must not be called concurrently. If you want to parse different
-// input concurrently, instantiate a separate Parser for each goroutine.
-func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
- p.reset(in)
- for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
- // Magic happens here...
- }
- // Get rid of empty metric families.
- for k, mf := range p.metricFamiliesByName {
- if len(mf.GetMetric()) == 0 {
- delete(p.metricFamiliesByName, k)
- }
- }
- // If p.err is io.EOF now, we have run into a premature end of the input
- // stream. Turn this error into something nicer and more
- // meaningful. (io.EOF is often used as a signal for the legitimate end
- // of an input stream.)
- if p.err == io.EOF {
- p.parseError("unexpected end of input stream")
- }
- return p.metricFamiliesByName, p.err
-}
-
-func (p *TextParser) reset(in io.Reader) {
- p.metricFamiliesByName = map[string]*dto.MetricFamily{}
- if p.buf == nil {
- p.buf = bufio.NewReader(in)
- } else {
- p.buf.Reset(in)
- }
- p.err = nil
- p.lineCount = 0
- if p.summaries == nil || len(p.summaries) > 0 {
- p.summaries = map[uint64]*dto.Metric{}
- }
- if p.histograms == nil || len(p.histograms) > 0 {
- p.histograms = map[uint64]*dto.Metric{}
- }
- p.currentQuantile = math.NaN()
- p.currentBucket = math.NaN()
-}
-
-// startOfLine represents the state where the next byte read from p.buf is the
-// start of a line (or whitespace leading up to it).
-func (p *TextParser) startOfLine() stateFn {
- p.lineCount++
- if p.skipBlankTab(); p.err != nil {
- // End of input reached. This is the only case where
- // that is not an error but a signal that we are done.
- p.err = nil
- return nil
- }
- switch p.currentByte {
- case '#':
- return p.startComment
- case '\n':
- return p.startOfLine // Empty line, start the next one.
- }
- return p.readingMetricName
-}
-
-// startComment represents the state where the next byte read from p.buf is the
-// start of a comment (or whitespace leading up to it).
-func (p *TextParser) startComment() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- // If we have hit the end of line already, there is nothing left
- // to do. This is not considered a syntax error.
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- keyword := p.currentToken.String()
- if keyword != "HELP" && keyword != "TYPE" {
- // Generic comment, ignore by fast forwarding to end of line.
- for p.currentByte != '\n' {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
- return nil // Unexpected end of input.
- }
- }
- return p.startOfLine
- }
- // There is something. Next has to be a metric name.
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.readTokenAsMetricName(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- // At the end of the line already.
- // Again, this is not considered a syntax error.
- return p.startOfLine
- }
- if !isBlankOrTab(p.currentByte) {
- p.parseError("invalid metric name in comment")
- return nil
- }
- p.setOrCreateCurrentMF()
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- // At the end of the line already.
- // Again, this is not considered a syntax error.
- return p.startOfLine
- }
- switch keyword {
- case "HELP":
- return p.readingHelp
- case "TYPE":
- return p.readingType
- }
- panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
-}
-
-// readingMetricName represents the state where the last byte read (now in
-// p.currentByte) is the first byte of a metric name.
-func (p *TextParser) readingMetricName() stateFn {
- if p.readTokenAsMetricName(); p.err != nil {
- return nil
- }
- if p.currentToken.Len() == 0 {
- p.parseError("invalid metric name")
- return nil
- }
- p.setOrCreateCurrentMF()
-	// Now is the time to set the type if it hasn't been set yet.
- if p.currentMF.Type == nil {
- p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
- }
- p.currentMetric = &dto.Metric{}
- // Do not append the newly created currentMetric to
- // currentMF.Metric right now. First wait if this is a summary,
- // and the metric exists already, which we can only know after
- // having read all the labels.
- if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingLabels
-}
-
-// readingLabels represents the state where the last byte read (now in
-// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
-// first byte of the value (otherwise).
-func (p *TextParser) readingLabels() stateFn {
- // Summaries/histograms are special. We have to reset the
- // currentLabels map, currentQuantile and currentBucket before starting to
- // read labels.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- p.currentLabels = map[string]string{}
- p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
- p.currentQuantile = math.NaN()
- p.currentBucket = math.NaN()
- }
- if p.currentByte != '{' {
- return p.readingValue
- }
- return p.startLabelName
-}
-
-// startLabelName represents the state where the next byte read from p.buf is
-// the start of a label name (or whitespace leading up to it).
-func (p *TextParser) startLabelName() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '}' {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingValue
- }
- if p.readTokenAsLabelName(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentToken.Len() == 0 {
- p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
- return nil
- }
- p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
- if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
- p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
- return nil
- }
- // Special summary/histogram treatment. Don't add 'quantile' and 'le'
- // labels to 'real' labels.
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
- !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
- p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
- }
- if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte != '=' {
- p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
- return nil
- }
- return p.startLabelValue
-}
-
-// startLabelValue represents the state where the next byte read from p.buf is
-// the start of a (quoted) label value (or whitespace leading up to it).
-func (p *TextParser) startLabelValue() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte != '"' {
- p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
- return nil
- }
- if p.readTokenAsLabelValue(); p.err != nil {
- return nil
- }
- if !model.LabelValue(p.currentToken.String()).IsValid() {
- p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
- return nil
- }
- p.currentLabelPair.Value = proto.String(p.currentToken.String())
- // Special treatment of summaries:
- // - Quantile labels are special, will result in dto.Quantile later.
- // - Other labels have to be added to currentLabels for signature calculation.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- if p.currentLabelPair.GetName() == model.QuantileLabel {
- if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
- return nil
- }
- } else {
- p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
- }
- }
- // Similar special treatment of histograms.
- if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- if p.currentLabelPair.GetName() == model.BucketLabel {
- if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
- return nil
- }
- } else {
- p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
- }
- }
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- switch p.currentByte {
- case ',':
- return p.startLabelName
-
- case '}':
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingValue
- default:
- p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
- return nil
- }
-}
-
-// readingValue represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the sample value (i.e. a float).
-func (p *TextParser) readingValue() stateFn {
- // When we are here, we have read all the labels, so for the
- // special case of a summary/histogram, we can finally find out
- // if the metric already exists.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- signature := model.LabelsToSignature(p.currentLabels)
- if summary := p.summaries[signature]; summary != nil {
- p.currentMetric = summary
- } else {
- p.summaries[signature] = p.currentMetric
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- signature := model.LabelsToSignature(p.currentLabels)
- if histogram := p.histograms[signature]; histogram != nil {
- p.currentMetric = histogram
- } else {
- p.histograms[signature] = p.currentMetric
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- } else {
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- value, err := parseFloat(p.currentToken.String())
- if err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
- return nil
- }
- switch p.currentMF.GetType() {
- case dto.MetricType_COUNTER:
- p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
- case dto.MetricType_GAUGE:
- p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
- case dto.MetricType_UNTYPED:
- p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
- case dto.MetricType_SUMMARY:
- // *sigh*
- if p.currentMetric.Summary == nil {
- p.currentMetric.Summary = &dto.Summary{}
- }
- switch {
- case p.currentIsSummaryCount:
- p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
- case p.currentIsSummarySum:
- p.currentMetric.Summary.SampleSum = proto.Float64(value)
- case !math.IsNaN(p.currentQuantile):
- p.currentMetric.Summary.Quantile = append(
- p.currentMetric.Summary.Quantile,
- &dto.Quantile{
- Quantile: proto.Float64(p.currentQuantile),
- Value: proto.Float64(value),
- },
- )
- }
- case dto.MetricType_HISTOGRAM:
- // *sigh*
- if p.currentMetric.Histogram == nil {
- p.currentMetric.Histogram = &dto.Histogram{}
- }
- switch {
- case p.currentIsHistogramCount:
- p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
- case p.currentIsHistogramSum:
- p.currentMetric.Histogram.SampleSum = proto.Float64(value)
- case !math.IsNaN(p.currentBucket):
- p.currentMetric.Histogram.Bucket = append(
- p.currentMetric.Histogram.Bucket,
- &dto.Bucket{
- UpperBound: proto.Float64(p.currentBucket),
- CumulativeCount: proto.Uint64(uint64(value)),
- },
- )
- }
- default:
- p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
- }
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- return p.startTimestamp
-}
-
-// startTimestamp represents the state where the next byte read from p.buf is
-// the start of the timestamp (or whitespace leading up to it).
-func (p *TextParser) startTimestamp() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
- if err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
- return nil
- }
- p.currentMetric.TimestampMs = proto.Int64(timestamp)
- if p.readTokenUntilNewline(false); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentToken.Len() > 0 {
- p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
- return nil
- }
- return p.startOfLine
-}
-
-// readingHelp represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the docstring after 'HELP'.
-func (p *TextParser) readingHelp() stateFn {
- if p.currentMF.Help != nil {
- p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
- return nil
- }
- // Rest of line is the docstring.
- if p.readTokenUntilNewline(true); p.err != nil {
- return nil // Unexpected end of input.
- }
- p.currentMF.Help = proto.String(p.currentToken.String())
- return p.startOfLine
-}
-
-// readingType represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the type hint after 'TYPE'.
-func (p *TextParser) readingType() stateFn {
- if p.currentMF.Type != nil {
- p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
- return nil
- }
- // Rest of line is the type.
- if p.readTokenUntilNewline(false); p.err != nil {
- return nil // Unexpected end of input.
- }
- metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
- if !ok {
- p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
- return nil
- }
- p.currentMF.Type = dto.MetricType(metricType).Enum()
- return p.startOfLine
-}
-
-// parseError sets p.err to a ParseError at the current line with the given
-// message.
-func (p *TextParser) parseError(msg string) {
- p.err = ParseError{
- Line: p.lineCount,
- Msg: msg,
- }
-}
-
-// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
-// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
-func (p *TextParser) skipBlankTab() {
- for {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
- return
- }
- }
-}
-
-// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
-// anything if p.currentByte is neither ' ' nor '\t'.
-func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
- if isBlankOrTab(p.currentByte) {
- p.skipBlankTab()
- }
-}
-
-// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
-// first byte considered is the byte already read (now in p.currentByte). The
-// first whitespace byte encountered is still copied into p.currentByte, but not
-// into p.currentToken.
-func (p *TextParser) readTokenUntilWhitespace() {
- p.currentToken.Reset()
- for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- }
-}
-
-// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
-// byte considered is the byte already read (now in p.currentByte). The first
-// newline byte encountered is still copied into p.currentByte, but not into
-// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' translates into '\', and '\n' into a line-feed character.
-// All other escape sequences are invalid and cause an error.
-func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
- p.currentToken.Reset()
- escaped := false
- for p.err == nil {
- if recognizeEscapeSequence && escaped {
- switch p.currentByte {
- case '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- return
- }
- escaped = false
- } else {
- switch p.currentByte {
- case '\n':
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
- p.currentByte, p.err = p.buf.ReadByte()
- }
-}
-
-// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a metric name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *TextParser) readTokenAsMetricName() {
- p.currentToken.Reset()
- if !isValidMetricNameStart(p.currentByte) {
- return
- }
- for {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
- return
- }
- }
-}
-
-// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a label name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *TextParser) readTokenAsLabelName() {
- p.currentToken.Reset()
- if !isValidLabelNameStart(p.currentByte) {
- return
- }
- for {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
- return
- }
- }
-}
-
-// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
-// In contrast to the other 'readTokenAs...' functions, which start with the
-// last read byte in p.currentByte, this method ignores p.currentByte and starts
-// with reading a new byte from p.buf. The first byte not part of a label value
-// is still copied into p.currentByte, but not into p.currentToken.
-func (p *TextParser) readTokenAsLabelValue() {
- p.currentToken.Reset()
- escaped := false
- for {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
- return
- }
- if escaped {
- switch p.currentByte {
- case '"', '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- return
- }
- escaped = false
- continue
- }
- switch p.currentByte {
- case '"':
- return
- case '\n':
- p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
-}
-
-func (p *TextParser) setOrCreateCurrentMF() {
- p.currentIsSummaryCount = false
- p.currentIsSummarySum = false
- p.currentIsHistogramCount = false
- p.currentIsHistogramSum = false
- name := p.currentToken.String()
- if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
- return
- }
-	// Check whether this is a _sum or _count for a summary/histogram.
- summaryName := summaryMetricName(name)
- if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- if isCount(name) {
- p.currentIsSummaryCount = true
- }
- if isSum(name) {
- p.currentIsSummarySum = true
- }
- return
- }
- }
- histogramName := histogramMetricName(name)
- if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
- if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- if isCount(name) {
- p.currentIsHistogramCount = true
- }
- if isSum(name) {
- p.currentIsHistogramSum = true
- }
- return
- }
- }
- p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
- p.metricFamiliesByName[name] = p.currentMF
-}
-
-func isValidLabelNameStart(b byte) bool {
- return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
-}
-
-func isValidLabelNameContinuation(b byte) bool {
- return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
-}
-
-func isValidMetricNameStart(b byte) bool {
- return isValidLabelNameStart(b) || b == ':'
-}
-
-func isValidMetricNameContinuation(b byte) bool {
- return isValidLabelNameContinuation(b) || b == ':'
-}
-
-func isBlankOrTab(b byte) bool {
- return b == ' ' || b == '\t'
-}
-
-func isCount(name string) bool {
- return len(name) > 6 && name[len(name)-6:] == "_count"
-}
-
-func isSum(name string) bool {
- return len(name) > 4 && name[len(name)-4:] == "_sum"
-}
-
-func isBucket(name string) bool {
- return len(name) > 7 && name[len(name)-7:] == "_bucket"
-}
-
-func summaryMetricName(name string) string {
- switch {
- case isCount(name):
- return name[:len(name)-6]
- case isSum(name):
- return name[:len(name)-4]
- default:
- return name
- }
-}
-
-func histogramMetricName(name string) string {
- switch {
- case isCount(name):
- return name[:len(name)-6]
- case isSum(name):
- return name[:len(name)-4]
- case isBucket(name):
- return name[:len(name)-7]
- default:
- return name
- }
-}
-
-func parseFloat(s string) (float64, error) {
- if strings.ContainsAny(s, "pP_") {
- return 0, fmt.Errorf("unsupported character in float")
- }
- return strconv.ParseFloat(s, 64)
-}
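
The deleted parser is the encoder's counterpart: its zero value is ready to use, but a single TextParser must not be used concurrently. A small sketch, again assuming the pre-bump expfmt API:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := "# HELP http_requests_total Total HTTP requests.\n" +
		"# TYPE http_requests_total counter\n" +
		"http_requests_total{code=\"200\"} 1027\n"

	var parser expfmt.TextParser // zero value is ready to use
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Println(name, mf.GetType(), len(mf.GetMetric()))
	}
}
```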
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
deleted file mode 100644
index 7723656d58d..00000000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-PACKAGE
-
-package goautoneg
-import "bitbucket.org/ww/goautoneg"
-
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-FUNCTIONS
-
-func Negotiate(header string, alternatives []string) (content_type string)
-Negotiate the most appropriate content_type given the accept header
-and a list of alternatives.
-
-func ParseAccept(header string) (accept []Accept)
-Parse an Accept Header string returning a sorted list
-of clauses
-
-
-TYPES
-
-type Accept struct {
- Type, SubType string
- Q float32
- Params map[string]string
-}
-Structure to represent a clause in an HTTP Accept Header
-
-
-SUBDIRECTORIES
-
- .hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
deleted file mode 100644
index 26e92288c7c..00000000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-*/
-package goautoneg
-
-import (
- "sort"
- "strconv"
- "strings"
-)
-
-// Structure to represent a clause in an HTTP Accept Header
-type Accept struct {
- Type, SubType string
- Q float64
- Params map[string]string
-}
-
-// For internal use, so that we can use the sort interface
-type accept_slice []Accept
-
-func (accept accept_slice) Len() int {
- slice := []Accept(accept)
- return len(slice)
-}
-
-func (accept accept_slice) Less(i, j int) bool {
- slice := []Accept(accept)
- ai, aj := slice[i], slice[j]
- if ai.Q > aj.Q {
- return true
- }
- if ai.Type != "*" && aj.Type == "*" {
- return true
- }
- if ai.SubType != "*" && aj.SubType == "*" {
- return true
- }
- return false
-}
-
-func (accept accept_slice) Swap(i, j int) {
- slice := []Accept(accept)
- slice[i], slice[j] = slice[j], slice[i]
-}
-
-// Parse an Accept Header string returning a sorted list
-// of clauses
-func ParseAccept(header string) (accept []Accept) {
- parts := strings.Split(header, ",")
- accept = make([]Accept, 0, len(parts))
- for _, part := range parts {
- part := strings.Trim(part, " ")
-
- a := Accept{}
- a.Params = make(map[string]string)
- a.Q = 1.0
-
- mrp := strings.Split(part, ";")
-
- media_range := mrp[0]
- sp := strings.Split(media_range, "/")
- a.Type = strings.Trim(sp[0], " ")
-
- switch {
- case len(sp) == 1 && a.Type == "*":
- a.SubType = "*"
- case len(sp) == 2:
- a.SubType = strings.Trim(sp[1], " ")
- default:
- continue
- }
-
- if len(mrp) == 1 {
- accept = append(accept, a)
- continue
- }
-
- for _, param := range mrp[1:] {
- sp := strings.SplitN(param, "=", 2)
- if len(sp) != 2 {
- continue
- }
- token := strings.Trim(sp[0], " ")
- if token == "q" {
- a.Q, _ = strconv.ParseFloat(sp[1], 32)
- } else {
- a.Params[token] = strings.Trim(sp[1], " ")
- }
- }
-
- accept = append(accept, a)
- }
-
- slice := accept_slice(accept)
- sort.Sort(slice)
-
- return
-}
-
-// Negotiate the most appropriate content_type given the accept header
-// and a list of alternatives.
-func Negotiate(header string, alternatives []string) (content_type string) {
- asp := make([][]string, 0, len(alternatives))
- for _, ctype := range alternatives {
- asp = append(asp, strings.SplitN(ctype, "/", 2))
- }
- for _, clause := range ParseAccept(header) {
- for i, ctsp := range asp {
- if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
- content_type = alternatives[i]
- return
- }
- if clause.Type == ctsp[0] && clause.SubType == "*" {
- content_type = alternatives[i]
- return
- }
- if clause.Type == "*" && clause.SubType == "*" {
- content_type = alternatives[i]
- return
- }
- }
- }
- return
-}
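
Since goautoneg is vendored under internal/, it cannot be imported from outside prometheus/common. The sketch below therefore re-implements only the matching core of the deleted Negotiate for illustration; clause sorting and q-value parsing are omitted, and all identifiers are hypothetical:

```go
package main

import "fmt"

// clause is a stand-in for goautoneg.Accept, assumed to be already sorted
// by preference.
type clause struct{ typ, sub string }

// negotiate mirrors the removed Negotiate's matching rules: exact
// type/subtype match, then "type/*", then "*/*".
func negotiate(clauses []clause, alternatives [][2]string) string {
	for _, c := range clauses {
		for _, alt := range alternatives {
			switch {
			case c.typ == alt[0] && c.sub == alt[1],
				c.typ == alt[0] && c.sub == "*",
				c.typ == "*" && c.sub == "*":
				return alt[0] + "/" + alt[1]
			}
		}
	}
	return ""
}

func main() {
	clauses := []clause{{"application", "json"}, {"*", "*"}}
	alts := [][2]string{{"text", "plain"}, {"application", "json"}}
	fmt.Println(negotiate(clauses, alts)) // application/json
}
```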
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
deleted file mode 100644
index 35e739c7ad2..00000000000
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "time"
-)
-
-type AlertStatus string
-
-const (
- AlertFiring AlertStatus = "firing"
- AlertResolved AlertStatus = "resolved"
-)
-
-// Alert is a generic representation of an alert in the Prometheus eco-system.
-type Alert struct {
-	// Label value pairs for the purpose of aggregation, matching, and disposition
- // dispatching. This must minimally include an "alertname" label.
- Labels LabelSet `json:"labels"`
-
- // Extra key/value information which does not define alert identity.
- Annotations LabelSet `json:"annotations"`
-
- // The known time range for this alert. Both ends are optional.
- StartsAt time.Time `json:"startsAt,omitempty"`
- EndsAt time.Time `json:"endsAt,omitempty"`
- GeneratorURL string `json:"generatorURL"`
-}
-
-// Name returns the name of the alert. It is equivalent to the "alertname" label.
-func (a *Alert) Name() string {
- return string(a.Labels[AlertNameLabel])
-}
-
-// Fingerprint returns a unique hash for the alert. It is equivalent to
-// the fingerprint of the alert's label set.
-func (a *Alert) Fingerprint() Fingerprint {
- return a.Labels.Fingerprint()
-}
-
-func (a *Alert) String() string {
- s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
- if a.Resolved() {
- return s + "[resolved]"
- }
- return s + "[active]"
-}
-
-// Resolved returns true iff the activity interval ended in the past.
-func (a *Alert) Resolved() bool {
- return a.ResolvedAt(time.Now())
-}
-
-// ResolvedAt returns true iff the activity interval ended before
-// the given timestamp.
-func (a *Alert) ResolvedAt(ts time.Time) bool {
- if a.EndsAt.IsZero() {
- return false
- }
- return !a.EndsAt.After(ts)
-}
-
-// Status returns the status of the alert.
-func (a *Alert) Status() AlertStatus {
- if a.Resolved() {
- return AlertResolved
- }
- return AlertFiring
-}
-
-// Validate checks whether the alert data is consistent, returning an error if not.
-func (a *Alert) Validate() error {
- if a.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
- }
- if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
- return fmt.Errorf("start time must be before end time")
- }
- if err := a.Labels.Validate(); err != nil {
- return fmt.Errorf("invalid label set: %s", err)
- }
- if len(a.Labels) == 0 {
- return fmt.Errorf("at least one label pair required")
- }
- if err := a.Annotations.Validate(); err != nil {
- return fmt.Errorf("invalid annotations: %s", err)
- }
- return nil
-}
-
-// Alerts is a list of alerts that can be sorted in chronological order.
-type Alerts []*Alert
-
-func (as Alerts) Len() int { return len(as) }
-func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
-
-func (as Alerts) Less(i, j int) bool {
- if as[i].StartsAt.Before(as[j].StartsAt) {
- return true
- }
- if as[i].EndsAt.Before(as[j].EndsAt) {
- return true
- }
- return as[i].Fingerprint() < as[j].Fingerprint()
-}
-
-// HasFiring returns true iff one of the alerts is not resolved.
-func (as Alerts) HasFiring() bool {
- for _, a := range as {
- if !a.Resolved() {
- return true
- }
- }
- return false
-}
-
-// Status returns StatusFiring iff at least one of the alerts is firing.
-func (as Alerts) Status() AlertStatus {
- if as.HasFiring() {
- return AlertFiring
- }
- return AlertResolved
-}
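
The alert model deleted here is self-contained enough for a short sketch of its validation and status logic (the label values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	alert := &model.Alert{
		Labels:   model.LabelSet{model.AlertNameLabel: "HighLatency"},
		StartsAt: time.Now().Add(-5 * time.Minute),
		// A zero EndsAt leaves the activity interval open, so the
		// alert is still firing.
	}
	if err := alert.Validate(); err != nil {
		panic(err)
	}
	fmt.Println(alert.Name(), alert.Status()) // HighLatency firing
}
```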
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
deleted file mode 100644
index fc4de4106e8..00000000000
--- a/vendor/github.com/prometheus/common/model/fingerprinting.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "strconv"
-)
-
-// Fingerprint provides a hash-capable representation of a Metric.
-// For our purposes, FNV-1A 64-bit is used.
-type Fingerprint uint64
-
-// FingerprintFromString transforms a string representation into a Fingerprint.
-func FingerprintFromString(s string) (Fingerprint, error) {
- num, err := strconv.ParseUint(s, 16, 64)
- return Fingerprint(num), err
-}
-
-// ParseFingerprint parses the input string into a fingerprint.
-func ParseFingerprint(s string) (Fingerprint, error) {
- num, err := strconv.ParseUint(s, 16, 64)
- if err != nil {
- return 0, err
- }
- return Fingerprint(num), nil
-}
-
-func (f Fingerprint) String() string {
- return fmt.Sprintf("%016x", uint64(f))
-}
-
-// Fingerprints represents a collection of Fingerprint subject to a given
-// natural sorting scheme. It implements sort.Interface.
-type Fingerprints []Fingerprint
-
-// Len implements sort.Interface.
-func (f Fingerprints) Len() int {
- return len(f)
-}
-
-// Less implements sort.Interface.
-func (f Fingerprints) Less(i, j int) bool {
- return f[i] < f[j]
-}
-
-// Swap implements sort.Interface.
-func (f Fingerprints) Swap(i, j int) {
- f[i], f[j] = f[j], f[i]
-}
-
-// FingerprintSet is a set of Fingerprints.
-type FingerprintSet map[Fingerprint]struct{}
-
-// Equal returns true if both sets contain the same elements (and not more).
-func (s FingerprintSet) Equal(o FingerprintSet) bool {
- if len(s) != len(o) {
- return false
- }
-
- for k := range s {
- if _, ok := o[k]; !ok {
- return false
- }
- }
-
- return true
-}
-
-// Intersection returns the elements contained in both sets.
-func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
- myLength, otherLength := len(s), len(o)
- if myLength == 0 || otherLength == 0 {
- return FingerprintSet{}
- }
-
- subSet := s
- superSet := o
-
- if otherLength < myLength {
- subSet = o
- superSet = s
- }
-
- out := FingerprintSet{}
-
- for k := range subSet {
- if _, ok := superSet[k]; ok {
- out[k] = struct{}{}
- }
- }
-
- return out
-}
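
A quick sketch of the deleted fingerprint round-trip, assuming the pre-bump model API:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Fingerprints parse from and render to zero-padded hex.
	fp, err := model.ParseFingerprint("00ff00ff00ff00ff")
	if err != nil {
		panic(err)
	}
	fmt.Println(fp.String()) // 00ff00ff00ff00ff
}
```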
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
deleted file mode 100644
index 038fc1c9003..00000000000
--- a/vendor/github.com/prometheus/common/model/fnv.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
- offset64 = 14695981039346656037
- prime64 = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
- return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
- for i := 0; i < len(s); i++ {
- h ^= uint64(s[i])
- h *= prime64
- }
- return h
-}
-
-// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
-func hashAddByte(h uint64, b byte) uint64 {
- h ^= uint64(b)
- h *= prime64
- return h
-}
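
The inlined FNV-1a above (offset64 and prime64 are the standard FNV constants) computes the same function as the standard library's hash/fnv; inlining merely avoids allocating a hash.Hash64 per call. A quick equivalence check:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	data := []byte("__name__")

	// Inline variant, as in the removed file.
	var h uint64 = 14695981039346656037 // offset64
	for _, b := range data {
		h ^= uint64(b)
		h *= 1099511628211 // prime64
	}

	// Standard-library FNV-1a for comparison.
	std := fnv.New64a()
	std.Write(data)

	fmt.Println(h == std.Sum64()) // true
}
```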
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
deleted file mode 100644
index 41051a01a36..00000000000
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "strings"
- "unicode/utf8"
-)
-
-const (
-	// AlertNameLabel is the name of the label containing an alert's name.
- AlertNameLabel = "alertname"
-
- // ExportedLabelPrefix is the prefix to prepend to the label names present in
- // exported metrics if a label of the same name is added by the server.
- ExportedLabelPrefix = "exported_"
-
- // MetricNameLabel is the label name indicating the metric name of a
- // timeseries.
- MetricNameLabel = "__name__"
-
- // SchemeLabel is the name of the label that holds the scheme on which to
- // scrape a target.
- SchemeLabel = "__scheme__"
-
- // AddressLabel is the name of the label that holds the address of
- // a scrape target.
- AddressLabel = "__address__"
-
- // MetricsPathLabel is the name of the label that holds the path on which to
- // scrape a target.
- MetricsPathLabel = "__metrics_path__"
-
- // ReservedLabelPrefix is a prefix which is not legal in user-supplied
- // label names.
- ReservedLabelPrefix = "__"
-
- // MetaLabelPrefix is a prefix for labels that provide meta information.
- // Labels with this prefix are used for intermediate label processing and
- // will not be attached to time series.
- MetaLabelPrefix = "__meta_"
-
- // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
- // Labels with this prefix are used for intermediate label processing and
- // will not be attached to time series. This is reserved for use in
- // Prometheus configuration files by users.
- TmpLabelPrefix = "__tmp_"
-
- // ParamLabelPrefix is a prefix for labels that provide URL parameters
- // used to scrape a target.
- ParamLabelPrefix = "__param_"
-
- // JobLabel is the label name indicating the job from which a timeseries
- // was scraped.
- JobLabel = "job"
-
- // InstanceLabel is the label name used for the instance label.
- InstanceLabel = "instance"
-
- // BucketLabel is used for the label that defines the upper bound of a
- // bucket of a histogram ("le" -> "less or equal").
- BucketLabel = "le"
-
- // QuantileLabel is used for the label that defines the quantile in a
- // summary.
- QuantileLabel = "quantile"
-)
-
-// LabelNameRE is a regular expression matching valid label names. Note that the
-// IsValid method of LabelName performs the same check but faster than a match
-// with this regular expression.
-var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-
-// A LabelName is a key for a LabelSet or Metric. It has a value associated
-// therewith.
-type LabelName string
-
-// IsValid is true iff the label name matches the pattern of LabelNameRE. This
-// method, however, does not use LabelNameRE for the check but a much faster
-// hardcoded implementation.
-func (ln LabelName) IsValid() bool {
- if len(ln) == 0 {
- return false
- }
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
- return true
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- if !LabelName(s).IsValid() {
- return fmt.Errorf("%q is not a valid label name", s)
- }
- *ln = LabelName(s)
- return nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (ln *LabelName) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- if !LabelName(s).IsValid() {
- return fmt.Errorf("%q is not a valid label name", s)
- }
- *ln = LabelName(s)
- return nil
-}
-
-// LabelNames is a sortable LabelName slice. It implements sort.Interface.
-type LabelNames []LabelName
-
-func (l LabelNames) Len() int {
- return len(l)
-}
-
-func (l LabelNames) Less(i, j int) bool {
- return l[i] < l[j]
-}
-
-func (l LabelNames) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-func (l LabelNames) String() string {
- labelStrings := make([]string, 0, len(l))
- for _, label := range l {
- labelStrings = append(labelStrings, string(label))
- }
- return strings.Join(labelStrings, ", ")
-}
-
-// A LabelValue is an associated value for a LabelName.
-type LabelValue string
-
-// IsValid returns true iff the string is valid UTF-8.
-func (lv LabelValue) IsValid() bool {
- return utf8.ValidString(string(lv))
-}
-
-// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
-type LabelValues []LabelValue
-
-func (l LabelValues) Len() int {
- return len(l)
-}
-
-func (l LabelValues) Less(i, j int) bool {
- return string(l[i]) < string(l[j])
-}
-
-func (l LabelValues) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-// LabelPair pairs a name with a value.
-type LabelPair struct {
- Name LabelName
- Value LabelValue
-}
-
-// LabelPairs is a sortable slice of LabelPair pointers. It implements
-// sort.Interface.
-type LabelPairs []*LabelPair
-
-func (l LabelPairs) Len() int {
- return len(l)
-}
-
-func (l LabelPairs) Less(i, j int) bool {
- switch {
- case l[i].Name > l[j].Name:
- return false
- case l[i].Name < l[j].Name:
- return true
- case l[i].Value > l[j].Value:
- return false
- case l[i].Value < l[j].Value:
- return true
- default:
- return false
- }
-}
-
-func (l LabelPairs) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
deleted file mode 100644
index 6eda08a7395..00000000000
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "sort"
- "strings"
-)
-
-// A LabelSet is a collection of LabelName and LabelValue pairs. A LabelSet
-// may be fully qualified, in which case it resolves to a single Metric in the
-// data store; otherwise, operations on the LabelSet may match a whole vector
-// of Metric entities.
-type LabelSet map[LabelName]LabelValue
-
-// Validate checks whether all names and values in the label set
-// are valid.
-func (ls LabelSet) Validate() error {
- for ln, lv := range ls {
- if !ln.IsValid() {
- return fmt.Errorf("invalid name %q", ln)
- }
- if !lv.IsValid() {
- return fmt.Errorf("invalid value %q", lv)
- }
- }
- return nil
-}
-
-// Equal returns true iff both label sets have exactly the same key/value pairs.
-func (ls LabelSet) Equal(o LabelSet) bool {
- if len(ls) != len(o) {
- return false
- }
- for ln, lv := range ls {
- olv, ok := o[ln]
- if !ok {
- return false
- }
- if olv != lv {
- return false
- }
- }
- return true
-}
-
-// Before compares the label sets, using the following criteria:
-//
-// If ls has fewer labels than o, it is before o. If it has more, it is not.
-//
-// If the number of labels is the same, the superset of all label names is
-// sorted alphanumerically. The first differing label pair found in that order
-// determines the outcome: if the label does not exist at all in ls, then ls
-// is before o, and vice versa. Otherwise the label value is compared
-// alphanumerically.
-//
-// If ls and o are equal, the method returns false.
-func (ls LabelSet) Before(o LabelSet) bool {
- if len(ls) < len(o) {
- return true
- }
- if len(ls) > len(o) {
- return false
- }
-
- lns := make(LabelNames, 0, len(ls)+len(o))
- for ln := range ls {
- lns = append(lns, ln)
- }
- for ln := range o {
- lns = append(lns, ln)
- }
- // It's probably not worth it to de-dup lns.
- sort.Sort(lns)
- for _, ln := range lns {
- mlv, ok := ls[ln]
- if !ok {
- return true
- }
- olv, ok := o[ln]
- if !ok {
- return false
- }
- if mlv < olv {
- return true
- }
- if mlv > olv {
- return false
- }
- }
- return false
-}
-
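-// Worked example (an editor's sketch, not part of the original file) of the
-// ordering rules above:
-//
-//	a := LabelSet{"job": "api"}
-//	b := LabelSet{"instance": "x", "job": "api"}
-//	a.Before(b) // true: a has fewer labels
-//
-//	c := LabelSet{"job": "api", "zone": "eu"}
-//	d := LabelSet{"job": "api", "zone": "us"}
-//	c.Before(d) // true: first differing value, "eu" < "us"
-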
-// Clone returns a copy of the label set.
-func (ls LabelSet) Clone() LabelSet {
- lsn := make(LabelSet, len(ls))
- for ln, lv := range ls {
- lsn[ln] = lv
- }
- return lsn
-}
-
-// Merge is a helper function to non-destructively merge two label sets.
-func (l LabelSet) Merge(other LabelSet) LabelSet {
- result := make(LabelSet, len(l))
-
- for k, v := range l {
- result[k] = v
- }
-
- for k, v := range other {
- result[k] = v
- }
-
- return result
-}
-
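-// Editor's sketch (not in the original file): Merge never mutates either
-// operand, and the argument wins on conflicting names:
-//
-//	base := LabelSet{"job": "api", "env": "dev"}
-//	base.Merge(LabelSet{"env": "prod"}) // {env="prod", job="api"}
-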
-func (l LabelSet) String() string {
- lstrs := make([]string, 0, len(l))
- for l, v := range l {
- lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
- }
-
- sort.Strings(lstrs)
- return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
-
-// Fingerprint returns the LabelSet's fingerprint.
-func (ls LabelSet) Fingerprint() Fingerprint {
- return labelSetToFingerprint(ls)
-}
-
-// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
-// algorithm, which is, however, more susceptible to hash collisions.
-func (ls LabelSet) FastFingerprint() Fingerprint {
- return labelSetToFastFingerprint(ls)
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (l *LabelSet) UnmarshalJSON(b []byte) error {
- var m map[LabelName]LabelValue
- if err := json.Unmarshal(b, &m); err != nil {
- return err
- }
- // encoding/json only unmarshals maps of the form map[string]T. It treats
- // LabelName as a string and does not call its UnmarshalJSON method.
- // Thus, we have to replicate the behavior here.
- for ln := range m {
- if !ln.IsValid() {
- return fmt.Errorf("%q is not a valid label name", ln)
- }
- }
- *l = LabelSet(m)
- return nil
-}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
deleted file mode 100644
index 00804b7fedb..00000000000
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "regexp"
- "sort"
- "strings"
-)
-
-var (
- // MetricNameRE is a regular expression matching valid metric
- // names. Note that the IsValidMetricName function performs the same
- // check but faster than a match with this regular expression.
- MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
-)
-
-// A Metric is similar to a LabelSet, but the key difference is that a Metric is
-// a singleton and refers to one and only one stream of samples.
-type Metric LabelSet
-
-// Equal compares the metrics.
-func (m Metric) Equal(o Metric) bool {
- return LabelSet(m).Equal(LabelSet(o))
-}
-
-// Before compares the metrics' underlying label sets.
-func (m Metric) Before(o Metric) bool {
- return LabelSet(m).Before(LabelSet(o))
-}
-
-// Clone returns a copy of the Metric.
-func (m Metric) Clone() Metric {
- clone := make(Metric, len(m))
- for k, v := range m {
- clone[k] = v
- }
- return clone
-}
-
-func (m Metric) String() string {
- metricName, hasName := m[MetricNameLabel]
- numLabels := len(m) - 1
- if !hasName {
- numLabels = len(m)
- }
- labelStrings := make([]string, 0, numLabels)
- for label, value := range m {
- if label != MetricNameLabel {
- labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
- }
- }
-
- switch numLabels {
- case 0:
- if hasName {
- return string(metricName)
- }
- return "{}"
- default:
- sort.Strings(labelStrings)
- return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
- }
-}
-
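-// Editor's sketch (not part of the original file): the metric name, when
-// present under MetricNameLabel, is printed outside the braces:
-//
-//	Metric{"__name__": "up", "job": "api"}.String() // `up{job="api"}`
-//	Metric{"job": "api"}.String()                   // `{job="api"}`
-//	Metric{"__name__": "up"}.String()               // `up`
-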
-// Fingerprint returns a Metric's Fingerprint.
-func (m Metric) Fingerprint() Fingerprint {
- return LabelSet(m).Fingerprint()
-}
-
-// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
-// algorithm, which is, however, more susceptible to hash collisions.
-func (m Metric) FastFingerprint() Fingerprint {
- return LabelSet(m).FastFingerprint()
-}
-
-// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
-// This function, however, does not use MetricNameRE for the check but a much
-// faster hardcoded implementation.
-func IsValidMetricName(n LabelValue) bool {
- if len(n) == 0 {
- return false
- }
- for i, b := range n {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
deleted file mode 100644
index a7b9691707e..00000000000
--- a/vendor/github.com/prometheus/common/model/model.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package model contains common data structures that are shared across
-// Prometheus components and libraries.
-package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
deleted file mode 100644
index 8762b13c63d..00000000000
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "sort"
-)
-
-// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
-// used to separate label names, label values, and other strings from each other
-// when calculating their combined hash value (aka signature aka fingerprint).
-const SeparatorByte byte = 255
-
-var (
- // cache the signature of an empty label set.
- emptyLabelSignature = hashNew()
-)
-
-// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
-// given label set. (Collisions are possible but unlikely if the number of label
-// sets the function is applied to is small.)
-func LabelsToSignature(labels map[string]string) uint64 {
- if len(labels) == 0 {
- return emptyLabelSignature
- }
-
- labelNames := make([]string, 0, len(labels))
- for labelName := range labels {
- labelNames = append(labelNames, labelName)
- }
- sort.Strings(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, labelName)
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, labels[labelName])
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
-
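-// Editor's note (not in the original file): since the label names are sorted
-// before hashing, the signature is independent of map iteration order:
-//
-//	s1 := LabelsToSignature(map[string]string{"a": "1", "b": "2"})
-//	s2 := LabelsToSignature(map[string]string{"b": "2", "a": "1"})
-//	s1 == s2 // always true
-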
-// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
-// parameter (rather than a label map) and returns a Fingerprint.
-func labelSetToFingerprint(ls LabelSet) Fingerprint {
- if len(ls) == 0 {
- return Fingerprint(emptyLabelSignature)
- }
-
- labelNames := make(LabelNames, 0, len(ls))
- for labelName := range ls {
- labelNames = append(labelNames, labelName)
- }
- sort.Sort(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(ls[labelName]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return Fingerprint(sum)
-}
-
-// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
-// a faster, less allocation-heavy hash function, which is more susceptible to
-// hash collisions. Therefore, collision detection should be applied.
-func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
- if len(ls) == 0 {
- return Fingerprint(emptyLabelSignature)
- }
-
- var result uint64
- for labelName, labelValue := range ls {
- sum := hashNew()
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(labelValue))
- result ^= sum
- }
- return Fingerprint(result)
-}
-
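-// Editor's note (not in the original file): XOR-ing the per-pair hashes above
-// makes the result order-independent without sorting, which is what makes the
-// "fast" variant fast, but it also weakens the hash: any two label sets whose
-// per-pair hashes XOR to the same value collide, hence the advice to apply
-// collision detection.
-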
-// SignatureForLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and only includes the labels with the
-// specified LabelNames into the signature calculation. The labels passed in
-// will be sorted by this function.
-func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
- if len(labels) == 0 {
- return emptyLabelSignature
- }
-
- sort.Sort(LabelNames(labels))
-
- sum := hashNew()
- for _, label := range labels {
- sum = hashAdd(sum, string(label))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[label]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
-
-// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and excludes the labels with any of the
-// specified LabelNames from the signature calculation.
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
- if len(m) == 0 {
- return emptyLabelSignature
- }
-
- labelNames := make(LabelNames, 0, len(m))
- for labelName := range m {
- if _, exclude := labels[labelName]; !exclude {
- labelNames = append(labelNames, labelName)
- }
- }
- if len(labelNames) == 0 {
- return emptyLabelSignature
- }
- sort.Sort(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[labelName]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
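-
-// Editor's sketch (not part of the original file): SignatureForLabels selects
-// labels, SignatureWithoutLabels excludes them, so the two agree when given
-// complementary sets:
-//
-//	m := Metric{"a": "1", "b": "2"}
-//	SignatureForLabels(m, "a") ==
-//		SignatureWithoutLabels(m, map[LabelName]struct{}{"b": {}}) // true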
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
deleted file mode 100644
index bb99889d2cc..00000000000
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "time"
-)
-
-// A Matcher describes how to match the value of a given label.
-type Matcher struct {
- Name LabelName `json:"name"`
- Value string `json:"value"`
- IsRegex bool `json:"isRegex"`
-}
-
-func (m *Matcher) UnmarshalJSON(b []byte) error {
- type plain Matcher
- if err := json.Unmarshal(b, (*plain)(m)); err != nil {
- return err
- }
-
- if len(m.Name) == 0 {
- return fmt.Errorf("label name in matcher must not be empty")
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Validate returns nil iff all fields of the matcher have valid values.
-func (m *Matcher) Validate() error {
- if !m.Name.IsValid() {
- return fmt.Errorf("invalid name %q", m.Name)
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return fmt.Errorf("invalid regular expression %q", m.Value)
- }
- } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
- return fmt.Errorf("invalid value %q", m.Value)
- }
- return nil
-}
-
-// Silence defines the representation of a silence definition in the
-// Prometheus ecosystem.
-type Silence struct {
- ID uint64 `json:"id,omitempty"`
-
- Matchers []*Matcher `json:"matchers"`
-
- StartsAt time.Time `json:"startsAt"`
- EndsAt time.Time `json:"endsAt"`
-
- CreatedAt time.Time `json:"createdAt,omitempty"`
- CreatedBy string `json:"createdBy"`
- Comment string `json:"comment,omitempty"`
-}
-
-// Validate returns nil iff all fields of the silence have valid values.
-func (s *Silence) Validate() error {
- if len(s.Matchers) == 0 {
- return fmt.Errorf("at least one matcher required")
- }
- for _, m := range s.Matchers {
- if err := m.Validate(); err != nil {
- return fmt.Errorf("invalid matcher: %s", err)
- }
- }
- if s.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
- }
- if s.EndsAt.IsZero() {
- return fmt.Errorf("end time missing")
- }
- if s.EndsAt.Before(s.StartsAt) {
- return fmt.Errorf("start time must be before end time")
- }
- if s.CreatedBy == "" {
- return fmt.Errorf("creator information missing")
- }
- if s.Comment == "" {
- return fmt.Errorf("comment missing")
- }
- if s.CreatedAt.IsZero() {
- return fmt.Errorf("creation timestamp missing")
- }
- return nil
-}
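-
-// Editor's sketch (not in the original file): a Silence passes Validate only
-// once matchers, both timestamps, creator, comment, and creation time are set:
-//
-//	now := time.Now()
-//	s := Silence{
-//		Matchers:  []*Matcher{{Name: "job", Value: "api"}},
-//		StartsAt:  now,
-//		EndsAt:    now.Add(time.Hour),
-//		CreatedAt: now,
-//		CreatedBy: "ops",
-//		Comment:   "planned maintenance",
-//	}
-//	s.Validate() // nil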
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
deleted file mode 100644
index 490a0240c10..00000000000
--- a/vendor/github.com/prometheus/common/model/time.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "math"
- "regexp"
- "strconv"
- "strings"
- "time"
-)
-
-const (
-	// MinimumTick is the minimum supported time resolution. This has to be
-	// at most time.Second in order for the code below to work.
- minimumTick = time.Millisecond
- // second is the Time duration equivalent to one second.
- second = int64(time.Second / minimumTick)
- // The number of nanoseconds per minimum tick.
- nanosPerTick = int64(minimumTick / time.Nanosecond)
-
- // Earliest is the earliest Time representable. Handy for
- // initializing a high watermark.
- Earliest = Time(math.MinInt64)
- // Latest is the latest Time representable. Handy for initializing
- // a low watermark.
- Latest = Time(math.MaxInt64)
-)
-
-// Time is the number of milliseconds since the epoch
-// (1970-01-01 00:00 UTC) excluding leap seconds.
-type Time int64
-
-// Interval describes an interval between two timestamps.
-type Interval struct {
- Start, End Time
-}
-
-// Now returns the current time as a Time.
-func Now() Time {
- return TimeFromUnixNano(time.Now().UnixNano())
-}
-
-// TimeFromUnix returns the Time equivalent to the Unix Time t
-// provided in seconds.
-func TimeFromUnix(t int64) Time {
- return Time(t * second)
-}
-
-// TimeFromUnixNano returns the Time equivalent to the Unix Time
-// t provided in nanoseconds.
-func TimeFromUnixNano(t int64) Time {
- return Time(t / nanosPerTick)
-}
-
-// Equal reports whether two Times represent the same instant.
-func (t Time) Equal(o Time) bool {
- return t == o
-}
-
-// Before reports whether the Time t is before o.
-func (t Time) Before(o Time) bool {
- return t < o
-}
-
-// After reports whether the Time t is after o.
-func (t Time) After(o Time) bool {
- return t > o
-}
-
-// Add returns the Time t + d.
-func (t Time) Add(d time.Duration) Time {
- return t + Time(d/minimumTick)
-}
-
-// Sub returns the Duration t - o.
-func (t Time) Sub(o Time) time.Duration {
- return time.Duration(t-o) * minimumTick
-}
-
-// Time returns the time.Time representation of t.
-func (t Time) Time() time.Time {
- return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
-}
-
-// Unix returns t as a Unix time, the number of seconds elapsed
-// since January 1, 1970 UTC.
-func (t Time) Unix() int64 {
- return int64(t) / second
-}
-
-// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
-// since January 1, 1970 UTC.
-func (t Time) UnixNano() int64 {
- return int64(t) * nanosPerTick
-}
-
-// The number of digits after the dot.
-var dotPrecision = int(math.Log10(float64(second)))
-
-// String returns a string representation of the Time.
-func (t Time) String() string {
- return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (t Time) MarshalJSON() ([]byte, error) {
- return []byte(t.String()), nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (t *Time) UnmarshalJSON(b []byte) error {
- p := strings.Split(string(b), ".")
- switch len(p) {
- case 1:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
- if err != nil {
- return err
- }
- *t = Time(v * second)
-
- case 2:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
- if err != nil {
- return err
- }
- v *= second
-
- prec := dotPrecision - len(p[1])
- if prec < 0 {
- p[1] = p[1][:dotPrecision]
- } else if prec > 0 {
- p[1] = p[1] + strings.Repeat("0", prec)
- }
-
- va, err := strconv.ParseInt(p[1], 10, 32)
- if err != nil {
- return err
- }
-
-		// If the value was something like -0.1 the negative is lost in the
-		// parsing because of the leading zero; this ensures that we capture it.
- if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
- *t = Time(v+va) * -1
- } else {
- *t = Time(v + va)
- }
-
- default:
- return fmt.Errorf("invalid time %q", string(b))
- }
- return nil
-}
-
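-// Editor's sketch (not part of the original file): timestamps serialize as
-// decimal seconds with millisecond precision:
-//
-//	var t Time
-//	_ = t.UnmarshalJSON([]byte("1.5"))  // t == 1500 (ms since the epoch)
-//	_ = t.UnmarshalJSON([]byte("-0.1")) // t == -100; the sign is preserved
-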
-// Duration wraps time.Duration. It is used to parse the custom duration format
-// from YAML.
-// This type should not propagate beyond the scope of input/output processing.
-type Duration time.Duration
-
-// Set implements pflag/flag.Value
-func (d *Duration) Set(s string) error {
- var err error
- *d, err = ParseDuration(s)
- return err
-}
-
-// Type implements pflag.Value
-func (d *Duration) Type() string {
- return "duration"
-}
-
-var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
-
-// ParseDuration parses a string into a time.Duration, assuming that a year
-// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
- // Allow 0 without a unit.
- if durationStr == "0" {
- return 0, nil
- }
- matches := durationRE.FindStringSubmatch(durationStr)
- if len(matches) != 3 {
- return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
- }
- var (
- n, _ = strconv.Atoi(matches[1])
- dur = time.Duration(n) * time.Millisecond
- )
- switch unit := matches[2]; unit {
- case "y":
- dur *= 1000 * 60 * 60 * 24 * 365
- case "w":
- dur *= 1000 * 60 * 60 * 24 * 7
- case "d":
- dur *= 1000 * 60 * 60 * 24
- case "h":
- dur *= 1000 * 60 * 60
- case "m":
- dur *= 1000 * 60
- case "s":
- dur *= 1000
- case "ms":
- // Value already correct
- default:
- return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
- }
- return Duration(dur), nil
-}
-
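-// Editor's sketch (not in the original file): unlike time.ParseDuration, the
-// accepted format is a single integer followed by a single unit:
-//
-//	ParseDuration("90m")   // 90 minutes
-//	ParseDuration("2w")    // 14 days, since a week is always 7d
-//	ParseDuration("1h30m") // error: not a valid duration string
-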
-func (d Duration) String() string {
- var (
- ms = int64(time.Duration(d) / time.Millisecond)
- unit = "ms"
- )
- if ms == 0 {
- return "0s"
- }
- factors := map[string]int64{
- "y": 1000 * 60 * 60 * 24 * 365,
- "w": 1000 * 60 * 60 * 24 * 7,
- "d": 1000 * 60 * 60 * 24,
- "h": 1000 * 60 * 60,
- "m": 1000 * 60,
- "s": 1000,
- "ms": 1,
- }
-
- switch int64(0) {
- case ms % factors["y"]:
- unit = "y"
- case ms % factors["w"]:
- unit = "w"
- case ms % factors["d"]:
- unit = "d"
- case ms % factors["h"]:
- unit = "h"
- case ms % factors["m"]:
- unit = "m"
- case ms % factors["s"]:
- unit = "s"
- }
- return fmt.Sprintf("%v%v", ms/factors[unit], unit)
-}
-
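-// Editor's note (not part of the original file): String picks the largest
-// unit that divides the duration evenly, so values round-trip through
-// ParseDuration:
-//
-//	Duration(90 * time.Minute).String() // "90m", not "1h30m"
-//	Duration(2 * time.Hour).String()    // "2h"
-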
-// MarshalYAML implements the yaml.Marshaler interface.
-func (d Duration) MarshalYAML() (interface{}, error) {
- return d.String(), nil
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- dur, err := ParseDuration(s)
- if err != nil {
- return err
- }
- *d = dur
- return nil
-}
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
deleted file mode 100644
index c9d8fb1a283..00000000000
--- a/vendor/github.com/prometheus/common/model/value.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "math"
- "sort"
- "strconv"
- "strings"
-)
-
-var (
- // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
- // non-existing sample pair. It is a SamplePair with timestamp Earliest and
- // value 0.0. Note that the natural zero value of SamplePair has a timestamp
-	// of 0, which can appear in a real SamplePair and is thus not
-	// suitable to signal a non-existing SamplePair.
- ZeroSamplePair = SamplePair{Timestamp: Earliest}
-
- // ZeroSample is the pseudo zero-value of Sample used to signal a
- // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
- // and metric nil. Note that the natural zero value of Sample has a timestamp
-	// of 0, which can appear in a real Sample and is thus not suitable
-	// to signal a non-existing Sample.
- ZeroSample = Sample{Timestamp: Earliest}
-)
-
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = SampleValue(f)
- return nil
-}
-
-// Equal returns true if the values of v and o are equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
- if v == o {
- return true
- }
- return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
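-// Editor's sketch (not in the original file):
-//
-//	nan := SampleValue(math.NaN())
-//	nan == nan     // false, as for any NaN float64
-//	nan.Equal(nan) // true: Equal treats NaN as equal to itself
-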
-func (v SampleValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
- Timestamp Time
- Value SampleValue
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s SamplePair) MarshalJSON() ([]byte, error) {
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Value)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
- v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
- return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
-
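-// Editor's sketch (not part of the original file): the JSON wire format is a
-// two-element array, timestamp first, value as a quoted string:
-//
-//	b, _ := json.Marshal(SamplePair{Timestamp: 1500, Value: 0.5})
-//	string(b) // [1.5,"0.5"]
-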
-// Sample is a sample pair associated with a metric.
-type Sample struct {
- Metric Metric `json:"metric"`
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
-}
-
-// Equal compares first the metrics, then the timestamp, then the value. The
-// semantics of value equality is defined by SampleValue.Equal.
-func (s *Sample) Equal(o *Sample) bool {
- if s == o {
- return true
- }
-
- if !s.Metric.Equal(o.Metric) {
- return false
- }
- if !s.Timestamp.Equal(o.Timestamp) {
- return false
- }
-
- return s.Value.Equal(o.Value)
-}
-
-func (s Sample) String() string {
- return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- })
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s Sample) MarshalJSON() ([]byte, error) {
- v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
- }{
- Metric: s.Metric,
- Value: SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- },
- }
-
- return json.Marshal(&v)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *Sample) UnmarshalJSON(b []byte) error {
- v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
- }{
- Metric: s.Metric,
- Value: SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- },
- }
-
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
-
- s.Metric = v.Metric
- s.Timestamp = v.Value.Timestamp
- s.Value = v.Value.Value
-
- return nil
-}
-
-// Samples is a sortable Sample slice. It implements sort.Interface.
-type Samples []*Sample
-
-func (s Samples) Len() int {
- return len(s)
-}
-
-// Less compares first the metrics, then the timestamp.
-func (s Samples) Less(i, j int) bool {
- switch {
- case s[i].Metric.Before(s[j].Metric):
- return true
- case s[j].Metric.Before(s[i].Metric):
- return false
- case s[i].Timestamp.Before(s[j].Timestamp):
- return true
- default:
- return false
- }
-}
-
-func (s Samples) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-// Equal compares two sets of samples and returns true if they are equal.
-func (s Samples) Equal(o Samples) bool {
- if len(s) != len(o) {
- return false
- }
-
- for i, sample := range s {
- if !sample.Equal(o[i]) {
- return false
- }
- }
- return true
-}
-
-// SampleStream is a stream of Values belonging to an attached Metric.
-type SampleStream struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
-}
-
-func (ss SampleStream) String() string {
- vals := make([]string, len(ss.Values))
- for i, v := range ss.Values {
- vals[i] = v.String()
- }
- return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
-}
-
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
- Type() ValueType
- String() string
-}
-
-func (Matrix) Type() ValueType { return ValMatrix }
-func (Vector) Type() ValueType { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
-
-type ValueType int
-
-const (
- ValNone ValueType = iota
- ValScalar
- ValVector
- ValMatrix
- ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
- return json.Marshal(et.String())
-}
-
-func (et *ValueType) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- switch s {
- case "":
- *et = ValNone
- case "scalar":
- *et = ValScalar
- case "vector":
- *et = ValVector
- case "matrix":
- *et = ValMatrix
- case "string":
- *et = ValString
- default:
- return fmt.Errorf("unknown value type %q", s)
- }
- return nil
-}
-
-func (e ValueType) String() string {
- switch e {
- case ValNone:
- return ""
- case ValScalar:
- return "scalar"
- case ValVector:
- return "vector"
- case ValMatrix:
- return "matrix"
- case ValString:
- return "string"
- }
- panic("ValueType.String: unhandled value type")
-}
-
-// Scalar is a scalar value evaluated at the set timestamp.
-type Scalar struct {
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
-}
-
-func (s Scalar) String() string {
- return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s Scalar) MarshalJSON() ([]byte, error) {
- v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
- return json.Marshal([...]interface{}{s.Timestamp, string(v)})
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *Scalar) UnmarshalJSON(b []byte) error {
- var f string
- v := [...]interface{}{&s.Timestamp, &f}
-
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
-
- value, err := strconv.ParseFloat(f, 64)
- if err != nil {
- return fmt.Errorf("error parsing sample value: %s", err)
- }
- s.Value = SampleValue(value)
- return nil
-}
-
-// String is a string value evaluated at the set timestamp.
-type String struct {
- Value string `json:"value"`
- Timestamp Time `json:"timestamp"`
-}
-
-func (s *String) String() string {
- return s.Value
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s String) MarshalJSON() ([]byte, error) {
- return json.Marshal([]interface{}{s.Timestamp, s.Value})
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *String) UnmarshalJSON(b []byte) error {
- v := [...]interface{}{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Vector is basically only an alias for Samples, but the
-// contract is that in a Vector, all Samples have the same timestamp.
-type Vector []*Sample
-
-func (vec Vector) String() string {
- entries := make([]string, len(vec))
- for i, s := range vec {
- entries[i] = s.String()
- }
- return strings.Join(entries, "\n")
-}
-
-func (vec Vector) Len() int { return len(vec) }
-func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
-
-// Less compares first the metrics, then the timestamp.
-func (vec Vector) Less(i, j int) bool {
- switch {
- case vec[i].Metric.Before(vec[j].Metric):
- return true
- case vec[j].Metric.Before(vec[i].Metric):
- return false
- case vec[i].Timestamp.Before(vec[j].Timestamp):
- return true
- default:
- return false
- }
-}
-
-// Equal compares two sets of samples and returns true if they are equal.
-func (vec Vector) Equal(o Vector) bool {
- if len(vec) != len(o) {
- return false
- }
-
- for i, sample := range vec {
- if !sample.Equal(o[i]) {
- return false
- }
- }
- return true
-}
-
-// Matrix is a list of time series.
-type Matrix []*SampleStream
-
-func (m Matrix) Len() int { return len(m) }
-func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
-func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
-
-func (mat Matrix) String() string {
- matCp := make(Matrix, len(mat))
- copy(matCp, mat)
- sort.Sort(matCp)
-
- strs := make([]string, len(matCp))
-
- for i, ss := range matCp {
- strs[i] = ss.String()
- }
-
- return strings.Join(strs, "\n")
-}
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
deleted file mode 100644
index 25e3659ab25..00000000000
--- a/vendor/github.com/prometheus/procfs/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/fixtures/
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
deleted file mode 100644
index 0aa09edacb3..00000000000
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-linters:
- enable:
- - golint
diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
deleted file mode 100644
index 9a1aff41270..00000000000
--- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Prometheus Community Code of Conduct
-
-Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
deleted file mode 100644
index 943de7615ee..00000000000
--- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
+++ /dev/null
@@ -1,121 +0,0 @@
-# Contributing
-
-Prometheus uses GitHub to manage reviews of pull requests.
-
-* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute)
-
-* If you have a trivial fix or improvement, go ahead and create a pull request,
- addressing (with `@...`) a suitable maintainer of this repository (see
- [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
-
-* If you plan to do something more involved, first discuss your ideas
- on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
- This will avoid unnecessary work and surely give you and us a good deal
- of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.
-
-* Relevant coding style guidelines are the [Go Code Review
- Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
- and the _Formatting and style_ section of Peter Bourgon's [Go: Best
- Practices for Production
- Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
-
-* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)
-
-## Steps to Contribute
-
-Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue.
-
-Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on it and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community).
-
-For quickly compiling and testing your changes do:
-```
-make test # Make sure all the tests pass before you commit and push :)
-```
-
-We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
-
-## Pull Request Checklist
-
-* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes.
-
-* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
-
-* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
-
-* Add tests relevant to the fixed bug or new feature.
-
-## Dependency management
-
-The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed.
-
-All dependencies are vendored in the `vendor/` directory.
-
-To add or update a new dependency, use the `go get` command:
-
-```bash
-# Pick the latest tagged release.
-go get example.com/some/module/pkg
-
-# Pick a specific version.
-go get example.com/some/module/pkg@vX.Y.Z
-```
-
-Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory:
-
-
-```bash
-# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
-GO111MODULE=on go mod tidy
-
-GO111MODULE=on go mod vendor
-```
-
-You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request.
-
-
-## API Implementation Guidelines
-
-### Naming and Documentation
-
-Public functions and structs should normally be named according to the file(s) being read and parsed. For example,
-the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function
-should contain the path to the file(s) being read and a URL of the Linux kernel documentation describing the file(s).
-
-### Reading vs. Parsing
-
-Most functionality in this library consists of reading files and then parsing the text into structured data. In most
-cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and
-a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested
-directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types
-such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files
-in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead.
-
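-A minimal sketch of that shape (an editor's addition, not from the original
-document; `Thing` and `parseThing` are placeholder names, as in the paragraph
-above):
-
-```
-func (fs FS) Thing() (Thing, error) {
-	data, err := util.ReadFileNoStat(fs.proc.Path("thing"))
-	if err != nil {
-		return Thing{}, err
-	}
-	return parseThing(bytes.NewReader(data))
-}
-```
-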
-### /proc and /sys filesystem I/O
-
-The `proc` and `sys` filesystems are pseudo-filesystems and work a bit differently from standard disk I/O.
-Many of the files change continuously, and the data being read can in some cases change between subsequent
-reads of the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
-to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
-full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
-This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of
-the file.
-
-Note that parsing the file's contents can still be performed one line at a time. This is done by first reading
-the full file, and then using a scanner on the `[]byte` or `string` containing the data.
-
-```
- data, err := util.ReadFileNoStat("/proc/cpuinfo")
- if err != nil {
- return err
- }
- reader := bytes.NewReader(data)
- scanner := bufio.NewScanner(reader)
-```
-
-The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
-can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does
-not bother to check the size of the file before reading.
-```
- data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
-```
-
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
deleted file mode 100644
index 56ba67d3e31..00000000000
--- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md
+++ /dev/null
@@ -1,2 +0,0 @@
-* Johannes 'fish' Ziemke @discordianfish
-* Paul Gier @pgier
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
deleted file mode 100644
index 616a0d25eb0..00000000000
--- a/vendor/github.com/prometheus/procfs/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2018 The Prometheus Authors
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-include Makefile.common
-
-%/.unpacked: %.ttar
- @echo ">> extracting fixtures"
- ./ttar -C $(dir $*) -x -f $*.ttar
- touch $@
-
-update_fixtures:
- rm -vf fixtures/.unpacked
- ./ttar -c -f fixtures.ttar fixtures/
-
-.PHONY: build
-build:
-
-.PHONY: test
-test: fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
deleted file mode 100644
index 3ac29c636c6..00000000000
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ /dev/null
@@ -1,302 +0,0 @@
-# Copyright 2018 The Prometheus Authors
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# A common Makefile that includes rules to be reused in different prometheus projects.
-# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
-
-# Example usage :
-# Create the main Makefile in the root project directory.
-# include Makefile.common
-# customTarget:
-# @echo ">> Running customTarget"
-#
-
-# Ensure GOBIN is not set during build so that promu is installed to the correct path
-unexport GOBIN
-
-GO ?= go
-GOFMT ?= $(GO)fmt
-FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
-GOOPTS ?=
-GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
-GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
-
-GO_VERSION ?= $(shell $(GO) version)
-GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
-PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
-
-GOVENDOR :=
-GO111MODULE :=
-ifeq (, $(PRE_GO_111))
- ifneq (,$(wildcard go.mod))
- # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
- GO111MODULE := on
-
- ifneq (,$(wildcard vendor))
- # Always use the local vendor/ directory to satisfy the dependencies.
- GOOPTS := $(GOOPTS) -mod=vendor
- endif
- endif
-else
- ifneq (,$(wildcard go.mod))
- ifneq (,$(wildcard vendor))
-$(warning This repository requires Go >= 1.11 because of Go modules)
-$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
- endif
- else
- # This repository isn't using Go modules (yet).
- GOVENDOR := $(FIRST_GOPATH)/bin/govendor
- endif
-endif
-PROMU := $(FIRST_GOPATH)/bin/promu
-pkgs = ./...
-
-ifeq (arm, $(GOHOSTARCH))
- GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
- GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
-else
- GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
-endif
-
-GOTEST := $(GO) test
-GOTEST_DIR :=
-ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell which gotestsum),)
- GOTEST_DIR := test-results
- GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
-endif
-endif
-
-PROMU_VERSION ?= 0.7.0
-PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
-
-GOLANGCI_LINT :=
-GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.18.0
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
-# windows isn't included here because of the path separator being different.
-ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
- ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
- GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
- endif
-endif
-
-PREFIX ?= $(shell pwd)
-BIN_DIR ?= $(shell pwd)
-DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
-DOCKERFILE_PATH ?= ./Dockerfile
-DOCKERBUILD_CONTEXT ?= ./
-DOCKER_REPO ?= prom
-
-DOCKER_ARCHS ?= amd64
-
-BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
-PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
-TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
-
-ifeq ($(GOHOSTARCH),amd64)
- ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
- # Only supported on amd64
- test-flags := -race
- endif
-endif
-
-# This rule is used to forward a target like "build" to "common-build". This
-# allows a new "build" target to be defined in a Makefile which includes this
-# one and override "common-build" without override warnings.
-%: common-% ;
-
-.PHONY: common-all
-common-all: precheck style check_license lint unused build test
-
-.PHONY: common-style
-common-style:
- @echo ">> checking code style"
- @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
- if [ -n "$${fmtRes}" ]; then \
- echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
- echo "Please ensure you are using $$($(GO) version) for formatting code."; \
- exit 1; \
- fi
-
-.PHONY: common-check_license
-common-check_license:
- @echo ">> checking license header"
- @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
- awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
- done); \
- if [ -n "$${licRes}" ]; then \
- echo "license header checking failed:"; echo "$${licRes}"; \
- exit 1; \
- fi
-
-.PHONY: common-deps
-common-deps:
- @echo ">> getting dependencies"
-ifdef GO111MODULE
- GO111MODULE=$(GO111MODULE) $(GO) mod download
-else
- $(GO) get $(GOOPTS) -t ./...
-endif
-
-.PHONY: update-go-deps
-update-go-deps:
- @echo ">> updating Go dependencies"
- @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
- $(GO) get $$m; \
- done
- GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifneq (,$(wildcard vendor))
- GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-endif
-
-.PHONY: common-test-short
-common-test-short: $(GOTEST_DIR)
- @echo ">> running short tests"
- GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)
-
-.PHONY: common-test
-common-test: $(GOTEST_DIR)
- @echo ">> running all tests"
- GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
-
-$(GOTEST_DIR):
- @mkdir -p $@
-
-.PHONY: common-format
-common-format:
- @echo ">> formatting code"
- GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
-
-.PHONY: common-vet
-common-vet:
- @echo ">> vetting code"
- GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
-
-.PHONY: common-lint
-common-lint: $(GOLANGCI_LINT)
-ifdef GOLANGCI_LINT
- @echo ">> running golangci-lint"
-ifdef GO111MODULE
-# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
-# Otherwise staticcheck might fail randomly for some reason not yet explained.
- GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
- GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
-else
- $(GOLANGCI_LINT) run $(pkgs)
-endif
-endif
-
-# For backward-compatibility.
-.PHONY: common-staticcheck
-common-staticcheck: lint
-
-.PHONY: common-unused
-common-unused: $(GOVENDOR)
-ifdef GOVENDOR
- @echo ">> running check for unused packages"
- @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
-else
-ifdef GO111MODULE
- @echo ">> running check for unused/missing packages in go.mod"
- GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifeq (,$(wildcard vendor))
- @git diff --exit-code -- go.sum go.mod
-else
- @echo ">> running check for unused packages in vendor/"
- GO111MODULE=$(GO111MODULE) $(GO) mod vendor
- @git diff --exit-code -- go.sum go.mod vendor/
-endif
-endif
-endif
-
-.PHONY: common-build
-common-build: promu
- @echo ">> building binaries"
- GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
-
-.PHONY: common-tarball
-common-tarball: promu
- @echo ">> building release tarball"
- $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
-
-.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
-common-docker: $(BUILD_DOCKER_ARCHS)
-$(BUILD_DOCKER_ARCHS): common-docker-%:
- docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
- -f $(DOCKERFILE_PATH) \
- --build-arg ARCH="$*" \
- --build-arg OS="linux" \
- $(DOCKERBUILD_CONTEXT)
-
-.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
-common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
-$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
-
-DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
-.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
-common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
-$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
-
-.PHONY: common-docker-manifest
-common-docker-manifest:
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
-
-.PHONY: promu
-promu: $(PROMU)
-
-$(PROMU):
- $(eval PROMU_TMP := $(shell mktemp -d))
- curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
- mkdir -p $(FIRST_GOPATH)/bin
- cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
- rm -r $(PROMU_TMP)
-
-.PHONY: proto
-proto:
- @echo ">> generating code from proto files"
- @./scripts/genproto.sh
-
-ifdef GOLANGCI_LINT
-$(GOLANGCI_LINT):
- mkdir -p $(FIRST_GOPATH)/bin
- curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
- | sed -e '/install -d/d' \
- | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
-endif
-
-ifdef GOVENDOR
-.PHONY: $(GOVENDOR)
-$(GOVENDOR):
- GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
-endif
-
-.PHONY: precheck
-precheck::
-
-define PRECHECK_COMMAND_template =
-precheck:: $(1)_precheck
-
-PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
-.PHONY: $(1)_precheck
-$(1)_precheck:
- @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
- echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
- exit 1; \
- fi
-endef
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
deleted file mode 100644
index 53c5e9aa111..00000000000
--- a/vendor/github.com/prometheus/procfs/NOTICE
+++ /dev/null
@@ -1,7 +0,0 @@
-procfs provides functions to retrieve system, kernel and process
-metrics from the pseudo-filesystem proc.
-
-Copyright 2014-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
deleted file mode 100644
index 55d1e3261c9..00000000000
--- a/vendor/github.com/prometheus/procfs/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# procfs
-
-This package provides functions to retrieve system, kernel, and process
-metrics from the pseudo-filesystems /proc and /sys.
-
-*WARNING*: This package is a work in progress. Its API may still break in
-backwards-incompatible ways without warnings. Use it at your own risk.
-
-[](https://godoc.org/github.com/prometheus/procfs)
-[](https://travis-ci.org/prometheus/procfs)
-[](https://goreportcard.com/report/github.com/prometheus/procfs)
-
-## Usage
-
-The procfs library is organized by packages based on whether the gathered data is coming from
-/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc,
-/sys, or both. For example, CPU statistics are gathered from
-`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
-point is initialized, and then the stat information is read.
-
-```go
-fs, err := procfs.NewFS("/proc")
-stats, err := fs.Stat()
-```
-
-Some sub-packages, such as `blockdevice`, require access to both the proc and sys filesystems.
-
-```go
- fs, err := blockdevice.NewFS("/proc", "/sys")
- stats, err := fs.ProcDiskstats()
-```
-
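-Both snippets above elide error handling for brevity; a fuller sketch (an
-editor's addition, not from the original document) might look like:
-
-```go
-fs, err := procfs.NewFS("/proc")
-if err != nil {
-	log.Fatal(err)
-}
-stats, err := fs.Stat()
-if err != nil {
-	log.Fatal(err)
-}
-fmt.Printf("boot time: %d\n", stats.BootTime)
-```
-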
-## Package Organization
-
-The packages in this project are organized according to (1) whether the data comes from the `/proc` or
-`/sys` filesystem and (2) the type of information being retrieved. For example, most process information
-can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives
-is available in the `blockdevice` sub-package.
-
-## Building and Testing
-
-The procfs library is intended to be built as part of another application, so there are no distributable binaries.
-However, most of the code includes unit tests which can be run with `make test`.
-
-### Updating Test Fixtures
-
-The procfs library includes a set of test fixtures which include many example files from
-the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
-which is extracted automatically during testing. To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
-
-```bash
-rm -rf fixtures
-make test
-```
-
-Next, make the required changes to the extracted files in the `fixtures` directory. When
-the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
-based on the updated `fixtures` directory. Finally, verify the changes using
-`git diff fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md
deleted file mode 100644
index 67741f015af..00000000000
--- a/vendor/github.com/prometheus/procfs/SECURITY.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Reporting a security issue
-
-The Prometheus security policy, including how to report vulnerabilities, can be
-found here:
-
-https://prometheus.io/docs/operating/security/
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
deleted file mode 100644
index 4e47e617209..00000000000
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "io/ioutil"
- "net"
- "strings"
-)
-
-// ARPEntry contains a single row of the columnar data represented in
-// /proc/net/arp.
-type ARPEntry struct {
- // IP address
- IPAddr net.IP
- // MAC address
- HWAddr net.HardwareAddr
- // Name of the device
- Device string
-}
-
-// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
-// and returns a slice of ARPEntries.
-func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
- if err != nil {
- return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
- }
-
- return parseARPEntries(data)
-}
-
-func parseARPEntries(data []byte) ([]ARPEntry, error) {
- lines := strings.Split(string(data), "\n")
- entries := make([]ARPEntry, 0)
- var err error
- const (
- expectedDataWidth = 6
- expectedHeaderWidth = 9
- )
- for _, line := range lines {
- columns := strings.Fields(line)
- width := len(columns)
-
- if width == expectedHeaderWidth || width == 0 {
- continue
- } else if width == expectedDataWidth {
- entry, err := parseARPEntry(columns)
- if err != nil {
- return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
- }
- entries = append(entries, entry)
- } else {
- return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
- }
-
- }
-
- return entries, err
-}
-
-func parseARPEntry(columns []string) (ARPEntry, error) {
-	ip := net.ParseIP(columns[0])
-	// net.HardwareAddr is a byte slice, so the textual MAC address in the
-	// column must be parsed rather than cast directly from the string.
-	mac, err := net.ParseMAC(columns[3])
-	if err != nil {
-		return ARPEntry{}, fmt.Errorf("failed to parse MAC address %q: %w", columns[3], err)
-	}
-
-	entry := ARPEntry{
-		IPAddr: ip,
-		HWAddr: mac,
-		Device: columns[5],
-	}
-
-	return entry, nil
-}
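
For orientation, a minimal usage sketch for the API deleted above (not part of the vendored tree; it assumes a standard `/proc` mount):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.GatherARPEntries()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		// Each entry corresponds to one data row of /proc/net/arp.
		fmt.Printf("%s\t%s\t%s\n", e.IPAddr, e.HWAddr, e.Device)
	}
}
```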
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
deleted file mode 100644
index f5b7939b266..00000000000
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-// A BuddyInfo is the details parsed from /proc/buddyinfo.
-// The data consists of an array of free fragment counts, one per size.
-// The sizes are 2^n * PAGE_SIZE, where n is the array index.
-type BuddyInfo struct {
- Node string
- Zone string
- Sizes []float64
-}
-
-// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
-func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
- file, err := os.Open(fs.proc.Path("buddyinfo"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- return parseBuddyInfo(file)
-}
-
-func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
- var (
- buddyInfo = []BuddyInfo{}
- scanner = bufio.NewScanner(r)
- bucketCount = -1
- )
-
- for scanner.Scan() {
- var err error
- line := scanner.Text()
- parts := strings.Fields(line)
-
- if len(parts) < 4 {
- return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
- }
-
- node := strings.TrimRight(parts[1], ",")
- zone := strings.TrimRight(parts[3], ",")
- arraySize := len(parts[4:])
-
- if bucketCount == -1 {
- bucketCount = arraySize
- } else {
- if bucketCount != arraySize {
- return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
- }
- }
-
- sizes := make([]float64, arraySize)
- for i := 0; i < arraySize; i++ {
- sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
- if err != nil {
- return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
- }
- }
-
- buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
- }
-
- return buddyInfo, scanner.Err()
-}
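
As the type comment notes, `Sizes[n]` counts free fragments of `2^n * PAGE_SIZE` bytes each. A hedged sketch of turning those counts into free bytes per order; `freeBytesByOrder` is a hypothetical helper, not part of the package:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

// freeBytesByOrder is a hypothetical helper: it converts the per-order
// fragment counts of a BuddyInfo record into free bytes per order, using
// the 2^n * PAGE_SIZE sizing documented on the type.
func freeBytesByOrder(b procfs.BuddyInfo) []float64 {
	pageSize := float64(os.Getpagesize())
	out := make([]float64, len(b.Sizes))
	for n, count := range b.Sizes {
		out[n] = count * pageSize * float64(uint64(1)<<uint(n))
	}
	return out
}

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	infos, err := fs.BuddyInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range infos {
		fmt.Printf("node %s zone %-8s free bytes per order: %v\n", b.Node, b.Zone, freeBytesByOrder(b))
	}
}
```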
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
deleted file mode 100644
index 5623b24a161..00000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
-type CPUInfo struct {
- Processor uint
- VendorID string
- CPUFamily string
- Model string
- ModelName string
- Stepping string
- Microcode string
- CPUMHz float64
- CacheSize string
- PhysicalID string
- Siblings uint
- CoreID string
- CPUCores uint
- APICID string
- InitialAPICID string
- FPU string
- FPUException string
- CPUIDLevel uint
- WP string
- Flags []string
- Bugs []string
- BogoMips float64
- CLFlushSize uint
- CacheAlignment uint
- AddressSizes string
- PowerManagement string
-}
-
-var (
- cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`)
- cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
-)
-
-// CPUInfo returns information about current system CPUs.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) CPUInfo() ([]CPUInfo, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
- if err != nil {
- return nil, err
- }
- return parseCPUInfo(data)
-}
-
-func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- // find the first "processor" line
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo := []CPUInfo{firstcpu}
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- i++
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Processor = uint(v)
- case "vendor", "vendor_id":
- cpuinfo[i].VendorID = field[1]
- case "cpu family":
- cpuinfo[i].CPUFamily = field[1]
- case "model":
- cpuinfo[i].Model = field[1]
- case "model name":
- cpuinfo[i].ModelName = field[1]
- case "stepping":
- cpuinfo[i].Stepping = field[1]
- case "microcode":
- cpuinfo[i].Microcode = field[1]
- case "cpu MHz":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUMHz = v
- case "cache size":
- cpuinfo[i].CacheSize = field[1]
- case "physical id":
- cpuinfo[i].PhysicalID = field[1]
- case "siblings":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Siblings = uint(v)
- case "core id":
- cpuinfo[i].CoreID = field[1]
- case "cpu cores":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUCores = uint(v)
- case "apicid":
- cpuinfo[i].APICID = field[1]
- case "initial apicid":
- cpuinfo[i].InitialAPICID = field[1]
- case "fpu":
- cpuinfo[i].FPU = field[1]
- case "fpu_exception":
- cpuinfo[i].FPUException = field[1]
- case "cpuid level":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUIDLevel = uint(v)
- case "wp":
- cpuinfo[i].WP = field[1]
- case "flags":
- cpuinfo[i].Flags = strings.Fields(field[1])
- case "bugs":
- cpuinfo[i].Bugs = strings.Fields(field[1])
- case "bogomips":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].BogoMips = v
- case "clflush size":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CLFlushSize = uint(v)
- case "cache_alignment":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CacheAlignment = uint(v)
- case "address sizes":
- cpuinfo[i].AddressSizes = field[1]
- case "power management":
- cpuinfo[i].PowerManagement = field[1]
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
- if !match || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- featuresLine := ""
- commonCPUInfo := CPUInfo{}
- i := 0
- if strings.TrimSpace(field[0]) == "Processor" {
- commonCPUInfo = CPUInfo{ModelName: field[1]}
- i = -1
- } else {
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo = []CPUInfo{firstcpu}
- }
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
- i++
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Processor = uint(v)
- case "BogoMIPS":
- if i == -1 {
- cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
- i++
- cpuinfo[i].Processor = 0
- }
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].BogoMips = v
- case "Features":
- featuresLine = line
- case "model name":
- cpuinfo[i].ModelName = field[1]
- }
- }
-	fields := strings.SplitN(featuresLine, ": ", 2)
-	// Guard against a cpuinfo without a "Features" line; indexing
-	// fields[1] unconditionally would panic in that case.
-	if len(fields) == 2 {
-		for i := range cpuinfo {
-			cpuinfo[i].Flags = strings.Fields(fields[1])
-		}
-	}
- return cpuinfo, nil
-}
-
-func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- commonCPUInfo := CPUInfo{VendorID: field[1]}
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "bogomips per cpu":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- commonCPUInfo.BogoMips = v
- case "features":
- commonCPUInfo.Flags = strings.Fields(field[1])
- }
- if strings.HasPrefix(line, "processor") {
- match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
- if len(match) < 2 {
-				return nil, fmt.Errorf("invalid cpuinfo file: %q", line)
- }
- cpu := commonCPUInfo
- v, err := strconv.ParseUint(match[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpu.Processor = uint(v)
- cpuinfo = append(cpuinfo, cpu)
- }
- if strings.HasPrefix(line, "cpu number") {
- break
- }
- }
-
- i := 0
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "cpu number":
- i++
- case "cpu MHz dynamic":
- clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
- v, err := strconv.ParseFloat(clock, 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUMHz = v
- case "physical id":
- cpuinfo[i].PhysicalID = field[1]
- case "core id":
- cpuinfo[i].CoreID = field[1]
- case "cpu cores":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUCores = uint(v)
- case "siblings":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Siblings = uint(v)
- }
- }
-
- return cpuinfo, nil
-}
-
-func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- // find the first "processor" line
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- systemType := field[1]
-
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- i = int(v)
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- cpuinfo[i].Processor = uint(v)
- cpuinfo[i].VendorID = systemType
- case "cpu model":
- cpuinfo[i].ModelName = field[1]
- case "BogoMIPS":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].BogoMips = v
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo := []CPUInfo{firstcpu}
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- i++
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Processor = uint(v)
- case "cpu":
- cpuinfo[i].VendorID = field[1]
- case "clock":
- clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
- v, err := strconv.ParseFloat(clock, 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUMHz = v
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo := []CPUInfo{firstcpu}
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- i = int(v)
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- cpuinfo[i].Processor = uint(v)
- case "hart":
- cpuinfo[i].CoreID = field[1]
- case "isa":
- cpuinfo[i].ModelName = field[1]
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
- return nil, errors.New("not implemented")
-}
-
-// firstNonEmptyLine advances the scanner to the first non-empty line
-// and returns the contents of that line
-func firstNonEmptyLine(scanner *bufio.Scanner) string {
- for scanner.Scan() {
- line := scanner.Text()
- if strings.TrimSpace(line) != "" {
- return line
- }
- }
- return ""
-}
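
The arch-specific files that follow bind a `parseCPUInfo` variable to one of the parsers above via build tags, so `FS.CPUInfo` works unchanged across architectures. A minimal caller sketch, assuming a standard `/proc` mount:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	cpus, err := fs.CPUInfo() // dispatches to the build-tag-selected parser
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cpus {
		fmt.Printf("cpu%d: %s @ %.0f MHz\n", c.Processor, c.ModelName, c.CPUMHz)
	}
}
```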
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
deleted file mode 100644
index e83c2e207c1..00000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-// +build riscv riscv64
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoRISCV
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
deleted file mode 100644
index 26814eebaaf..00000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoS390X
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
deleted file mode 100644
index d5bedf97f31..00000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-// +build 386 amd64
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoX86
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
deleted file mode 100644
index 5048ad1f214..00000000000
--- a/vendor/github.com/prometheus/procfs/crypto.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Crypto holds info parsed from /proc/crypto.
-type Crypto struct {
- Alignmask *uint64
- Async bool
- Blocksize *uint64
- Chunksize *uint64
- Ctxsize *uint64
- Digestsize *uint64
- Driver string
- Geniv string
- Internal string
- Ivsize *uint64
- Maxauthsize *uint64
- MaxKeysize *uint64
- MinKeysize *uint64
- Module string
- Name string
- Priority *int64
- Refcnt *int64
- Seedsize *uint64
- Selftest string
- Type string
- Walksize *uint64
-}
-
-// Crypto parses the crypto file (/proc/crypto) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
-func (fs FS) Crypto() ([]Crypto, error) {
- path := fs.proc.Path("crypto")
- b, err := util.ReadFileNoStat(path)
- if err != nil {
- return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
- }
-
- crypto, err := parseCrypto(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
- }
-
- return crypto, nil
-}
-
-// parseCrypto parses a /proc/crypto stream into Crypto elements.
-func parseCrypto(r io.Reader) ([]Crypto, error) {
- var out []Crypto
-
- s := bufio.NewScanner(r)
- for s.Scan() {
- text := s.Text()
- switch {
- case strings.HasPrefix(text, "name"):
- // Each crypto element begins with its name.
- out = append(out, Crypto{})
- case text == "":
- continue
- }
-
- kv := strings.Split(text, ":")
- if len(kv) != 2 {
- return nil, fmt.Errorf("malformed crypto line: %q", text)
- }
-
- k := strings.TrimSpace(kv[0])
- v := strings.TrimSpace(kv[1])
-
- // Parse the key/value pair into the currently focused element.
- c := &out[len(out)-1]
- if err := c.parseKV(k, v); err != nil {
- return nil, err
- }
- }
-
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- return out, nil
-}
-
-// parseKV parses a key/value pair into the appropriate field of c.
-func (c *Crypto) parseKV(k, v string) error {
- vp := util.NewValueParser(v)
-
- switch k {
- case "async":
- // Interpret literal yes as true.
- c.Async = v == "yes"
- case "blocksize":
- c.Blocksize = vp.PUInt64()
- case "chunksize":
- c.Chunksize = vp.PUInt64()
- case "digestsize":
- c.Digestsize = vp.PUInt64()
- case "driver":
- c.Driver = v
- case "geniv":
- c.Geniv = v
- case "internal":
- c.Internal = v
- case "ivsize":
- c.Ivsize = vp.PUInt64()
- case "maxauthsize":
- c.Maxauthsize = vp.PUInt64()
- case "max keysize":
- c.MaxKeysize = vp.PUInt64()
- case "min keysize":
- c.MinKeysize = vp.PUInt64()
- case "module":
- c.Module = v
- case "name":
- c.Name = v
- case "priority":
- c.Priority = vp.PInt64()
- case "refcnt":
- c.Refcnt = vp.PInt64()
- case "seedsize":
- c.Seedsize = vp.PUInt64()
- case "selftest":
- c.Selftest = v
- case "type":
- c.Type = v
- case "walksize":
- c.Walksize = vp.PUInt64()
- }
-
- return vp.Err()
-}
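
Because many `Crypto` fields are pointers (a key may be absent for a given algorithm), callers need a nil check before dereferencing. A hedged usage sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	algos, err := fs.Crypto()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range algos {
		// Numeric fields are pointers because not every key appears
		// for every algorithm; nil means the key was absent.
		prio := int64(0)
		if c.Priority != nil {
			prio = *c.Priority
		}
		fmt.Printf("%s: driver=%s type=%s priority=%d\n", c.Name, c.Driver, c.Type, prio)
	}
}
```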
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
deleted file mode 100644
index e2acd6d40a6..00000000000
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package procfs provides functions to retrieve system, kernel and process
-// metrics from the pseudo-filesystem proc.
-//
-// Example:
-//
-// package main
-//
-// import (
-// "fmt"
-// "log"
-//
-// "github.com/prometheus/procfs"
-// )
-//
-// func main() {
-// p, err := procfs.Self()
-// if err != nil {
-// log.Fatalf("could not get process: %s", err)
-// }
-//
-// stat, err := p.NewStat()
-// if err != nil {
-// log.Fatalf("could not get process stat: %s", err)
-// }
-//
-// fmt.Printf("command: %s\n", stat.Comm)
-// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
-// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
-// }
-//
-package procfs
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
deleted file mode 100644
index 1e76173da0a..00000000000
--- a/vendor/github.com/prometheus/procfs/fixtures.ttar
+++ /dev/null
@@ -1,6553 +0,0 @@
-# Archive created by ttar -c -f fixtures.ttar fixtures/
-Directory: fixtures
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/cmdline
-Lines: 1
-vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/comm
-Lines: 1
-vim
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/cwd
-SymlinkTo: /usr/bin
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/environ
-Lines: 1
-PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/exe
-SymlinkTo: /usr/bin/vim
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/fd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/0
-SymlinkTo: ../../symlinktargets/abc
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/1
-SymlinkTo: ../../symlinktargets/def
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/10
-SymlinkTo: ../../symlinktargets/xyz
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/2
-SymlinkTo: ../../symlinktargets/ghi
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/3
-SymlinkTo: ../../symlinktargets/uvw
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/fdinfo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/0
-Lines: 6
-pos: 0
-flags: 02004000
-mnt_id: 13
-inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000
-inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a
-inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/1
-Lines: 4
-pos: 0
-flags: 02004002
-mnt_id: 13
-eventfd-count: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/10
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/2
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/3
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/io
-Lines: 7
-rchar: 750339
-wchar: 818609
-syscr: 7405
-syscw: 5245
-read_bytes: 1024
-write_bytes: 2048
-cancelled_write_bytes: -1024
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/limits
-Lines: 17
-Limit Soft Limit Hard Limit Units
-Max cpu time unlimited unlimited seconds
-Max file size unlimited unlimited bytes
-Max data size unlimited unlimited bytes
-Max stack size 8388608 unlimited bytes
-Max core file size 0 unlimited bytes
-Max resident set unlimited unlimited bytes
-Max processes 62898 62898 processes
-Max open files 2048 4096 files
-Max locked memory 18446744073708503040 18446744073708503040 bytes
-Max address space 8589934592 unlimited bytes
-Max file locks unlimited unlimited locks
-Max pending signals 62898 62898 signals
-Max msgqueue size 819200 819200 bytes
-Max nice priority 0 0
-Max realtime priority 0 0
-Max realtime timeout unlimited unlimited us
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/mountstats
-Lines: 20
-device rootfs mounted on / with fstype rootfs
-device sysfs mounted on /sys with fstype sysfs
-device proc mounted on /proc with fstype proc
-device /dev/sda1 mounted on / with fstype ext4
-device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
- opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none
- age: 13968
- caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
- nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
- sec: flavor=1,pseudoflavor=1
- events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
- bytes: 1207640230 0 0 0 1210214218 0 295483 0
- RPC iostats version: 1.0 p/v: 100003/4 (nfs)
- xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
- per-op statistics
- NULL: 0 0 0 0 0 0 0 0
- READ: 1298 1298 0 207680 1210292152 6 79386 79407
- WRITE: 0 0 0 0 0 0 0 0
- ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717
-
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/net
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/net/dev
-Lines: 4
-Inter-| Receive | Transmit
- face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
- lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/ns
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/ns/mnt
-SymlinkTo: mnt:[4026531840]
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/ns/net
-SymlinkTo: net:[4026531993]
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/root
-SymlinkTo: /
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/schedstat
-Lines: 1
-411605849 93680043 79
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/smaps
-Lines: 252
-00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager
-Size: 8900 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 2952 kB
-Pss: 2952 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 2952 kB
-Private_Dirty: 0 kB
-Referenced: 2864 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd ex mr mw me dw sd
-00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager
-Size: 10236 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 6152 kB
-Pss: 6152 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 6152 kB
-Private_Dirty: 0 kB
-Referenced: 5308 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd mr mw me dw sd
-016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager
-Size: 424 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 176 kB
-Pss: 176 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 84 kB
-Private_Dirty: 92 kB
-Referenced: 176 kB
-Anonymous: 92 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 12 kB
-SwapPss: 12 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me dw ac sd
-0171a000-0173f000 rw-p 00000000 00:00 0
-Size: 148 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 76 kB
-Pss: 76 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 76 kB
-Referenced: 76 kB
-Anonymous: 76 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-c000000000-c000400000 rw-p 00000000 00:00 0
-Size: 4096 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 2564 kB
-Pss: 2564 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 20 kB
-Private_Dirty: 2544 kB
-Referenced: 2544 kB
-Anonymous: 2564 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 1100 kB
-SwapPss: 1100 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-c000400000-c001600000 rw-p 00000000 00:00 0
-Size: 18432 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 16024 kB
-Pss: 16024 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 5864 kB
-Private_Dirty: 10160 kB
-Referenced: 11944 kB
-Anonymous: 16024 kB
-LazyFree: 5848 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 440 kB
-SwapPss: 440 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd nh
-c001600000-c004000000 rw-p 00000000 00:00 0
-Size: 43008 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 0 kB
-Pss: 0 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 0 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0
-Size: 38596 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 1992 kB
-Pss: 1992 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 476 kB
-Private_Dirty: 1516 kB
-Referenced: 1828 kB
-Anonymous: 1992 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 384 kB
-SwapPss: 384 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack]
-Size: 132 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 8 kB
-Pss: 8 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 8 kB
-Referenced: 8 kB
-Anonymous: 8 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 4 kB
-SwapPss: 4 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me gd ac
-7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar]
-Size: 12 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 0 kB
-Pss: 0 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 0 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd mr pf io de dd sd
-7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso]
-Size: 8 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 4 kB
-Pss: 0 kB
-Shared_Clean: 4 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 4 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd ex mr mw me de sd
-ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
-Size: 4 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 0 kB
-Pss: 0 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 0 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd ex
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/smaps_rollup
-Lines: 17
-00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup]
-Rss: 29948 kB
-Pss: 29944 kB
-Shared_Clean: 4 kB
-Shared_Dirty: 0 kB
-Private_Clean: 15548 kB
-Private_Dirty: 14396 kB
-Referenced: 24752 kB
-Anonymous: 20756 kB
-LazyFree: 5848 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 1940 kB
-SwapPss: 1940 kB
-Locked: 0 kB
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/stat
-Lines: 1
-26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/status
-Lines: 53
-
-Name: prometheus
-Umask: 0022
-State: S (sleeping)
-Tgid: 26231
-Ngid: 0
-Pid: 26231
-PPid: 1
-TracerPid: 0
-Uid: 1000 1000 1000 0
-Gid: 1001 1001 1001 0
-FDSize: 128
-Groups:
-NStgid: 1
-NSpid: 1
-NSpgid: 1
-NSsid: 1
-VmPeak: 58472 kB
-VmSize: 58440 kB
-VmLck: 0 kB
-VmPin: 0 kB
-VmHWM: 8028 kB
-VmRSS: 6716 kB
-RssAnon: 2092 kB
-RssFile: 4624 kB
-RssShmem: 0 kB
-VmData: 2580 kB
-VmStk: 136 kB
-VmExe: 948 kB
-VmLib: 6816 kB
-VmPTE: 128 kB
-VmPMD: 12 kB
-VmSwap: 660 kB
-HugetlbPages: 0 kB
-Threads: 1
-SigQ: 8/63965
-SigPnd: 0000000000000000
-ShdPnd: 0000000000000000
-SigBlk: 7be3c0fe28014a03
-SigIgn: 0000000000001000
-SigCgt: 00000001800004ec
-CapInh: 0000000000000000
-CapPrm: 0000003fffffffff
-CapEff: 0000003fffffffff
-CapBnd: 0000003fffffffff
-CapAmb: 0000000000000000
-Seccomp: 0
-Cpus_allowed: ff
-Cpus_allowed_list: 0-7
-Mems_allowed: 00000000,00000001
-Mems_allowed_list: 0
-voluntary_ctxt_switches: 4742839
-nonvoluntary_ctxt_switches: 1727500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/wchan
-Lines: 1
-poll_schedule_timeoutEOF
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26232
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/cmdline
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/comm
-Lines: 1
-ata_sff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/cwd
-SymlinkTo: /does/not/exist
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26232/fd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/0
-SymlinkTo: ../../symlinktargets/abc
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/1
-SymlinkTo: ../../symlinktargets/def
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/2
-SymlinkTo: ../../symlinktargets/ghi
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/3
-SymlinkTo: ../../symlinktargets/uvw
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/4
-SymlinkTo: ../../symlinktargets/xyz
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/limits
-Lines: 17
-Limit Soft Limit Hard Limit Units
-Max cpu time unlimited unlimited seconds
-Max file size unlimited unlimited bytes
-Max data size unlimited unlimited bytes
-Max stack size 8388608 unlimited bytes
-Max core file size 0 unlimited bytes
-Max resident set unlimited unlimited bytes
-Max processes 29436 29436 processes
-Max open files 1024 4096 files
-Max locked memory 65536 65536 bytes
-Max address space unlimited unlimited bytes
-Max file locks unlimited unlimited locks
-Max pending signals 29436 29436 signals
-Max msgqueue size 819200 819200 bytes
-Max nice priority 0 0
-Max realtime priority 0 0
-Max realtime timeout unlimited unlimited us
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/maps
-Lines: 9
-55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat
-55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat
-55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap]
-7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive
-7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so
-7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack]
-7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar]
-7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso]
-ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/root
-SymlinkTo: /does/not/exist
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/stat
-Lines: 1
-33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/wchan
-Lines: 1
-0EOF
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26233
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26233/cmdline
-Lines: 1
-com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26233/schedstat
-Lines: 8
- ____________________________________
-< this is a malformed schedstat file >
- ------------------------------------
- \ ^__^
- \ (oo)\_______
- (__)\ )\/\
- ||----w |
- || ||
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26234
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26234/maps
-Lines: 4
-08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh
-08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh
-0808c000-08146000 rwxp 00000000 00:00 0
-40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/584
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/584/stat
-Lines: 2
-1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
-#!/bin/cat /proc/self/stat
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/buddyinfo
-Lines: 3
-Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
-Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
-Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/cpuinfo
-Lines: 216
-processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 799.998
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 0
-cpu cores : 4
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 1
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.037
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 1
-cpu cores : 4
-apicid : 2
-initial apicid : 2
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 2
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.010
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 2
-cpu cores : 4
-apicid : 4
-initial apicid : 4
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 3
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.028
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 3
-cpu cores : 4
-apicid : 6
-initial apicid : 6
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 4
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 799.989
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 0
-cpu cores : 4
-apicid : 1
-initial apicid : 1
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 5
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.083
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 1
-cpu cores : 4
-apicid : 3
-initial apicid : 3
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 6
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.017
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 2
-cpu cores : 4
-apicid : 5
-initial apicid : 5
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 7
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.030
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 3
-cpu cores : 4
-apicid : 7
-initial apicid : 7
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/crypto
-Lines: 972
-name : ccm(aes)
-driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni))
-module : ccm
-priority : 300
-refcnt : 4
-selftest : passed
-internal : no
-type : aead
-async : no
-blocksize : 1
-ivsize : 16
-maxauthsize : 16
-geniv :
-
-name : cbcmac(aes)
-driver : cbcmac(aes-aesni)
-module : ccm
-priority : 300
-refcnt : 7
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 16
-
-name : ecdh
-driver : ecdh-generic
-module : ecdh_generic
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : kpp
-async : yes
-
-name : ecb(arc4)
-driver : ecb(arc4)-generic
-module : arc4
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 1
-max keysize : 256
-ivsize : 0
-chunksize : 1
-walksize : 1
-
-name : arc4
-driver : arc4-generic
-module : arc4
-priority : 0
-refcnt : 3
-selftest : passed
-internal : no
-type : cipher
-blocksize : 1
-min keysize : 1
-max keysize : 256
-
-name : crct10dif
-driver : crct10dif-pclmul
-module : crct10dif_pclmul
-priority : 200
-refcnt : 2
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 2
-
-name : crc32
-driver : crc32-pclmul
-module : crc32_pclmul
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : __ghash
-driver : cryptd(__ghash-pclmulqdqni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : ahash
-async : yes
-blocksize : 16
-digestsize : 16
-
-name : ghash
-driver : ghash-clmulni
-module : ghash_clmulni_intel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : ahash
-async : yes
-blocksize : 16
-digestsize : 16
-
-name : __ghash
-driver : __ghash-pclmulqdqni
-module : ghash_clmulni_intel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : shash
-blocksize : 16
-digestsize : 16
-
-name : crc32c
-driver : crc32c-intel
-module : crc32c_intel
-priority : 200
-refcnt : 5
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : cbc(aes)
-driver : cbc(aes-aesni)
-module : kernel
-priority : 300
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : ctr(aes)
-driver : ctr(aes-aesni)
-module : kernel
-priority : 300
-refcnt : 5
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : pkcs1pad(rsa,sha256)
-driver : pkcs1pad(rsa-generic,sha256)
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : akcipher
-
-name : __xts(aes)
-driver : cryptd(__xts-aes-aesni)
-module : kernel
-priority : 451
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : xts(aes)
-driver : xts-aes-aesni
-module : kernel
-priority : 401
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ctr(aes)
-driver : cryptd(__ctr-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 1
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : ctr(aes)
-driver : ctr-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __cbc(aes)
-driver : cryptd(__cbc-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : cbc(aes)
-driver : cbc-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ecb(aes)
-driver : cryptd(__ecb-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : ecb(aes)
-driver : ecb-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : __generic-gcm-aes-aesni
-driver : cryptd(__driver-generic-gcm-aes-aesni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : yes
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv :
-
-name : gcm(aes)
-driver : generic-gcm-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : aead
-async : yes
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv :
-
-name : __generic-gcm-aes-aesni
-driver : __driver-generic-gcm-aes-aesni
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : no
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv :
-
-name : __gcm-aes-aesni
-driver : cryptd(__driver-gcm-aes-aesni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : yes
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv :
-
-name : rfc4106(gcm(aes))
-driver : rfc4106-gcm-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : aead
-async : yes
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv :
-
-name : __gcm-aes-aesni
-driver : __driver-gcm-aes-aesni
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : no
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv :
-
-name : __xts(aes)
-driver : __xts-aes-aesni
-module : kernel
-priority : 401
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ctr(aes)
-driver : __ctr-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __cbc(aes)
-driver : __cbc-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ecb(aes)
-driver : __ecb-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : __aes
-driver : __aes-aesni
-module : kernel
-priority : 300
-refcnt : 1
-selftest : passed
-internal : yes
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : aes
-driver : aes-aesni
-module : kernel
-priority : 300
-refcnt : 8
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : hmac(sha1)
-driver : hmac(sha1-generic)
-module : kernel
-priority : 100
-refcnt : 9
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 20
-
-name : ghash
-driver : ghash-generic
-module : kernel
-priority : 100
-refcnt : 3
-selftest : passed
-internal : no
-type : shash
-blocksize : 16
-digestsize : 16
-
-name : jitterentropy_rng
-driver : jitterentropy_rng
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha256
-module : kernel
-priority : 221
-refcnt : 2
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha512
-module : kernel
-priority : 220
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha384
-module : kernel
-priority : 219
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha1
-module : kernel
-priority : 218
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha256
-module : kernel
-priority : 217
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha512
-module : kernel
-priority : 216
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha384
-module : kernel
-priority : 215
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha1
-module : kernel
-priority : 214
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes256
-module : kernel
-priority : 213
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes192
-module : kernel
-priority : 212
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes128
-module : kernel
-priority : 211
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : hmac(sha256)
-driver : hmac(sha256-generic)
-module : kernel
-priority : 100
-refcnt : 10
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 32
-
-name : stdrng
-driver : drbg_pr_hmac_sha256
-module : kernel
-priority : 210
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha512
-module : kernel
-priority : 209
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha384
-module : kernel
-priority : 208
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha1
-module : kernel
-priority : 207
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha256
-module : kernel
-priority : 206
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha512
-module : kernel
-priority : 205
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha384
-module : kernel
-priority : 204
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha1
-module : kernel
-priority : 203
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes256
-module : kernel
-priority : 202
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes192
-module : kernel
-priority : 201
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes128
-module : kernel
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : 842
-driver : 842-scomp
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : 842
-driver : 842-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : lzo-rle
-driver : lzo-rle-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : lzo-rle
-driver : lzo-rle-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : lzo
-driver : lzo-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : lzo
-driver : lzo-generic
-module : kernel
-priority : 0
-refcnt : 9
-selftest : passed
-internal : no
-type : compression
-
-name : crct10dif
-driver : crct10dif-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 2
-
-name : crc32c
-driver : crc32c-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : zlib-deflate
-driver : zlib-deflate-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : deflate
-driver : deflate-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : deflate
-driver : deflate-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : aes
-driver : aes-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : sha224
-driver : sha224-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 28
-
-name : sha256
-driver : sha256-generic
-module : kernel
-priority : 100
-refcnt : 11
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 32
-
-name : sha1
-driver : sha1-generic
-module : kernel
-priority : 100
-refcnt : 11
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 20
-
-name : md5
-driver : md5-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 16
-
-name : ecb(cipher_null)
-driver : ecb-cipher_null
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 0
-max keysize : 0
-ivsize : 0
-chunksize : 1
-walksize : 1
-
-name : digest_null
-driver : digest_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 0
-
-name : compress_null
-driver : compress_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : cipher_null
-driver : cipher_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 1
-min keysize : 0
-max keysize : 0
-
-name : rsa
-driver : rsa-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : akcipher
-
-name : dh
-driver : dh-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : kpp
-
-name : aes
-driver : aes-asm
-module : kernel
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-Mode: 444
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/diskstats
-Lines: 52
- 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0
- 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0
- 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0
- 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0
- 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0
- 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0
- 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0
- 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0
- 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0
- 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0
- 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0
- 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0
- 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0
- 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0
- 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0
- 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0
- 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0
- 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0
- 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0
- 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0
- 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0
- 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0
- 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0
- 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0
- 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804
- 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36
- 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32
- 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60
- 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428
- 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256
- 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84
- 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416
- 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104
- 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44
- 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632
- 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156
- 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24
- 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68
- 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80
- 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228
- 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720
- 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992
- 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0
- 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546
- 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16
- 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970
- 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130
- 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0
- 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130
- 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182
- 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0
- 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs/fscache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/fs/fscache/stats
-Lines: 24
-FS-Cache statistics
-Cookies: idx=3 dat=67877 spc=0
-Objects: alc=67473 nal=0 avl=67473 ded=388
-ChkAux : non=12 ok=33 upd=44 obs=55
-Pages : mrk=547164 unc=364577
-Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26
-Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85
-Invals : n=14 run=13
-Updates: n=7 nul=3 run=8
-Relinqs: n=394 nul=1 wcr=2 rtr=3
-AttrChg: n=6 ok=5 nbf=4 oom=3 run=2
-Allocs : n=20 ok=19 wt=18 nbf=17 int=16
-Allocs : ops=15 owt=14 abt=13
-Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43
-Retrvls: ops=151959 owt=42747 abt=44
-Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14
-Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43
-VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66
-Ops : pend=42753 run=221129 enq=628798 can=11 rej=88
-Ops : ini=377538 dfr=27 rel=377538 gc=37
-CacheOp: alo=1 luo=2 luc=3 gro=4
-CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10
-CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17
-CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs/xfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/fs/xfs/stat
-Lines: 23
-extent_alloc 92447 97589 92448 93751
-abt 0 0 0 0
-blk_map 1767055 188820 184891 92447 92448 2140766 0
-bmbt 0 0 0 0
-dir 185039 92447 92444 136422
-trans 706 944304 0
-ig 185045 58807 0 126238 0 33637 22
-log 2883 113448 9 17360 739
-push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
-xstrat 92447 0
-rw 107739 94045
-attr 4 0 0 0
-icluster 8677 7849 135802
-vnodes 92601 0 0 0 92444 92444 92444 0
-buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
-abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
-abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
-bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
-fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-qm 0 0 0 0 0 0 0 0
-xpc 399724544 92823103 86219234
-debug 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/loadavg
-Lines: 1
-0.02 0.04 0.05 1/497 11947
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/mdstat
-Lines: 60
-Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
-
-md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S)
- 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
-
-md127 : active raid1 sdi2[0] sdj2[1]
- 312319552 blocks [2/2] [UU]
-
-md0 : active raid1 sdi1[0] sdj1[1]
- 248896 blocks [2/2] [UU]
-
-md4 : inactive raid1 sda3[0](F) sdb3[1](S)
- 4883648 blocks [2/2] [UU]
-
-md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0]
- 195310144 blocks [2/1] [U_]
- [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
-
-md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S)
- 195310144 blocks [2/2] [UU]
- [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
-
-md201 : active raid1 sda3[0] sdb3[1]
- 1993728 blocks super 1.2 [2/2] [UU]
- [=>...................] check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec
-
-md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F)
- 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
- bitmap: 0/30 pages [0KB], 65536KB chunk
-
-md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S)
- 523968 blocks super 1.2 [4/4] [UUUU]
- resync=DELAYED
-
-md10 : active raid0 sda1[0] sdb1[1]
- 314159265 blocks 64k chunks
-
-md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S)
- 4190208 blocks super 1.2 [2/2] [UU]
- resync=PENDING
-
-md12 : active raid0 sdc2[0] sdd2[1]
- 3886394368 blocks super 1.2 512k chunks
-
-md126 : active raid0 sdb[1] sdc[0]
- 1855870976 blocks super external:/md127/0 128k chunks
-
-md219 : inactive sdb[2](S) sdc[1](S) sda[0](S)
- 7932 blocks super external:imsm
-
-md00 : active raid0 xvdb[0]
- 4186624 blocks super 1.2 256k chunks
-
-md120 : active linear sda1[1] sdb1[0]
- 2095104 blocks super 1.2 0k rounding
-
-md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0]
- 322560 blocks super 1.2 512k chunks
-
-unused devices: <none>
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/meminfo
-Lines: 42
-MemTotal: 15666184 kB
-MemFree: 440324 kB
-Buffers: 1020128 kB
-Cached: 12007640 kB
-SwapCached: 0 kB
-Active: 6761276 kB
-Inactive: 6532708 kB
-Active(anon): 267256 kB
-Inactive(anon): 268 kB
-Active(file): 6494020 kB
-Inactive(file): 6532440 kB
-Unevictable: 0 kB
-Mlocked: 0 kB
-SwapTotal: 0 kB
-SwapFree: 0 kB
-Dirty: 768 kB
-Writeback: 0 kB
-AnonPages: 266216 kB
-Mapped: 44204 kB
-Shmem: 1308 kB
-Slab: 1807264 kB
-SReclaimable: 1738124 kB
-SUnreclaim: 69140 kB
-KernelStack: 1616 kB
-PageTables: 5288 kB
-NFS_Unstable: 0 kB
-Bounce: 0 kB
-WritebackTmp: 0 kB
-CommitLimit: 7833092 kB
-Committed_AS: 530844 kB
-VmallocTotal: 34359738367 kB
-VmallocUsed: 36596 kB
-VmallocChunk: 34359637840 kB
-HardwareCorrupted: 0 kB
-AnonHugePages: 12288 kB
-HugePages_Total: 0
-HugePages_Free: 0
-HugePages_Rsvd: 0
-HugePages_Surp: 0
-Hugepagesize: 2048 kB
-DirectMap4k: 91136 kB
-DirectMap2M: 16039936 kB
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/net
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/arp
-Lines: 2
-IP address HW type Flags HW address Mask Device
-192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/dev
-Lines: 6
-Inter-| Receive | Transmit
- face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
-vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0
- lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0
-docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0
- eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/ip_vs
-Lines: 21
-IP Virtual Server version 1.2.1 (size=4096)
-Prot LocalAddress:Port Scheduler Flags
- -> RemoteAddress:Port Forward Weight ActiveConn InActConn
-TCP C0A80016:0CEA wlc
- -> C0A85216:0CEA Tunnel 100 248 2
- -> C0A85318:0CEA Tunnel 100 248 2
- -> C0A85315:0CEA Tunnel 100 248 1
-TCP C0A80039:0CEA wlc
- -> C0A85416:0CEA Tunnel 0 0 0
- -> C0A85215:0CEA Tunnel 100 1499 0
- -> C0A83215:0CEA Tunnel 100 1498 0
-TCP C0A80037:0CEA wlc
- -> C0A8321A:0CEA Tunnel 0 0 0
- -> C0A83120:0CEA Tunnel 100 0 0
-TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
- -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0
- -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0
- -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1
-FWM 10001000 wlc
- -> C0A8321A:0CEA Route 0 0 1
- -> C0A83215:0CEA Route 0 0 2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/ip_vs_stats
-Lines: 6
- Total Incoming Outgoing Incoming Outgoing
- Conns Packets Packets Bytes Bytes
- 16AA370 E33656E5 0 51D8C8883AB3 0
-
- Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
- 4 1FB3C 0 1282A8F 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/protocols
-Lines: 14
-protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em
-PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
-PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
-RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
-UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
-UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
-TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y
-UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n
-UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
-PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
-RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
-UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
-TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y
-NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/net/rpc
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/rpc/nfs
-Lines: 5
-net 18628 0 18628 6
-rpc 4329785 0 4338291
-proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
-proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
-proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/rpc/nfsd
-Lines: 11
-rc 0 6 18622
-fh 0 0 0 0 0
-io 157286400 0
-th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
-ra 32 0 0 0 0 0 0 0 0 0 0 0
-net 18628 0 18628 6
-rpc 18628 0 0 0 0
-proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
-proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
-proc4 2 2 10853
-proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/sockstat
-Lines: 6
-sockets: used 1602
-TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22
-UDP: inuse 12 mem 62
-UDPLITE: inuse 0
-RAW: inuse 0
-FRAG: inuse 0 memory 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/sockstat6
-Lines: 5
-TCP6: inuse 17
-UDP6: inuse 9
-UDPLITE6: inuse 0
-RAW6: inuse 1
-FRAG6: inuse 0 memory 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/softnet_stat
-Lines: 2
-00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
-01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/softnet_stat.broken
-Lines: 1
-00015c73 00020e76 F0000769 00000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/tcp
-Lines: 4
- sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
- 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/tcp6
-Lines: 3
- sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
- 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
- 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp
-Lines: 4
- sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
- 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp6
-Lines: 3
- sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
- 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
- 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp_broken
-Lines: 2
- sl local_address rem_address st
- 1: 00000000:0016 00000000:0000 0A
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/unix
-Lines: 6
-Num RefCount Protocol Flags Type St Inode Path
-0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control
-0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log
-0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 00000003 00000000 00000000 0001 03 5091797
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/unix_without_inode
-Lines: 6
-Num RefCount Protocol Flags Type St Path
-0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control
-0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log
-0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 00000003 00000000 00000000 0001 03
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/xfrm_stat
-Lines: 28
-XfrmInError 1
-XfrmInBufferError 2
-XfrmInHdrError 4
-XfrmInNoStates 3
-XfrmInStateProtoError 40
-XfrmInStateModeError 100
-XfrmInStateSeqError 6000
-XfrmInStateExpired 4
-XfrmInStateMismatch 23451
-XfrmInStateInvalid 55555
-XfrmInTmplMismatch 51
-XfrmInNoPols 65432
-XfrmInPolBlock 100
-XfrmInPolError 10000
-XfrmOutError 1000000
-XfrmOutBundleGenError 43321
-XfrmOutBundleCheckError 555
-XfrmOutNoStates 869
-XfrmOutStateProtoError 4542
-XfrmOutStateModeError 4
-XfrmOutStateSeqError 543
-XfrmOutStateExpired 565
-XfrmOutPolBlock 43456
-XfrmOutPolDead 7656
-XfrmOutPolError 1454
-XfrmFwdHdrError 6654
-XfrmOutStateInvalid 28765
-XfrmAcquireError 24532
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/pressure
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/cpu
-Lines: 1
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/io
-Lines: 2
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-full avg10=0.20 avg60=3.00 avg300=4.95 total=25
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/memory
-Lines: 2
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-full avg10=0.20 avg60=3.00 avg300=4.95 total=25
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/schedstat
-Lines: 6
-version 15
-timestamp 15819019232
-cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306
-domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0
-cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945
-domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/self
-SymlinkTo: 26231
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/slabinfo
-Lines: 302
-slabinfo - version: 2.1
-# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
-pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0
-pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0
-nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0
-kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
-kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0
-kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0
-pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0
-x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0
-iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
-ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0
-bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
-bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0
-bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0
-fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0
-fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0
-squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
-fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
-fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0
-xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0
-xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
-nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
-nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
-nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0
-nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0
-nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
-nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
-rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
-jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
-jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0
-reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
-btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0
-ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0
-ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
-ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0
-ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0
-ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0
-ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0
-ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0
-ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0
-ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0
-jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0
-jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0
-jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
-jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0
-jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0
-jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
-ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0
-mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0
-dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
-dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
-dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0
-kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0
-io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
-dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0
-dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
-aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
-qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
-sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0
-scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0
-virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
-L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
-ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
-fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
-ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0
-RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0
-UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0
-UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0
-tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0
-TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0
-uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0
-sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
-sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0
-sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0
-sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
-btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0
-bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0
-mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0
-isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0
-io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
-kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0
-aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0
-dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
-bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0
-posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0
-iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
-iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0
-iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0
-UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0
-ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0
-ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
-inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
-xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
-ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0
-ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0
-ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0
-PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
-RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0
-UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0
-tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0
-request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0
-TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0
-hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0
-dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
-bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
-eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0
-eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0
-inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0
-scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
-bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0
-request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0
-blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0
-bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0
-biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0
-biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0
-biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0
-biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
-bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0
-ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0
-uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0
-audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0
-sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0
-skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0
-skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0
-configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0
-file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0
-file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0
-fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0
-net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0
-task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0
-taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0
-proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0
-pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0
-proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0
-seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0
-sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
-bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
-shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0
-kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0
-kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0
-mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
-filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0
-inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0
-dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0
-names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0
-hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
-ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
-iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
-lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0
-lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0
-key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0
-uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
-nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
-vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0
-mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0
-fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0
-files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0
-signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0
-sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0
-task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0
-cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0
-anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0
-anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0
-pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0
-Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0
-Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0
-Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
-Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0
-Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0
-trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0
-ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0
-pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0
-radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0
-task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
-vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0
-dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
-kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0
-kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0
-kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0
-kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0
-kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0
-kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0
-kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0
-kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0
-kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0
-kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0
-kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0
-kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0
-kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0
-kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0
-kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0
-kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0
-kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/stat
-Lines: 16
-cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
-cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
-cpu1 47869 23 16474 1110787 591 0 46 0 0 0
-cpu2 46504 36 15916 1112321 441 0 326 0 0 0
-cpu3 47054 102 15683 1113230 533 0 60 0 0 0
-cpu4 28413 25 10776 1140321 217 0 8 0 0 0
-cpu5 29271 101 11586 1136270 672 0 30 0 0 0
-cpu6 29152 36 10276 1139721 319 0 29 0 0 0
-cpu7 29098 268 10164 1139282 555 0 31 0 0 0
-intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-ctxt 38014093
-btime 1418183276
-processes 26442
-procs_running 2
-procs_blocked 1
-softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/swaps
-Lines: 2
-Filename Type Size Used Priority
-/dev/dm-2 partition 131068 176 -2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/symlinktargets
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/README
-Lines: 2
-This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
-They are otherwise ignored by the tests
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/abc
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/def
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/ghi
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/uvw
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/xyz
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/kernel
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/kernel/random
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/entropy_avail
-Lines: 1
-3943
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/poolsize
-Lines: 1
-4096
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs
-Lines: 1
-60
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold
-Lines: 1
-3072
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/vm
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/admin_reserve_kbytes
-Lines: 1
-8192
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/block_dump
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/compact_unevictable_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_background_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_background_ratio
-Lines: 1
-10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_expire_centisecs
-Lines: 1
-3000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_ratio
-Lines: 1
-20
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_writeback_centisecs
-Lines: 1
-500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirtytime_expire_seconds
-Lines: 1
-43200
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/drop_caches
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/extfrag_threshold
-Lines: 1
-500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/hugetlb_shm_group
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/laptop_mode
-Lines: 1
-5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/legacy_va_layout
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/lowmem_reserve_ratio
-Lines: 1
-256 256 32 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/max_map_count
-Lines: 1
-65530
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/memory_failure_early_kill
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/memory_failure_recovery
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_free_kbytes
-Lines: 1
-67584
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_slab_ratio
-Lines: 1
-5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_unmapped_ratio
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/mmap_min_addr
-Lines: 1
-65536
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_hugepages
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_overcommit_hugepages
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/numa_stat
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/numa_zonelist_order
-Lines: 1
-Node
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/oom_dump_tasks
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/oom_kill_allocating_task
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_kbytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_memory
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_ratio
-Lines: 1
-50
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/page-cluster
-Lines: 1
-3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/panic_on_oom
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/percpu_pagelist_fraction
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/stat_interval
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/swappiness
-Lines: 1
-60
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/user_reserve_kbytes
-Lines: 1
-131072
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/vfs_cache_pressure
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/watermark_boost_factor
-Lines: 1
-15000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/watermark_scale_factor
-Lines: 1
-10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/zone_reclaim_mode
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/zoneinfo
-Lines: 262
-Node 0, zone DMA
- per-node stats
- nr_inactive_anon 230981
- nr_active_anon 547580
- nr_inactive_file 316904
- nr_active_file 346282
- nr_unevictable 115467
- nr_slab_reclaimable 131220
- nr_slab_unreclaimable 47320
- nr_isolated_anon 0
- nr_isolated_file 0
- workingset_nodes 11627
- workingset_refault 466886
- workingset_activate 276925
- workingset_restore 84055
- workingset_nodereclaim 487
- nr_anon_pages 795576
- nr_mapped 215483
- nr_file_pages 761874
- nr_dirty 908
- nr_writeback 0
- nr_writeback_temp 0
- nr_shmem 224925
- nr_shmem_hugepages 0
- nr_shmem_pmdmapped 0
- nr_anon_transparent_hugepages 0
- nr_unstable 0
- nr_vmscan_write 12950
- nr_vmscan_immediate_reclaim 3033
- nr_dirtied 8007423
- nr_written 7752121
- nr_kernel_misc_reclaimable 0
- pages free 3952
- min 33
- low 41
- high 49
- spanned 4095
- present 3975
- managed 3956
- protection: (0, 2877, 7826, 7826, 7826)
- nr_free_pages 3952
- nr_zone_inactive_anon 0
- nr_zone_active_anon 0
- nr_zone_inactive_file 0
- nr_zone_active_file 0
- nr_zone_unevictable 0
- nr_zone_write_pending 0
- nr_mlock 0
- nr_page_table_pages 0
- nr_kernel_stack 0
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 1
- numa_miss 0
- numa_foreign 0
- numa_interleave 0
- numa_local 1
- numa_other 0
- pagesets
- cpu: 0
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 1
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 2
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 3
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 4
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 5
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 6
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 7
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- node_unreclaimable: 0
- start_pfn: 1
-Node 0, zone DMA32
- pages free 204252
- min 19510
- low 21059
- high 22608
- spanned 1044480
- present 759231
- managed 742806
- protection: (0, 0, 4949, 4949, 4949)
- nr_free_pages 204252
- nr_zone_inactive_anon 118558
- nr_zone_active_anon 106598
- nr_zone_inactive_file 75475
- nr_zone_active_file 70293
- nr_zone_unevictable 66195
- nr_zone_write_pending 64
- nr_mlock 4
- nr_page_table_pages 1756
- nr_kernel_stack 2208
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 113952967
- numa_miss 0
- numa_foreign 0
- numa_interleave 0
- numa_local 113952967
- numa_other 0
- pagesets
- cpu: 0
- count: 345
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 1
- count: 356
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 2
- count: 325
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 3
- count: 346
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 4
- count: 321
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 5
- count: 316
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 6
- count: 373
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 7
- count: 339
- high: 378
- batch: 63
- vm stats threshold: 48
- node_unreclaimable: 0
- start_pfn: 4096
-Node 0, zone Normal
- pages free 18553
- min 11176
- low 13842
- high 16508
- spanned 1308160
- present 1308160
- managed 1268711
- protection: (0, 0, 0, 0, 0)
- nr_free_pages 18553
- nr_zone_inactive_anon 112423
- nr_zone_active_anon 440982
- nr_zone_inactive_file 241429
- nr_zone_active_file 275989
- nr_zone_unevictable 49272
- nr_zone_write_pending 844
- nr_mlock 154
- nr_page_table_pages 9750
- nr_kernel_stack 15136
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 162718019
- numa_miss 0
- numa_foreign 0
- numa_interleave 26812
- numa_local 162718019
- numa_other 0
- pagesets
- cpu: 0
- count: 316
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 1
- count: 366
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 2
- count: 60
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 3
- count: 256
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 4
- count: 253
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 5
- count: 159
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 6
- count: 311
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 7
- count: 264
- high: 378
- batch: 63
- vm stats threshold: 56
- node_unreclaimable: 0
- start_pfn: 1048576
-Node 0, zone Movable
- pages free 0
- min 0
- low 0
- high 0
- spanned 0
- present 0
- managed 0
- protection: (0, 0, 0, 0, 0)
-Node 0, zone Device
- pages free 0
- min 0
- low 0
- high 0
- spanned 0
- present 0
- managed 0
- protection: (0, 0, 0, 0, 0)
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/dm-0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/dm-0/stat
-Lines: 1
-6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda/queue
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/add_random
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/chunk_sectors
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/dax
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_granularity
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_max_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_zeroes_data
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/fua
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/hw_sector_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_poll
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_poll_delay
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_timeout
-Lines: 1
-30000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda/queue/iosched
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/back_seek_max
-Lines: 1
-16384
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async
-Lines: 1
-250
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync
-Lines: 1
-125
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/low_latency
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/max_budget
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/slice_idle
-Lines: 1
-8
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us
-Lines: 1
-8000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/timeout_sync
-Lines: 1
-125
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iostats
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/logical_block_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_discard_segments
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb
-Lines: 1
-32767
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_integrity_segments
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_sectors_kb
-Lines: 1
-1280
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_segment_size
-Lines: 1
-65536
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_segments
-Lines: 1
-168
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/minimum_io_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nomerges
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nr_requests
-Lines: 1
-64
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nr_zones
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/optimal_io_size
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/physical_block_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/read_ahead_kb
-Lines: 1
-128
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/rotational
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/rq_affinity
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/scheduler
-Lines: 1
-mq-deadline kyber [bfq] none
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/wbt_lat_usec
-Lines: 1
-75000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_cache
-Lines: 1
-write back
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_same_max_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/zoned
-Lines: 1
-none
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/stat
-Lines: 1
-9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host/host0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo
-Lines: 1
-30
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/fabric_name
-Lines: 1
-0x0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/node_name
-Lines: 1
-0x2000e0071bce95f2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_id
-Lines: 1
-0x000002
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_name
-Lines: 1
-0x1000e0071bce95f2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_state
-Lines: 1
-Online
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_type
-Lines: 1
-Point-To-Point (direct nport connection)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/speed
-Lines: 1
-16 Gbit
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host/host0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames
-Lines: 1
-0xffffffffffffffff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/error_frames
-Lines: 1
-0x0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts
-Lines: 1
-0x13
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count
-Lines: 1
-0x2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count
-Lines: 1
-0x8
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count
-Lines: 1
-0x9
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count
-Lines: 1
-0x11
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count
-Lines: 1
-0x10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/nos_count
-Lines: 1
-0x12
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames
-Lines: 1
-0x3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/rx_words
-Lines: 1
-0x4
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset
-Lines: 1
-0x7
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames
-Lines: 1
-0x5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/tx_words
-Lines: 1
-0x6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/supported_classes
-Lines: 1
-Class 3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/supported_speeds
-Lines: 1
-4 Gbit, 8 Gbit, 16 Gbit
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/symbolic_name
-Lines: 1
-Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/board_id
-Lines: 1
-SM_1141000001000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver
-Lines: 1
-2.31.5050
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/hca_type
-Lines: 1
-MT4099
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data
-Lines: 1
-2221223609
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets
-Lines: 1
-87169372
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data
-Lines: 1
-26509113295
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets
-Lines: 1
-85734114
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait
-Lines: 1
-3599
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state
-Lines: 1
-5: LinkUp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate
-Lines: 1
-40 Gb/sec (4X QDR)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state
-Lines: 1
-4: ACTIVE
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data
-Lines: 1
-2460436784
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets
-Lines: 1
-89332064
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data
-Lines: 1
-26540356890
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets
-Lines: 1
-88622850
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait
-Lines: 1
-3846
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state
-Lines: 1
-5: LinkUp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate
-Lines: 1
-40 Gb/sec (4X QDR)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state
-Lines: 1
-4: ACTIVE
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/net
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/net/eth0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/addr_assign_type
-Lines: 1
-3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/addr_len
-Lines: 1
-6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/address
-Lines: 1
-01:01:01:01:01:01
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/broadcast
-Lines: 1
-ff:ff:ff:ff:ff:ff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_changes
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_down_count
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_up_count
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/dev_id
-Lines: 1
-0x20
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/device
-SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/dormant
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/duplex
-Lines: 1
-full
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/flags
-Lines: 1
-0x1303
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/ifalias
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/ifindex
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/iflink
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/link_mode
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/mtu
-Lines: 1
-1500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/name_assign_type
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/netdev_group
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/operstate
-Lines: 1
-up
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_port_id
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_port_name
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_switch_id
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/speed
-Lines: 1
-1000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/tx_queue_len
-Lines: 1
-1000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/type
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/power_supply/AC
-SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/power_supply/BAT0
-SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw
-Lines: 1
-95000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us
-Lines: 1
-999424
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name
-Lines: 1
-short_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us
-Lines: 1
-2440
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj
-Lines: 1
-240422366267
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/name
-Lines: 1
-package-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw
-Lines: 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us
-Lines: 1
-976
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj
-Lines: 1
-118821284256
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/name
-Lines: 1
-core
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:a
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw
-Lines: 1
-95000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us
-Lines: 1
-999424
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name
-Lines: 1
-short_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us
-Lines: 1
-2440
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj
-Lines: 1
-240422366267
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/name
-Lines: 1
-package-10
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/cooling_device0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/cur_state
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/max_state
-Lines: 1
-50
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/type
-Lines: 1
-Processor
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/cooling_device1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/cur_state
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/max_state
-Lines: 1
-27
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/type
-Lines: 1
-intel_powerclamp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/thermal_zone0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/policy
-Lines: 1
-step_wise
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/temp
-Lines: 1
-49925
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/type
-Lines: 1
-bcm2835_thermal
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/thermal_zone1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/mode
-Lines: 1
-enabled
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/passive
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/policy
-Lines: 1
-step_wise
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/temp
-Lines: 1
--44000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/type
-Lines: 1
-acpitz
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device
-SymlinkTo: ../../../ACPI0003:00
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async
-Lines: 1
-disabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control
-Lines: 1
-auto
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled
-Lines: 1
-disabled
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status
-Lines: 1
-unsupported
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup
-Lines: 1
-enabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms
-Lines: 1
-10598
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem
-SymlinkTo: ../../../../../../../../../class/power_supply
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type
-Lines: 1
-Mains
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent
-Lines: 2
-POWER_SUPPLY_NAME=AC
-POWER_SUPPLY_ONLINE=0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm
-Lines: 1
-2369000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity
-Lines: 1
-98
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level
-Lines: 1
-Normal
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold
-Lines: 1
-95
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device
-SymlinkTo: ../../../PNP0C0A:00
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full
-Lines: 1
-50060000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design
-Lines: 1
-47520000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now
-Lines: 1
-49450000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer
-Lines: 1
-LGC
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name
-Lines: 1
-LNV-45N1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async
-Lines: 1
-disabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control
-Lines: 1
-auto
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled
-Lines: 1
-disabled
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status
-Lines: 1
-unsupported
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now
-Lines: 1
-4830000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number
-Lines: 1
-38109
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status
-Lines: 1
-Discharging
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem
-SymlinkTo: ../../../../../../../../../class/power_supply
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology
-Lines: 1
-Li-ion
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type
-Lines: 1
-Battery
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent
-Lines: 16
-POWER_SUPPLY_NAME=BAT0
-POWER_SUPPLY_STATUS=Discharging
-POWER_SUPPLY_PRESENT=1
-POWER_SUPPLY_TECHNOLOGY=Li-ion
-POWER_SUPPLY_CYCLE_COUNT=0
-POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000
-POWER_SUPPLY_VOLTAGE_NOW=11750000
-POWER_SUPPLY_POWER_NOW=5064000
-POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000
-POWER_SUPPLY_ENERGY_FULL=47390000
-POWER_SUPPLY_ENERGY_NOW=40730000
-POWER_SUPPLY_CAPACITY=85
-POWER_SUPPLY_CAPACITY_LEVEL=Normal
-POWER_SUPPLY_MODEL_NAME=LNV-45N1
-POWER_SUPPLY_MANUFACTURER=LGC
-POWER_SUPPLY_SERIAL_NUMBER=38109
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design
-Lines: 1
-10800000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now
-Lines: 1
-12229000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written
-Lines: 1
-512
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats
-Lines: 5
-Unused: 99%
-Metadata: 0%
-Average: 10473
-Sectors per Q: 64
-Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:1f.6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class
-Lines: 1
-0x020000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits
-Lines: 1
-64
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device
-Lines: 1
-0x15d7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits
-Lines: 1
-64
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override
-Lines: 1
-(null)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq
-Lines: 1
-140
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias
-Lines: 1
-pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource
-Lines: 13
-0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision
-Lines: 1
-0x21
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device
-Lines: 1
-0x225a
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor
-Lines: 1
-0x17aa
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent
-Lines: 6
-DRIVER=e1000e
-PCI_CLASS=20000
-PCI_ID=8086:15D7
-PCI_SUBSYS_ID=17AA:225A
-PCI_SLOT_NAME=0000:00:1f.6
-MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor
-Lines: 1
-0x8086
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd/0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/0/name
-Lines: 1
-demo
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/0/pool
-Lines: 1
-iscsi-images
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd/1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/1/name
-Lines: 1
-wrong
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/1/pool
-Lines: 1
-wrong-images
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node/node1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/node/node1/vmstat
-Lines: 6
-nr_free_pages 1
-nr_zone_inactive_anon 2
-nr_zone_active_anon 3
-nr_zone_inactive_file 4
-nr_zone_active_file 5
-nr_zone_unevictable 6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node/node2
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/node/node2/vmstat
-Lines: 6
-nr_free_pages 7
-nr_zone_inactive_anon 8
-nr_zone_active_anon 9
-nr_zone_inactive_file 10
-nr_zone_active_file 11
-nr_zone_unevictable 12
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/clocksource
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/clocksource/clocksource0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource
-Lines: 1
-tsc hpet acpi_pm
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource
-Lines: 1
-tsc
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq
-SymlinkTo: ../cpufreq/policy0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
-Lines: 1
-10084
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count
-Lines: 1
-34818
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0/topology
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings
-Lines: 1
-11
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list
-Lines: 1
-0,4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq
-Lines: 1
-1200195
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq
-Lines: 1
-3300000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq
-Lines: 1
-1200000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency
-Lines: 1
-4294967295
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus
-Lines: 1
-1
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors
-Lines: 1
-performance powersave
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver
-Lines: 1
-intel_pstate
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
-Lines: 1
-powersave
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq
-Lines: 1
-3300000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
-Lines: 1
-1200000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed
-Lines: 1
-
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count
-Lines: 1
-523
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count
-Lines: 1
-34818
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/topology
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings
-Lines: 1
-22
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list
-Lines: 1
-1,5
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq
-Lines: 1
-2400000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq
-Lines: 1
-800000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors
-Lines: 1
-performance powersave
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq
-Lines: 1
-1219917
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver
-Lines: 1
-intel_pstate
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor
-Lines: 1
-powersave
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq
-Lines: 1
-2400000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq
-Lines: 1
-800000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed
-Lines: 1
-
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0
-Mode: 777
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug
-Lines: 7
-rate: 1.1M/sec
-dirty: 20.4G
-target: 20.4G
-proportional: 427.5k
-integral: 790.0k
-change: 321.5k/sec
-next io: 17ms
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0
-Mode: 777
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written
-Lines: 1
-512
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats
-Lines: 5
-Unused: 99%
-Metadata: 0%
-Average: 10473
-Sectors per Q: 64
-Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us
-Lines: 1
-1305
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly
-Lines: 1
-131072
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used
-Lines: 1
-933888
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used
-Lines: 1
-1867776
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags
-Lines: 1
-4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes
-Lines: 1
-1073741824
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes
-Lines: 1
-933888
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes
-Lines: 1
-1073741824
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used
-Lines: 1
-32768
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags
-Lines: 1
-2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes
-Lines: 1
-8388608
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes
-Lines: 1
-8388608
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size
-Lines: 1
-20971520
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size
-Lines: 1
-20971520
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label
-Lines: 1
-fixture
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid
-Lines: 1
-0abb23a9-579b-43e6-ad30-227ef47fcb9d
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly
-Lines: 1
-262144
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags
-Lines: 1
-4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags
-Lines: 1
-2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22
-SymlinkTo: ../../../../devices/virtual/block/loop22
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23
-SymlinkTo: ../../../../devices/virtual/block/loop23
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24
-SymlinkTo: ../../../../devices/virtual/block/loop24
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25
-SymlinkTo: ../../../../devices/virtual/block/loop25
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid
-Lines: 1
-7f07c59f-6136-449c-ab87-e1cf2328731b
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sda1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sda1/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/xfs/sda1/stats/stats
-Lines: 1
-extent_alloc 1 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sdb1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sdb1/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/xfs/sdb1/stats/stats
-Lines: 1
-extent_alloc 2 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path
-Lines: 1
-/home/iscsi/file_back_1G
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/iblock_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path
-Lines: 1
-/dev/rbd1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rbd_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path
-Lines: 1
-/dev/rbd/iscsi-images/demo
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d
-SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-204950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-10325
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-40325
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026
-SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-104950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-20095
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-71235
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686
-SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-301950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-10195
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-30195
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893
-SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-1234
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-1504
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-4733
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
deleted file mode 100644
index 0102ab0fd85..00000000000
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "github.com/prometheus/procfs/internal/fs"
-)
-
-// FS represents the pseudo-filesystem sys, which provides an interface to
-// kernel data structures.
-type FS struct {
- proc fs.FS
-}
-
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
-
-// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
-// It will error if the mount point directory can't be read or is a file.
-func NewDefaultFS() (FS, error) {
- return NewFS(DefaultMountPoint)
-}
-
-// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
-// if the mount point directory can't be read or is a file.
-func NewFS(mountPoint string) (FS, error) {
- fs, err := fs.NewFS(mountPoint)
- if err != nil {
- return FS{}, err
- }
- return FS{fs}, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
deleted file mode 100644
index f8070e6e2be..00000000000
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Fscacheinfo represents fscache statistics.
-type Fscacheinfo struct {
- // Number of index cookies allocated
- IndexCookiesAllocated uint64
- // data storage cookies allocated
- DataStorageCookiesAllocated uint64
- // Number of special cookies allocated
- SpecialCookiesAllocated uint64
- // Number of objects allocated
- ObjectsAllocated uint64
- // Number of object allocation failures
- ObjectAllocationsFailure uint64
- // Number of objects that reached the available state
- ObjectsAvailable uint64
- // Number of objects that reached the dead state
- ObjectsDead uint64
- // Number of objects that didn't have a coherency check
- ObjectsWithoutCoherencyCheck uint64
- // Number of objects that passed a coherency check
- ObjectsWithCoherencyCheck uint64
- // Number of objects that needed a coherency data update
- ObjectsNeedCoherencyCheckUpdate uint64
- // Number of objects that were declared obsolete
- ObjectsDeclaredObsolete uint64
- // Number of pages marked as being cached
- PagesMarkedAsBeingCached uint64
- // Number of uncache page requests seen
- UncachePagesRequestSeen uint64
- // Number of acquire cookie requests seen
- AcquireCookiesRequestSeen uint64
- // Number of acq reqs given a NULL parent
- AcquireRequestsWithNullParent uint64
- // Number of acq reqs rejected due to no cache available
- AcquireRequestsRejectedNoCacheAvailable uint64
- // Number of acq reqs succeeded
- AcquireRequestsSucceeded uint64
- // Number of acq reqs rejected due to error
- AcquireRequestsRejectedDueToError uint64
- // Number of acq reqs failed on ENOMEM
- AcquireRequestsFailedDueToEnomem uint64
- // Number of lookup calls made on cache backends
- LookupsNumber uint64
- // Number of negative lookups made
- LookupsNegative uint64
- // Number of positive lookups made
- LookupsPositive uint64
- // Number of objects created by lookup
- ObjectsCreatedByLookup uint64
- // Number of lookups timed out and requeued
- LookupsTimedOutAndRequed uint64
- InvalidationsNumber uint64
- InvalidationsRunning uint64
- // Number of update cookie requests seen
- UpdateCookieRequestSeen uint64
- // Number of upd reqs given a NULL parent
- UpdateRequestsWithNullParent uint64
- // Number of upd reqs granted CPU time
- UpdateRequestsRunning uint64
- // Number of relinquish cookie requests seen
- RelinquishCookiesRequestSeen uint64
- // Number of rlq reqs given a NULL parent
- RelinquishCookiesWithNullParent uint64
- // Number of rlq reqs waited on completion of creation
- RelinquishRequestsWaitingCompleteCreation uint64
- // Relinqs rtr
- RelinquishRetries uint64
- // Number of attribute changed requests seen
- AttributeChangedRequestsSeen uint64
- // Number of attr changed requests queued
- AttributeChangedRequestsQueued uint64
- // Number of attr changed rejected -ENOBUFS
- AttributeChangedRejectDueToEnobufs uint64
- // Number of attr changed failed -ENOMEM
- AttributeChangedFailedDueToEnomem uint64
- // Number of attr changed ops given CPU time
- AttributeChangedOps uint64
- // Number of allocation requests seen
- AllocationRequestsSeen uint64
- // Number of successful alloc reqs
- AllocationOkRequests uint64
- // Number of alloc reqs that waited on lookup completion
- AllocationWaitingOnLookup uint64
- // Number of alloc reqs rejected -ENOBUFS
- AllocationsRejectedDueToEnobufs uint64
- // Number of alloc reqs aborted -ERESTARTSYS
- AllocationsAbortedDueToErestartsys uint64
- // Number of alloc reqs submitted
- AllocationOperationsSubmitted uint64
- // Number of alloc reqs waited for CPU time
- AllocationsWaitedForCPU uint64
- // Number of alloc reqs aborted due to object death
- AllocationsAbortedDueToObjectDeath uint64
- // Number of retrieval (read) requests seen
- RetrievalsReadRequests uint64
- // Number of successful retr reqs
- RetrievalsOk uint64
- // Number of retr reqs that waited on lookup completion
- RetrievalsWaitingLookupCompletion uint64
- // Number of retr reqs returned -ENODATA
- RetrievalsReturnedEnodata uint64
- // Number of retr reqs rejected -ENOBUFS
- RetrievalsRejectedDueToEnobufs uint64
- // Number of retr reqs aborted -ERESTARTSYS
- RetrievalsAbortedDueToErestartsys uint64
- // Number of retr reqs failed -ENOMEM
- RetrievalsFailedDueToEnomem uint64
- // Number of retr reqs submitted
- RetrievalsRequests uint64
- // Number of retr reqs waited for CPU time
- RetrievalsWaitingCPU uint64
- // Number of retr reqs aborted due to object death
- RetrievalsAbortedDueToObjectDeath uint64
- // Number of storage (write) requests seen
- StoreWriteRequests uint64
- // Number of successful store reqs
- StoreSuccessfulRequests uint64
- // Number of store reqs on a page already pending storage
- StoreRequestsOnPendingStorage uint64
- // Number of store reqs rejected -ENOBUFS
- StoreRequestsRejectedDueToEnobufs uint64
- // Number of store reqs failed -ENOMEM
- StoreRequestsFailedDueToEnomem uint64
- // Number of store reqs submitted
- StoreRequestsSubmitted uint64
- // Number of store reqs granted CPU time
- StoreRequestsRunning uint64
- // Number of pages given store req processing time
- StorePagesWithRequestsProcessing uint64
- // Number of store reqs deleted from tracking tree
- StoreRequestsDeleted uint64
- // Number of store reqs over store limit
- StoreRequestsOverStoreLimit uint64
- // Number of release reqs against pages with no pending store
- ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
- // Number of release reqs against pages stored by time lock granted
- ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
- // Number of release reqs ignored due to in-progress store
- ReleaseRequestsIgnoredDueToInProgressStore uint64
- // Number of page stores cancelled due to release req
- PageStoresCancelledByReleaseRequests uint64
- VmscanWaiting uint64
- // Number of times async ops added to pending queues
- OpsPending uint64
- // Number of times async ops given CPU time
- OpsRunning uint64
- // Number of times async ops queued for processing
- OpsEnqueued uint64
- // Number of async ops cancelled
- OpsCancelled uint64
- // Number of async ops rejected due to object lookup/create failure
- OpsRejected uint64
- // Number of async ops initialised
- OpsInitialised uint64
- // Number of async ops queued for deferred release
- OpsDeferred uint64
- // Number of async ops released (should equal ini=N when idle)
- OpsReleased uint64
- // Number of deferred-release async ops garbage collected
- OpsGarbageCollected uint64
- // Number of in-progress alloc_object() cache ops
- CacheopAllocationsinProgress uint64
- // Number of in-progress lookup_object() cache ops
- CacheopLookupObjectInProgress uint64
- // Number of in-progress lookup_complete() cache ops
- CacheopLookupCompleteInPorgress uint64
- // Number of in-progress grab_object() cache ops
- CacheopGrabObjectInProgress uint64
- CacheopInvalidations uint64
- // Number of in-progress update_object() cache ops
- CacheopUpdateObjectInProgress uint64
- // Number of in-progress drop_object() cache ops
- CacheopDropObjectInProgress uint64
- // Number of in-progress put_object() cache ops
- CacheopPutObjectInProgress uint64
- // Number of in-progress attr_changed() cache ops
- CacheopAttributeChangeInProgress uint64
- // Number of in-progress sync_cache() cache ops
- CacheopSyncCacheInProgress uint64
- // Number of in-progress read_or_alloc_page() cache ops
- CacheopReadOrAllocPageInProgress uint64
- // Number of in-progress read_or_alloc_pages() cache ops
- CacheopReadOrAllocPagesInProgress uint64
- // Number of in-progress allocate_page() cache ops
- CacheopAllocatePageInProgress uint64
- // Number of in-progress allocate_pages() cache ops
- CacheopAllocatePagesInProgress uint64
- // Number of in-progress write_page() cache ops
- CacheopWritePagesInProgress uint64
- // Number of in-progress uncache_page() cache ops
- CacheopUncachePagesInProgress uint64
- // Number of in-progress dissociate_pages() cache ops
- CacheopDissociatePagesInProgress uint64
- // Number of object lookups/creations rejected due to lack of space
- CacheevLookupsAndCreationsRejectedLackSpace uint64
- // Number of stale objects deleted
- CacheevStaleObjectsDeleted uint64
- // Number of objects retired when relinquished
- CacheevRetiredWhenReliquished uint64
- // Number of objects culled
- CacheevObjectsCulled uint64
-}
-
-// Fscacheinfo returns information about current fscache statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
-func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
- if err != nil {
- return Fscacheinfo{}, err
- }
-
- m, err := parseFscacheinfo(bytes.NewReader(b))
- if err != nil {
- return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
- }
-
- return *m, nil
-}
-
-func setFSCacheFields(fields []string, setFields ...*uint64) error {
- var err error
- if len(fields) < len(setFields) {
- return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
- }
-
- for i := range setFields {
- *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
- var m Fscacheinfo
- s := bufio.NewScanner(r)
- for s.Scan() {
- fields := strings.Fields(s.Text())
- if len(fields) < 2 {
- return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
- }
-
- switch fields[0] {
- case "Cookies:":
- err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
- &m.SpecialCookiesAllocated)
- if err != nil {
- return &m, err
- }
- case "Objects:":
- err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
- &m.ObjectsAvailable, &m.ObjectsDead)
- if err != nil {
- return &m, err
- }
- case "ChkAux":
- err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
- &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
- if err != nil {
- return &m, err
- }
- case "Pages":
- err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
- if err != nil {
- return &m, err
- }
- case "Acquire:":
- err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
- &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
- &m.AcquireRequestsFailedDueToEnomem)
- if err != nil {
- return &m, err
- }
- case "Lookups:":
- err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
- &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
- if err != nil {
- return &m, err
- }
- case "Invals":
- err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
- if err != nil {
- return &m, err
- }
- case "Updates:":
- err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
- &m.UpdateRequestsRunning)
- if err != nil {
- return &m, err
- }
- case "Relinqs:":
- err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
- &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
- if err != nil {
- return &m, err
- }
- case "AttrChg:":
- err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
- &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
- if err != nil {
- return &m, err
- }
- case "Allocs":
- if strings.Split(fields[2], "=")[0] == "n" {
- err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
- &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
- &m.AllocationsAbortedDueToObjectDeath)
- if err != nil {
- return &m, err
- }
- }
- case "Retrvls:":
- if strings.Split(fields[1], "=")[0] == "n" {
- err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
- &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
- &m.RetrievalsFailedDueToEnomem)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
- if err != nil {
- return &m, err
- }
- }
- case "Stores":
- if strings.Split(fields[2], "=")[0] == "n" {
- err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
- &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
- &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
- if err != nil {
- return &m, err
- }
- }
- case "VmScan":
- err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
- &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
- &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
- if err != nil {
- return &m, err
- }
- case "Ops":
- if strings.Split(fields[2], "=")[0] == "pend" {
- err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
- if err != nil {
- return &m, err
- }
- }
- case "CacheOp:":
- if strings.Split(fields[1], "=")[0] == "alo" {
- err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
- &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
- if err != nil {
- return &m, err
- }
- } else if strings.Split(fields[1], "=")[0] == "inv" {
- err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
- &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
- &m.CacheopSyncCacheInProgress)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
- &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
- &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
- if err != nil {
- return &m, err
- }
- }
- case "CacheEv:":
- err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
- &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
- if err != nil {
- return &m, err
- }
- }
- }
-
- return &m, nil
-}
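
The file above defined both the Fscacheinfo struct and its parser; consumers reach it through FS.Fscacheinfo. A minimal sketch of a caller, assuming the upstream github.com/prometheus/procfs module on its usual import path:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS validates the mount point; Fscacheinfo then reads and parses
	// /proc/fs/fscache/stats into the struct defined above.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	info, err := fs.Fscacheinfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("index cookies allocated:", info.IndexCookiesAllocated)
	fmt.Println("objects allocated:", info.ObjectsAllocated)
}
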
diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod
deleted file mode 100644
index ba6681f5217..00000000000
--- a/vendor/github.com/prometheus/procfs/go.mod
+++ /dev/null
@@ -1,9 +0,0 @@
-module github.com/prometheus/procfs
-
-go 1.13
-
-require (
- github.com/google/go-cmp v0.5.4
- golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
- golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
-)
diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum
deleted file mode 100644
index 7ceaf56b7d5..00000000000
--- a/vendor/github.com/prometheus/procfs/go.sum
+++ /dev/null
@@ -1,8 +0,0 @@
-github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
deleted file mode 100644
index 0040753b1c1..00000000000
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs
-
-import (
- "fmt"
- "os"
- "path/filepath"
-)
-
-const (
- // DefaultProcMountPoint is the common mount point of the proc filesystem.
- DefaultProcMountPoint = "/proc"
-
- // DefaultSysMountPoint is the common mount point of the sys filesystem.
- DefaultSysMountPoint = "/sys"
-
- // DefaultConfigfsMountPoint is the common mount point of the configfs
- DefaultConfigfsMountPoint = "/sys/kernel/config"
-)
-
-// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
-// interface to kernel data structures.
-type FS string
-
-// NewFS returns a new FS mounted under the given mountPoint. It will error
-// if the mount point can't be read.
-func NewFS(mountPoint string) (FS, error) {
- info, err := os.Stat(mountPoint)
- if err != nil {
- return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
- }
- if !info.IsDir() {
- return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
- }
-
- return FS(mountPoint), nil
-}
-
-// Path appends the given path elements to the filesystem path, adding separators
-// as necessary.
-func (fs FS) Path(p ...string) string {
- return filepath.Join(append([]string{string(fs)}, p...)...)
-}
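
A short sketch of how code inside the procfs module uses this helper; the internal/ path means external modules cannot import it directly, and the function name here is illustrative:

package procfs

import "github.com/prometheus/procfs/internal/fs"

// fscacheStatsPath shows NewFS validating the mount point and Path joining
// elements onto it; the result is "/proc/fs/fscache/stats".
func fscacheStatsPath() (string, error) {
	proc, err := fs.NewFS(fs.DefaultProcMountPoint)
	if err != nil {
		return "", err
	}
	return proc.Path("fs", "fscache", "stats"), nil
}
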
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
deleted file mode 100644
index 22cb07a6bbb..00000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "io/ioutil"
- "strconv"
- "strings"
-)
-
-// ParseUint32s parses a slice of strings into a slice of uint32s.
-func ParseUint32s(ss []string) ([]uint32, error) {
- us := make([]uint32, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- return nil, err
- }
-
- us = append(us, uint32(u))
- }
-
- return us, nil
-}
-
-// ParseUint64s parses a slice of strings into a slice of uint64s.
-func ParseUint64s(ss []string) ([]uint64, error) {
- us := make([]uint64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, u)
- }
-
- return us, nil
-}
-
-// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
-func ParsePInt64s(ss []string) ([]*int64, error) {
- us := make([]*int64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseInt(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, &u)
- }
-
- return us, nil
-}
-
-// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
-func ReadUintFromFile(path string) (uint64, error) {
- data, err := ioutil.ReadFile(path)
- if err != nil {
- return 0, err
- }
- return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
-}
-
-// ReadIntFromFile reads a file and attempts to parse an int64 from it.
-func ReadIntFromFile(path string) (int64, error) {
- data, err := ioutil.ReadFile(path)
- if err != nil {
- return 0, err
- }
- return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
-}
-
-// ParseBool parses a string into a boolean pointer.
-func ParseBool(b string) *bool {
- var truth bool
- switch b {
- case "enabled":
- truth = true
- case "disabled":
- truth = false
- default:
- return nil
- }
- return &truth
-}
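
A sketch of these helpers in use from inside the procfs module; the wrapper function is illustrative:

package procfs

import (
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

func parseExamples() error {
	// Whitespace-separated counters parse to []uint64{1, 2, 3}.
	if _, err := util.ParseUint64s(strings.Fields("1 2 3")); err != nil {
		return err
	}
	// Single-value files such as /proc/sys/kernel/random/poolsize parse
	// straight into a uint64.
	if _, err := util.ReadUintFromFile("/proc/sys/kernel/random/poolsize"); err != nil {
		return err
	}
	// ParseBool maps "enabled"/"disabled" to a *bool and anything else to nil.
	_ = util.ParseBool("enabled")
	return nil
}
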
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
deleted file mode 100644
index 8051161b2aa..00000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "io"
- "io/ioutil"
- "os"
-)
-
-// ReadFileNoStat uses ioutil.ReadAll to read the contents of an entire file.
-// This is similar to ioutil.ReadFile but without the call to os.Stat, because
-// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
-// Reads a max file size of 512kB. For files larger than this, a scanner
-// should be used.
-func ReadFileNoStat(filename string) ([]byte, error) {
- const maxBufferSize = 1024 * 512
-
- f, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- reader := io.LimitReader(f, maxBufferSize)
- return ioutil.ReadAll(reader)
-}
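
The stat-free read matters because ioutil.ReadFile sizes its buffer from os.Stat, and /proc files routinely report a size of 0 or 4096 regardless of their contents. A sketch of the intended call pattern from inside the module:

package procfs

import "github.com/prometheus/procfs/internal/util"

// readMeminfo reads /proc/meminfo in full (up to the 512kB cap) without
// trusting the size reported by stat(2).
func readMeminfo() ([]byte, error) {
	return util.ReadFileNoStat("/proc/meminfo")
}
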
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
deleted file mode 100644
index c07de0b6c9c..00000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux,!appengine
-
-package util
-
-import (
- "bytes"
- "os"
- "syscall"
-)
-
-// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
-// https://github.com/prometheus/node_exporter/pull/728/files
-//
-// Note that this function will not read files larger than 128 bytes.
-func SysReadFile(file string) (string, error) {
- f, err := os.Open(file)
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- // On some machines, hwmon drivers are broken and return EAGAIN. This causes
- // Go's ioutil.ReadFile implementation to poll forever.
- //
- // Since we either want to read data or bail immediately, do the simplest
- // possible read using syscall directly.
- const sysFileBufferSize = 128
- b := make([]byte, sysFileBufferSize)
- n, err := syscall.Read(int(f.Fd()), b)
- if err != nil {
- return "", err
- }
-
- return string(bytes.TrimSpace(b[:n])), nil
-}
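
A sketch of the call pattern this is built for; the hwmon path below is illustrative, not a file guaranteed to exist:

package procfs

import "github.com/prometheus/procfs/internal/util"

// readHwmonTemp performs a single bounded read(2), so a broken driver that
// returns EAGAIN surfaces as an error instead of an endless poll. Linux-only,
// per the build tag above.
func readHwmonTemp() (string, error) {
	return util.SysReadFile("/sys/class/hwmon/hwmon0/temp1_input")
}
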
diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
deleted file mode 100644
index fe2355d3c6f..00000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "strconv"
-)
-
-// TODO(mdlayher): util packages are an anti-pattern and this should be moved
-// somewhere else that is more focused in the future.
-
-// A ValueParser enables parsing a single string into a variety of data types
-// in a concise and safe way. The Err method must be invoked after invoking
-// any other methods to ensure a value was successfully parsed.
-type ValueParser struct {
- v string
- err error
-}
-
-// NewValueParser creates a ValueParser using the input string.
-func NewValueParser(v string) *ValueParser {
- return &ValueParser{v: v}
-}
-
-// Int interprets the underlying value as an int and returns that value.
-func (vp *ValueParser) Int() int { return int(vp.int64()) }
-
-// PInt64 interprets the underlying value as an int64 and returns a pointer to
-// that value.
-func (vp *ValueParser) PInt64() *int64 {
- if vp.err != nil {
- return nil
- }
-
- v := vp.int64()
- return &v
-}
-
-// int64 interprets the underlying value as an int64 and returns that value.
-// TODO: export if/when necessary.
-func (vp *ValueParser) int64() int64 {
- if vp.err != nil {
- return 0
- }
-
- // A base value of zero makes ParseInt infer the correct base using the
- // string's prefix, if any.
- const base = 0
- v, err := strconv.ParseInt(vp.v, base, 64)
- if err != nil {
- vp.err = err
- return 0
- }
-
- return v
-}
-
-// PUInt64 interprets the underlying value as a uint64 and returns a pointer to
-// that value.
-func (vp *ValueParser) PUInt64() *uint64 {
- if vp.err != nil {
- return nil
- }
-
-	// A base value of zero makes ParseUint infer the correct base using the
- // string's prefix, if any.
- const base = 0
- v, err := strconv.ParseUint(vp.v, base, 64)
- if err != nil {
- vp.err = err
- return nil
- }
-
- return &v
-}
-
-// Err returns the last error, if any, encountered by the ValueParser.
-func (vp *ValueParser) Err() error {
- return vp.err
-}
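
A sketch of the intended usage: parse first, then check Err once at the end, as the doc comment above requires.

package procfs

import (
	"fmt"

	"github.com/prometheus/procfs/internal/util"
)

func parseHexValue() (uint64, error) {
	vp := util.NewValueParser("0x10")
	v := vp.PUInt64() // base inferred from the 0x prefix, so *v == 16
	if err := vp.Err(); err != nil {
		return 0, fmt.Errorf("parsing value: %w", err)
	}
	return *v, nil
}
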
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
deleted file mode 100644
index 89e447746cf..00000000000
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "os"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
-type IPVSStats struct {
- // Total count of connections.
- Connections uint64
-	// Total incoming packets processed.
-	IncomingPackets uint64
-	// Total outgoing packets processed.
- OutgoingPackets uint64
- // Total incoming traffic.
- IncomingBytes uint64
- // Total outgoing traffic.
- OutgoingBytes uint64
-}
-
-// IPVSBackendStatus holds current metrics of one virtual / real address pair.
-type IPVSBackendStatus struct {
- // The local (virtual) IP address.
- LocalAddress net.IP
- // The remote (real) IP address.
- RemoteAddress net.IP
- // The local (virtual) port.
- LocalPort uint16
- // The remote (real) port.
- RemotePort uint16
- // The local firewall mark
- LocalMark string
- // The transport protocol (TCP, UDP).
- Proto string
- // The current number of active connections for this virtual/real address pair.
- ActiveConn uint64
- // The current number of inactive connections for this virtual/real address pair.
- InactConn uint64
- // The current weight of this virtual/real address pair.
- Weight uint64
-}
-
-// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
-func (fs FS) IPVSStats() (IPVSStats, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
- if err != nil {
- return IPVSStats{}, err
- }
-
- return parseIPVSStats(bytes.NewReader(data))
-}
-
-// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
-func parseIPVSStats(r io.Reader) (IPVSStats, error) {
- var (
- statContent []byte
- statLines []string
- statFields []string
- stats IPVSStats
- )
-
- statContent, err := ioutil.ReadAll(r)
- if err != nil {
- return IPVSStats{}, err
- }
-
- statLines = strings.SplitN(string(statContent), "\n", 4)
- if len(statLines) != 4 {
- return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
- }
-
- statFields = strings.Fields(statLines[2])
- if len(statFields) != 5 {
- return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
- }
-
- stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
-
- return stats, nil
-}
-
-// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
-func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
- file, err := os.Open(fs.proc.Path("net/ip_vs"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- return parseIPVSBackendStatus(file)
-}
-
-func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
- var (
- status []IPVSBackendStatus
- scanner = bufio.NewScanner(file)
- proto string
- localMark string
- localAddress net.IP
- localPort uint16
- err error
- )
-
- for scanner.Scan() {
- fields := strings.Fields(scanner.Text())
- if len(fields) == 0 {
- continue
- }
- switch {
- case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
- continue
- case fields[0] == "TCP" || fields[0] == "UDP":
- if len(fields) < 2 {
- continue
- }
- proto = fields[0]
- localMark = ""
- localAddress, localPort, err = parseIPPort(fields[1])
- if err != nil {
- return nil, err
- }
- case fields[0] == "FWM":
- if len(fields) < 2 {
- continue
- }
- proto = fields[0]
- localMark = fields[1]
- localAddress = nil
- localPort = 0
- case fields[0] == "->":
- if len(fields) < 6 {
- continue
- }
- remoteAddress, remotePort, err := parseIPPort(fields[1])
- if err != nil {
- return nil, err
- }
- weight, err := strconv.ParseUint(fields[3], 10, 64)
- if err != nil {
- return nil, err
- }
- activeConn, err := strconv.ParseUint(fields[4], 10, 64)
- if err != nil {
- return nil, err
- }
- inactConn, err := strconv.ParseUint(fields[5], 10, 64)
- if err != nil {
- return nil, err
- }
- status = append(status, IPVSBackendStatus{
- LocalAddress: localAddress,
- LocalPort: localPort,
- LocalMark: localMark,
- RemoteAddress: remoteAddress,
- RemotePort: remotePort,
- Proto: proto,
- Weight: weight,
- ActiveConn: activeConn,
- InactConn: inactConn,
- })
- }
- }
- return status, nil
-}
-
-func parseIPPort(s string) (net.IP, uint16, error) {
- var (
- ip net.IP
- err error
- )
-
- switch len(s) {
- case 13:
- ip, err = hex.DecodeString(s[0:8])
- if err != nil {
- return nil, 0, err
- }
- case 46:
- ip = net.ParseIP(s[1:40])
- if ip == nil {
- return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
- }
- default:
- return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
- }
-
- portString := s[len(s)-4:]
- if len(portString) != 4 {
- return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
- }
- port, err := strconv.ParseUint(portString, 16, 16)
- if err != nil {
- return nil, 0, err
- }
-
- return ip, uint16(port), nil
-}
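
A minimal sketch of a consumer of the two entry points above, assuming the upstream module:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.IPVSStats() // /proc/net/ip_vs_stats
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("connections:", stats.Connections)

	backends, err := fs.IPVSBackendStatus() // /proc/net/ip_vs
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range backends {
		fmt.Printf("%s %s -> %s active=%d\n",
			b.Proto, b.LocalAddress, b.RemoteAddress, b.ActiveConn)
	}
}
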
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
deleted file mode 100644
index da3a941d60b..00000000000
--- a/vendor/github.com/prometheus/procfs/kernel_random.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package procfs
-
-import (
- "os"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// KernelRandom contains information about the kernel's random number generator.
-type KernelRandom struct {
- // EntropyAvaliable gives the available entropy, in bits.
- EntropyAvaliable *uint64
- // PoolSize gives the size of the entropy pool, in bits.
- PoolSize *uint64
- // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
- URandomMinReseedSeconds *uint64
-	// WriteWakeupThreshold is the number of bits of entropy below which we wake up processes
- // that do a select(2) or poll(2) for write access to /dev/random.
- WriteWakeupThreshold *uint64
- // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
- // waiting for entropy from /dev/random.
- ReadWakeupThreshold *uint64
-}
-
-// KernelRandom returns values from /proc/sys/kernel/random.
-func (fs FS) KernelRandom() (KernelRandom, error) {
- random := KernelRandom{}
-
- for file, p := range map[string]**uint64{
- "entropy_avail": &random.EntropyAvaliable,
- "poolsize": &random.PoolSize,
- "urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
- "write_wakeup_threshold": &random.WriteWakeupThreshold,
- "read_wakeup_threshold": &random.ReadWakeupThreshold,
- } {
- val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
- if os.IsNotExist(err) {
- continue
- }
- if err != nil {
- return random, err
- }
- *p = &val
- }
-
- return random, nil
-}
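
Because missing files are skipped rather than reported, every field is a pointer that may remain nil; callers must check before dereferencing. A sketch:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	random, err := fs.KernelRandom()
	if err != nil {
		log.Fatal(err)
	}
	// Nil means entropy_avail was absent; the field spelling follows the
	// struct above.
	if random.EntropyAvaliable != nil {
		fmt.Println("entropy available:", *random.EntropyAvaliable)
	}
}
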
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
deleted file mode 100644
index 0cce190ec22..00000000000
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// LoadAvg represents an entry in /proc/loadavg
-type LoadAvg struct {
- Load1 float64
- Load5 float64
- Load15 float64
-}
-
-// LoadAvg returns loadavg from /proc.
-func (fs FS) LoadAvg() (*LoadAvg, error) {
- path := fs.proc.Path("loadavg")
-
- data, err := util.ReadFileNoStat(path)
- if err != nil {
- return nil, err
- }
- return parseLoad(data)
-}
-
-// Parse /proc loadavg and return 1m, 5m and 15m.
-func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
- loads := make([]float64, 3)
- parts := strings.Fields(string(loadavgBytes))
- if len(parts) < 3 {
- return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
- }
-
- var err error
- for i, load := range parts[0:3] {
- loads[i], err = strconv.ParseFloat(load, 64)
- if err != nil {
- return nil, fmt.Errorf("could not parse load %q: %w", load, err)
- }
- }
- return &LoadAvg{
- Load1: loads[0],
- Load5: loads[1],
- Load15: loads[2],
- }, nil
-}
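
A sketch of the caller side, assuming the upstream module:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	avg, err := fs.LoadAvg() // parses the first three fields of /proc/loadavg
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("load: %.2f %.2f %.2f\n", avg.Load1, avg.Load5, avg.Load15)
}
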
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
deleted file mode 100644
index 4c4493bfa50..00000000000
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "io/ioutil"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
- recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
- componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
-)
-
-// MDStat holds info parsed from /proc/mdstat.
-type MDStat struct {
- // Name of the device.
- Name string
- // activity-state of the device.
- ActivityState string
- // Number of active disks.
- DisksActive int64
- // Total number of disks the device requires.
- DisksTotal int64
- // Number of failed disks.
- DisksFailed int64
- // Spare disks in the device.
- DisksSpare int64
- // Number of blocks the device holds.
- BlocksTotal int64
- // Number of blocks on the device that are in sync.
- BlocksSynced int64
- // Name of md component devices
- Devices []string
-}
-
-// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://raid.wiki.kernel.org/index.php/Mdstat
-func (fs FS) MDStat() ([]MDStat, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
- if err != nil {
- return nil, err
- }
- mdstat, err := parseMDStat(data)
- if err != nil {
- return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
- }
- return mdstat, nil
-}
-
-// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
-// structs containing the relevant info.
-func parseMDStat(mdStatData []byte) ([]MDStat, error) {
- mdStats := []MDStat{}
- lines := strings.Split(string(mdStatData), "\n")
-
- for i, line := range lines {
- if strings.TrimSpace(line) == "" || line[0] == ' ' ||
- strings.HasPrefix(line, "Personalities") ||
- strings.HasPrefix(line, "unused") {
- continue
- }
-
- deviceFields := strings.Fields(line)
- if len(deviceFields) < 3 {
- return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
- }
- mdName := deviceFields[0] // mdx
- state := deviceFields[2] // active or inactive
-
- if len(lines) <= i+3 {
- return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
- }
-
- // Failed disks have the suffix (F) & Spare disks have the suffix (S).
- fail := int64(strings.Count(line, "(F)"))
- spare := int64(strings.Count(line, "(S)"))
- active, total, size, err := evalStatusLine(lines[i], lines[i+1])
-
- if err != nil {
- return nil, fmt.Errorf("error parsing md device lines: %w", err)
- }
-
- syncLineIdx := i + 2
- if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
- syncLineIdx++
- }
-
- // If device is syncing at the moment, get the number of currently
- // synced bytes, otherwise that number equals the size of the device.
- syncedBlocks := size
- recovering := strings.Contains(lines[syncLineIdx], "recovery")
- resyncing := strings.Contains(lines[syncLineIdx], "resync")
- checking := strings.Contains(lines[syncLineIdx], "check")
-
- // Append recovery and resyncing state info.
- if recovering || resyncing || checking {
- if recovering {
- state = "recovering"
- } else if checking {
- state = "checking"
- } else {
- state = "resyncing"
- }
-
- // Handle case when resync=PENDING or resync=DELAYED.
- if strings.Contains(lines[syncLineIdx], "PENDING") ||
- strings.Contains(lines[syncLineIdx], "DELAYED") {
- syncedBlocks = 0
- } else {
- syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
- if err != nil {
- return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
- }
- }
- }
-
- mdStats = append(mdStats, MDStat{
- Name: mdName,
- ActivityState: state,
- DisksActive: active,
- DisksFailed: fail,
- DisksSpare: spare,
- DisksTotal: total,
- BlocksTotal: size,
- BlocksSynced: syncedBlocks,
- Devices: evalComponentDevices(deviceFields),
- })
- }
-
- return mdStats, nil
-}
-
-func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
-
- sizeStr := strings.Fields(statusLine)[0]
- size, err = strconv.ParseInt(sizeStr, 10, 64)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
- }
-
- if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
- // In the device deviceLine, only disks have a number associated with them in [].
- total = int64(strings.Count(deviceLine, "["))
- return total, total, size, nil
- }
-
- if strings.Contains(deviceLine, "inactive") {
- return 0, 0, size, nil
- }
-
- matches := statusLineRE.FindStringSubmatch(statusLine)
- if len(matches) != 4 {
- return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
- }
-
- total, err = strconv.ParseInt(matches[2], 10, 64)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
- }
-
- active, err = strconv.ParseInt(matches[3], 10, 64)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
- }
-
- return active, total, size, nil
-}
-
-func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
- matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
- if len(matches) != 2 {
- return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
- }
-
- syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
- if err != nil {
- return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
- }
-
- return syncedBlocks, nil
-}
-
-func evalComponentDevices(deviceFields []string) []string {
- mdComponentDevices := make([]string, 0)
- if len(deviceFields) > 3 {
- for _, field := range deviceFields[4:] {
- match := componentDeviceRE.FindStringSubmatch(field)
- if match == nil {
- continue
- }
- mdComponentDevices = append(mdComponentDevices, match[1])
- }
- }
-
- return mdComponentDevices
-}
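
A sketch of reading these per-device stats through the public accessor:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// MDStat errors on hosts without /proc/mdstat.
	devices, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range devices {
		fmt.Printf("%s: %s, %d/%d disks, %d/%d blocks synced\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal)
	}
}
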
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
deleted file mode 100644
index f65e174e57b..00000000000
--- a/vendor/github.com/prometheus/procfs/meminfo.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Meminfo represents memory statistics.
-type Meminfo struct {
- // Total usable ram (i.e. physical ram minus a few reserved
- // bits and the kernel binary code)
- MemTotal *uint64
- // The sum of LowFree+HighFree
- MemFree *uint64
- // An estimate of how much memory is available for starting
- // new applications, without swapping. Calculated from
- // MemFree, SReclaimable, the size of the file LRU lists, and
- // the low watermarks in each zone. The estimate takes into
- // account that the system needs some page cache to function
- // well, and that not all reclaimable slab will be
- // reclaimable, due to items being in use. The impact of those
- // factors will vary from system to system.
- MemAvailable *uint64
- // Relatively temporary storage for raw disk blocks shouldn't
- // get tremendously large (20MB or so)
- Buffers *uint64
- Cached *uint64
- // Memory that once was swapped out, is swapped back in but
- // still also is in the swapfile (if memory is needed it
- // doesn't need to be swapped out AGAIN because it is already
- // in the swapfile. This saves I/O)
- SwapCached *uint64
- // Memory that has been used more recently and usually not
- // reclaimed unless absolutely necessary.
- Active *uint64
- // Memory which has been less recently used. It is more
- // eligible to be reclaimed for other purposes
- Inactive *uint64
- ActiveAnon *uint64
- InactiveAnon *uint64
- ActiveFile *uint64
- InactiveFile *uint64
- Unevictable *uint64
- Mlocked *uint64
- // total amount of swap space available
- SwapTotal *uint64
- // Memory which has been evicted from RAM, and is temporarily
- // on the disk
- SwapFree *uint64
- // Memory which is waiting to get written back to the disk
- Dirty *uint64
- // Memory which is actively being written back to the disk
- Writeback *uint64
- // Non-file backed pages mapped into userspace page tables
- AnonPages *uint64
- // files which have been mapped, such as libraries
- Mapped *uint64
- Shmem *uint64
- // in-kernel data structures cache
- Slab *uint64
- // Part of Slab, that might be reclaimed, such as caches
- SReclaimable *uint64
- // Part of Slab, that cannot be reclaimed on memory pressure
- SUnreclaim *uint64
- KernelStack *uint64
- // amount of memory dedicated to the lowest level of page
- // tables.
- PageTables *uint64
- // NFS pages sent to the server, but not yet committed to
- // stable storage
- NFSUnstable *uint64
- // Memory used for block device "bounce buffers"
- Bounce *uint64
- // Memory used by FUSE for temporary writeback buffers
- WritebackTmp *uint64
- // Based on the overcommit ratio ('vm.overcommit_ratio'),
- // this is the total amount of memory currently available to
- // be allocated on the system. This limit is only adhered to
- // if strict overcommit accounting is enabled (mode 2 in
- // 'vm.overcommit_memory').
- // The CommitLimit is calculated with the following formula:
- // CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
- // overcommit_ratio / 100 + [total swap pages]
- // For example, on a system with 1G of physical RAM and 7G
- // of swap with a `vm.overcommit_ratio` of 30 it would
- // yield a CommitLimit of 7.3G.
- // For more details, see the memory overcommit documentation
- // in vm/overcommit-accounting.
- CommitLimit *uint64
- // The amount of memory presently allocated on the system.
- // The committed memory is a sum of all of the memory which
- // has been allocated by processes, even if it has not been
- // "used" by them as of yet. A process which malloc()'s 1G
- // of memory, but only touches 300M of it will show up as
- // using 1G. This 1G is memory which has been "committed" to
- // by the VM and can be used at any time by the allocating
- // application. With strict overcommit enabled on the system
-	// (mode 2 in 'vm.overcommit_memory'), allocations which would
- // exceed the CommitLimit (detailed above) will not be permitted.
- // This is useful if one needs to guarantee that processes will
- // not fail due to lack of memory once that memory has been
- // successfully allocated.
- CommittedAS *uint64
- // total size of vmalloc memory area
- VmallocTotal *uint64
- // amount of vmalloc area which is used
- VmallocUsed *uint64
- // largest contiguous block of vmalloc area which is free
- VmallocChunk *uint64
- HardwareCorrupted *uint64
- AnonHugePages *uint64
- ShmemHugePages *uint64
- ShmemPmdMapped *uint64
- CmaTotal *uint64
- CmaFree *uint64
- HugePagesTotal *uint64
- HugePagesFree *uint64
- HugePagesRsvd *uint64
- HugePagesSurp *uint64
- Hugepagesize *uint64
- DirectMap4k *uint64
- DirectMap2M *uint64
- DirectMap1G *uint64
-}
-
-// Meminfo returns information about current kernel/system memory statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) Meminfo() (Meminfo, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
- if err != nil {
- return Meminfo{}, err
- }
-
- m, err := parseMemInfo(bytes.NewReader(b))
- if err != nil {
- return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
- }
-
- return *m, nil
-}
-
-func parseMemInfo(r io.Reader) (*Meminfo, error) {
- var m Meminfo
- s := bufio.NewScanner(r)
- for s.Scan() {
- // Each line has at least a name and value; we ignore the unit.
- fields := strings.Fields(s.Text())
- if len(fields) < 2 {
- return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
- }
-
- v, err := strconv.ParseUint(fields[1], 0, 64)
- if err != nil {
- return nil, err
- }
-
- switch fields[0] {
- case "MemTotal:":
- m.MemTotal = &v
- case "MemFree:":
- m.MemFree = &v
- case "MemAvailable:":
- m.MemAvailable = &v
- case "Buffers:":
- m.Buffers = &v
- case "Cached:":
- m.Cached = &v
- case "SwapCached:":
- m.SwapCached = &v
- case "Active:":
- m.Active = &v
- case "Inactive:":
- m.Inactive = &v
- case "Active(anon):":
- m.ActiveAnon = &v
- case "Inactive(anon):":
- m.InactiveAnon = &v
- case "Active(file):":
- m.ActiveFile = &v
- case "Inactive(file):":
- m.InactiveFile = &v
- case "Unevictable:":
- m.Unevictable = &v
- case "Mlocked:":
- m.Mlocked = &v
- case "SwapTotal:":
- m.SwapTotal = &v
- case "SwapFree:":
- m.SwapFree = &v
- case "Dirty:":
- m.Dirty = &v
- case "Writeback:":
- m.Writeback = &v
- case "AnonPages:":
- m.AnonPages = &v
- case "Mapped:":
- m.Mapped = &v
- case "Shmem:":
- m.Shmem = &v
- case "Slab:":
- m.Slab = &v
- case "SReclaimable:":
- m.SReclaimable = &v
- case "SUnreclaim:":
- m.SUnreclaim = &v
- case "KernelStack:":
- m.KernelStack = &v
- case "PageTables:":
- m.PageTables = &v
- case "NFS_Unstable:":
- m.NFSUnstable = &v
- case "Bounce:":
- m.Bounce = &v
- case "WritebackTmp:":
- m.WritebackTmp = &v
- case "CommitLimit:":
- m.CommitLimit = &v
- case "Committed_AS:":
- m.CommittedAS = &v
- case "VmallocTotal:":
- m.VmallocTotal = &v
- case "VmallocUsed:":
- m.VmallocUsed = &v
- case "VmallocChunk:":
- m.VmallocChunk = &v
- case "HardwareCorrupted:":
- m.HardwareCorrupted = &v
- case "AnonHugePages:":
- m.AnonHugePages = &v
- case "ShmemHugePages:":
- m.ShmemHugePages = &v
- case "ShmemPmdMapped:":
- m.ShmemPmdMapped = &v
- case "CmaTotal:":
- m.CmaTotal = &v
- case "CmaFree:":
- m.CmaFree = &v
- case "HugePages_Total:":
- m.HugePagesTotal = &v
- case "HugePages_Free:":
- m.HugePagesFree = &v
- case "HugePages_Rsvd:":
- m.HugePagesRsvd = &v
- case "HugePages_Surp:":
- m.HugePagesSurp = &v
- case "Hugepagesize:":
- m.Hugepagesize = &v
- case "DirectMap4k:":
- m.DirectMap4k = &v
- case "DirectMap2M:":
- m.DirectMap2M = &v
- case "DirectMap1G:":
- m.DirectMap1G = &v
- }
- }
-
- return &m, nil
-}
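
Every field is a pointer so that "absent from this kernel" stays distinguishable from a genuine zero; a sketch of a nil-safe consumer:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	m, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}
	if m.MemTotal != nil && m.MemAvailable != nil {
		// Values are in kB; the parser ignores the unit column.
		fmt.Printf("available: %d / %d kB\n", *m.MemAvailable, *m.MemTotal)
	}
}
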
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
deleted file mode 100644
index 59f4d505583..00000000000
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// A MountInfo is a type that describes the details and options
-// for each mount, parsed from /proc/self/mountinfo.
-// The fields of each entry in /proc/self/mountinfo
-// are described in the following man page:
-// http://man7.org/linux/man-pages/man5/proc.5.html
-type MountInfo struct {
- // Unique ID for the mount
- MountID int
- // The ID of the parent mount
- ParentID int
- // The value of `st_dev` for the files on this FS
- MajorMinorVer string
- // The pathname of the directory in the FS that forms
- // the root for this mount
- Root string
- // The pathname of the mount point relative to the root
- MountPoint string
- // Mount options
- Options map[string]string
- // Zero or more optional fields
- OptionalFields map[string]string
- // The Filesystem type
- FSType string
- // FS specific information or "none"
- Source string
- // Superblock options
- SuperOptions map[string]string
-}
-
-// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
-func parseMountInfo(info []byte) ([]*MountInfo, error) {
- mounts := []*MountInfo{}
- scanner := bufio.NewScanner(bytes.NewReader(info))
- for scanner.Scan() {
- mountString := scanner.Text()
- parsedMounts, err := parseMountInfoString(mountString)
- if err != nil {
- return nil, err
- }
- mounts = append(mounts, parsedMounts)
- }
-
- err := scanner.Err()
- return mounts, err
-}
-
-// Parses a mountinfo file line, and converts it to a MountInfo struct.
-// An important check here is for the hyphen separator; if it does not exist,
-// the line is malformed.
-func parseMountInfoString(mountString string) (*MountInfo, error) {
- var err error
-
- mountInfo := strings.Split(mountString, " ")
- mountInfoLength := len(mountInfo)
- if mountInfoLength < 10 {
- return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
- }
-
- if mountInfo[mountInfoLength-4] != "-" {
- return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
- }
-
- mount := &MountInfo{
- MajorMinorVer: mountInfo[2],
- Root: mountInfo[3],
- MountPoint: mountInfo[4],
- Options: mountOptionsParser(mountInfo[5]),
- OptionalFields: nil,
- FSType: mountInfo[mountInfoLength-3],
- Source: mountInfo[mountInfoLength-2],
- SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
- }
-
- mount.MountID, err = strconv.Atoi(mountInfo[0])
- if err != nil {
- return nil, fmt.Errorf("failed to parse mount ID")
- }
- mount.ParentID, err = strconv.Atoi(mountInfo[1])
- if err != nil {
- return nil, fmt.Errorf("failed to parse parent ID")
- }
- // Has optional fields, which is a space separated list of values.
- // Example: shared:2 master:7
- if mountInfo[6] != "" {
- mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
- if err != nil {
- return nil, err
- }
- }
- return mount, nil
-}
-
-// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
-func mountOptionsIsValidField(s string) bool {
- switch s {
- case
- "shared",
- "master",
- "propagate_from",
- "unbindable":
- return true
- }
- return false
-}
-
-// mountOptionsParseOptionalFields parses a list of optional field strings into a map of strings.
-func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
- optionalFields := make(map[string]string)
- for _, field := range o {
- optionSplit := strings.SplitN(field, ":", 2)
- value := ""
- if len(optionSplit) == 2 {
- value = optionSplit[1]
- }
- if mountOptionsIsValidField(optionSplit[0]) {
- optionalFields[optionSplit[0]] = value
- }
- }
- return optionalFields, nil
-}
-
-// mountOptionsParser parses mount options and superblock options into a map.
-func mountOptionsParser(mountOptions string) map[string]string {
- opts := make(map[string]string)
- options := strings.Split(mountOptions, ",")
- for _, opt := range options {
- splitOption := strings.Split(opt, "=")
- if len(splitOption) < 2 {
- key := splitOption[0]
- opts[key] = ""
- } else {
- key, value := splitOption[0], splitOption[1]
- opts[key] = value
- }
- }
- return opts
-}
-
-// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
-func GetMounts() ([]*MountInfo, error) {
- data, err := util.ReadFileNoStat("/proc/self/mountinfo")
- if err != nil {
- return nil, err
- }
- return parseMountInfo(data)
-}
-
-// GetProcMounts retrieves mountinfo information from a process's `/proc/[pid]/mountinfo`.
-func GetProcMounts(pid int) ([]*MountInfo, error) {
- data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
- if err != nil {
- return nil, err
- }
- return parseMountInfo(data)
-}
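
A sketch of the package-level entry point:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	mounts, err := procfs.GetMounts() // reads /proc/self/mountinfo
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		fmt.Printf("%-10s %-30s %s\n", m.FSType, m.MountPoint, m.Source)
	}
}
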
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
deleted file mode 100644
index f7a828bb1da..00000000000
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ /dev/null
@@ -1,638 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-// While implementing parsing of /proc/[pid]/mountstats, this blog was used
-// heavily as a reference:
-// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
-//
-// Special thanks to Chris Siebenmann for all of his posts explaining the
-// various statistics available for NFS.
-
-import (
- "bufio"
- "fmt"
- "io"
- "strconv"
- "strings"
- "time"
-)
-
-// Constants shared between multiple functions.
-const (
- deviceEntryLen = 8
-
- fieldBytesLen = 8
- fieldEventsLen = 27
-
- statVersion10 = "1.0"
- statVersion11 = "1.1"
-
- fieldTransport10TCPLen = 10
- fieldTransport10UDPLen = 7
-
- fieldTransport11TCPLen = 13
- fieldTransport11UDPLen = 10
-)
-
-// A Mount is a device mount parsed from /proc/[pid]/mountstats.
-type Mount struct {
- // Name of the device.
- Device string
- // The mount point of the device.
- Mount string
- // The filesystem type used by the device.
- Type string
-	// If available, additional statistics related to this Mount.
- // Use a type assertion to determine if additional statistics are available.
- Stats MountStats
-}
-
-// A MountStats is a type which contains detailed statistics for a specific
-// type of Mount.
-type MountStats interface {
- mountStats()
-}
-
-// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
-type MountStatsNFS struct {
- // The version of statistics provided.
- StatVersion string
- // The mount options of the NFS mount.
- Opts map[string]string
- // The age of the NFS mount.
- Age time.Duration
- // Statistics related to byte counters for various operations.
- Bytes NFSBytesStats
- // Statistics related to various NFS event occurrences.
- Events NFSEventsStats
- // Statistics broken down by filesystem operation.
- Operations []NFSOperationStats
- // Statistics about the NFS RPC transport.
- Transport NFSTransportStats
-}
-
-// mountStats implements MountStats.
-func (m MountStatsNFS) mountStats() {}
-
-// A NFSBytesStats contains statistics about the number of bytes read and written
-// by an NFS client to and from an NFS server.
-type NFSBytesStats struct {
- // Number of bytes read using the read() syscall.
- Read uint64
- // Number of bytes written using the write() syscall.
- Write uint64
- // Number of bytes read using the read() syscall in O_DIRECT mode.
- DirectRead uint64
- // Number of bytes written using the write() syscall in O_DIRECT mode.
- DirectWrite uint64
- // Number of bytes read from the NFS server, in total.
- ReadTotal uint64
- // Number of bytes written to the NFS server, in total.
- WriteTotal uint64
- // Number of pages read directly via mmap()'d files.
- ReadPages uint64
- // Number of pages written directly via mmap()'d files.
- WritePages uint64
-}
-
-// A NFSEventsStats contains statistics about NFS event occurrences.
-type NFSEventsStats struct {
- // Number of times cached inode attributes are re-validated from the server.
- InodeRevalidate uint64
- // Number of times cached dentry nodes are re-validated from the server.
- DnodeRevalidate uint64
- // Number of times an inode cache is cleared.
- DataInvalidate uint64
- // Number of times cached inode attributes are invalidated.
- AttributeInvalidate uint64
- // Number of times files or directories have been open()'d.
- VFSOpen uint64
- // Number of times a directory lookup has occurred.
- VFSLookup uint64
- // Number of times permissions have been checked.
- VFSAccess uint64
- // Number of updates (and potential writes) to pages.
- VFSUpdatePage uint64
- // Number of pages read directly via mmap()'d files.
- VFSReadPage uint64
- // Number of times a group of pages have been read.
- VFSReadPages uint64
- // Number of pages written directly via mmap()'d files.
- VFSWritePage uint64
- // Number of times a group of pages have been written.
- VFSWritePages uint64
- // Number of times directory entries have been read with getdents().
- VFSGetdents uint64
- // Number of times attributes have been set on inodes.
- VFSSetattr uint64
- // Number of pending writes that have been forcefully flushed to the server.
- VFSFlush uint64
- // Number of times fsync() has been called on directories and files.
- VFSFsync uint64
- // Number of times locking has been attempted on a file.
- VFSLock uint64
- // Number of times files have been closed and released.
- VFSFileRelease uint64
- // Unknown. Possibly unused.
- CongestionWait uint64
- // Number of times files have been truncated.
- Truncation uint64
- // Number of times a file has been grown due to writes beyond its existing end.
- WriteExtension uint64
- // Number of times a file was removed while still open by another process.
- SillyRename uint64
- // Number of times the NFS server gave less data than expected while reading.
- ShortRead uint64
- // Number of times the NFS server wrote less data than expected while writing.
- ShortWrite uint64
- // Number of times the NFS server indicated EJUKEBOX; retrieving data from
- // offline storage.
- JukeboxDelay uint64
- // Number of NFS v4.1+ pNFS reads.
- PNFSRead uint64
- // Number of NFS v4.1+ pNFS writes.
- PNFSWrite uint64
-}
-
-// A NFSOperationStats contains statistics for a single operation.
-type NFSOperationStats struct {
- // The name of the operation.
- Operation string
- // Number of requests performed for this operation.
- Requests uint64
- // Number of times an actual RPC request has been transmitted for this operation.
- Transmissions uint64
- // Number of times a request has had a major timeout.
- MajorTimeouts uint64
- // Number of bytes sent for this operation, including RPC headers and payload.
- BytesSent uint64
- // Number of bytes received for this operation, including RPC headers and payload.
- BytesReceived uint64
- // Duration all requests spent queued for transmission before they were sent.
- CumulativeQueueMilliseconds uint64
- // Duration it took to get a reply back after the request was transmitted.
- CumulativeTotalResponseMilliseconds uint64
- // Duration from when a request was enqueued to when it was completely handled.
- CumulativeTotalRequestMilliseconds uint64
- // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
- Errors uint64
-}
-
-// A NFSTransportStats contains statistics for the NFS mount RPC requests and
-// responses.
-type NFSTransportStats struct {
- // The transport protocol used for the NFS mount.
- Protocol string
- // The local port used for the NFS mount.
- Port uint64
- // Number of times the client has had to establish a connection from scratch
- // to the NFS server.
- Bind uint64
- // Number of times the client has made a TCP connection to the NFS server.
- Connect uint64
- // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
- // spent waiting for connections to the server to be established.
- ConnectIdleTime uint64
- // Duration since the NFS mount last saw any RPC traffic.
- IdleTimeSeconds uint64
- // Number of RPC requests for this mount sent to the NFS server.
- Sends uint64
- // Number of RPC responses for this mount received from the NFS server.
- Receives uint64
- // Number of times the NFS server sent a response with a transaction ID
- // unknown to this client.
- BadTransactionIDs uint64
- // A running counter, incremented on each request as the current difference
-	// between sends and receives.
- CumulativeActiveRequests uint64
- // A running counter, incremented on each request by the current backlog
- // queue size.
- CumulativeBacklog uint64
-
- // Stats below only available with stat version 1.1.
-
- // Maximum number of simultaneously active RPC requests ever used.
- MaximumRPCSlotsUsed uint64
- // A running counter, incremented on each request as the current size of the
- // sending queue.
- CumulativeSendingQueue uint64
- // A running counter, incremented on each request as the current size of the
- // pending queue.
- CumulativePendingQueue uint64
-}
-
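
In the upstream module these types are surfaced through Proc.MountStats, an accessor not shown in this hunk; its name here is stated on that assumption. The Stats field is an interface, so NFS details come back through a type assertion, as in this sketch:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	proc, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := proc.MountStats() // parses /proc/self/mountstats
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Printf("%s on %s: age=%s read=%dB written=%dB\n",
				m.Device, m.Mount, nfs.Age, nfs.Bytes.ReadTotal, nfs.Bytes.WriteTotal)
		}
	}
}
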
-// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
-// of Mount structures containing detailed information about each mount.
-// If available, statistics for each mount are parsed as well.
-func parseMountStats(r io.Reader) ([]*Mount, error) {
- const (
- device = "device"
- statVersionPrefix = "statvers="
-
- nfs3Type = "nfs"
- nfs4Type = "nfs4"
- )
-
- var mounts []*Mount
-
- s := bufio.NewScanner(r)
- for s.Scan() {
- // Only look for device entries in this function
- ss := strings.Fields(string(s.Bytes()))
- if len(ss) == 0 || ss[0] != device {
- continue
- }
-
- m, err := parseMount(ss)
- if err != nil {
- return nil, err
- }
-
-		// Does this mount also have statistics information?
- if len(ss) > deviceEntryLen {
- // Only NFSv3 and v4 are supported for parsing statistics
- if m.Type != nfs3Type && m.Type != nfs4Type {
- return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
- }
-
- statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
-
- stats, err := parseMountStatsNFS(s, statVersion)
- if err != nil {
- return nil, err
- }
-
- m.Stats = stats
- }
-
- mounts = append(mounts, m)
- }
-
- return mounts, s.Err()
-}
-
-// parseMount parses an entry in /proc/[pid]/mountstats in the format:
-// device [device] mounted on [mount] with fstype [type]
-func parseMount(ss []string) (*Mount, error) {
- if len(ss) < deviceEntryLen {
- return nil, fmt.Errorf("invalid device entry: %v", ss)
- }
-
- // Check for specific words appearing at specific indices to ensure
- // the format is consistent with what we expect
- format := []struct {
- i int
- s string
- }{
- {i: 0, s: "device"},
- {i: 2, s: "mounted"},
- {i: 3, s: "on"},
- {i: 5, s: "with"},
- {i: 6, s: "fstype"},
- }
-
- for _, f := range format {
- if ss[f.i] != f.s {
- return nil, fmt.Errorf("invalid device entry: %v", ss)
- }
- }
-
- return &Mount{
- Device: ss[1],
- Mount: ss[4],
- Type: ss[7],
- }, nil
-}
-
-// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
-// related to NFS statistics.
-func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
- // Field indicators for parsing specific types of data
- const (
- fieldOpts = "opts:"
- fieldAge = "age:"
- fieldBytes = "bytes:"
- fieldEvents = "events:"
- fieldPerOpStats = "per-op"
- fieldTransport = "xprt:"
- )
-
- stats := &MountStatsNFS{
- StatVersion: statVersion,
- }
-
- for s.Scan() {
- ss := strings.Fields(string(s.Bytes()))
- if len(ss) == 0 {
- break
- }
-
- switch ss[0] {
- case fieldOpts:
- if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
- }
- if stats.Opts == nil {
- stats.Opts = map[string]string{}
- }
- for _, opt := range strings.Split(ss[1], ",") {
- split := strings.Split(opt, "=")
- if len(split) == 2 {
- stats.Opts[split[0]] = split[1]
- } else {
- stats.Opts[opt] = ""
- }
- }
- case fieldAge:
- if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
- }
- // Age integer is in seconds
- d, err := time.ParseDuration(ss[1] + "s")
- if err != nil {
- return nil, err
- }
-
- stats.Age = d
- case fieldBytes:
- if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
- }
- bstats, err := parseNFSBytesStats(ss[1:])
- if err != nil {
- return nil, err
- }
-
- stats.Bytes = *bstats
- case fieldEvents:
- if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
- }
- estats, err := parseNFSEventsStats(ss[1:])
- if err != nil {
- return nil, err
- }
-
- stats.Events = *estats
- case fieldTransport:
- if len(ss) < 3 {
- return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
- }
-
- tstats, err := parseNFSTransportStats(ss[1:], statVersion)
- if err != nil {
- return nil, err
- }
-
- stats.Transport = *tstats
- }
-
-		// When encountering "per-operation statistics", we must break out of
-		// this loop and parse them separately so that parsing can stop before
-		// the next device entry is reached, which is why this 'if' statement
-		// is not just another switch case.
- if ss[0] == fieldPerOpStats {
- break
- }
- }
-
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- // NFS per-operation stats appear last before the next device entry
- perOpStats, err := parseNFSOperationStats(s)
- if err != nil {
- return nil, err
- }
-
- stats.Operations = perOpStats
-
- return stats, nil
-}
-
-// parseNFSBytesStats parses a NFSBytesStats line using an input set of
-// integer fields.
-func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
- if len(ss) != fieldBytesLen {
- return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
- }
-
- ns := make([]uint64, 0, fieldBytesLen)
- for _, s := range ss {
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns = append(ns, n)
- }
-
- return &NFSBytesStats{
- Read: ns[0],
- Write: ns[1],
- DirectRead: ns[2],
- DirectWrite: ns[3],
- ReadTotal: ns[4],
- WriteTotal: ns[5],
- ReadPages: ns[6],
- WritePages: ns[7],
- }, nil
-}
-
-// parseNFSEventsStats parses a NFSEventsStats line using an input set of
-// integer fields.
-func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
- if len(ss) != fieldEventsLen {
- return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
- }
-
- ns := make([]uint64, 0, fieldEventsLen)
- for _, s := range ss {
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns = append(ns, n)
- }
-
- return &NFSEventsStats{
- InodeRevalidate: ns[0],
- DnodeRevalidate: ns[1],
- DataInvalidate: ns[2],
- AttributeInvalidate: ns[3],
- VFSOpen: ns[4],
- VFSLookup: ns[5],
- VFSAccess: ns[6],
- VFSUpdatePage: ns[7],
- VFSReadPage: ns[8],
- VFSReadPages: ns[9],
- VFSWritePage: ns[10],
- VFSWritePages: ns[11],
- VFSGetdents: ns[12],
- VFSSetattr: ns[13],
- VFSFlush: ns[14],
- VFSFsync: ns[15],
- VFSLock: ns[16],
- VFSFileRelease: ns[17],
- CongestionWait: ns[18],
- Truncation: ns[19],
- WriteExtension: ns[20],
- SillyRename: ns[21],
- ShortRead: ns[22],
- ShortWrite: ns[23],
- JukeboxDelay: ns[24],
- PNFSRead: ns[25],
- PNFSWrite: ns[26],
- }, nil
-}
-
-// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
-// additional information about per-operation statistics until an empty
-// line is reached.
-func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
- const (
- // Minimum number of expected fields in each per-operation statistics set
- minFields = 9
- )
-
- var ops []NFSOperationStats
-
- for s.Scan() {
- ss := strings.Fields(string(s.Bytes()))
- if len(ss) == 0 {
-			// Must break when reading a blank line after per-operation stats to
-			// enable the top-level function to parse the next device entry.
- break
- }
-
- if len(ss) < minFields {
- return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
- }
-
-		// Skip the string operation name and parse the remaining integer fields.
- ns := make([]uint64, 0, minFields-1)
- for _, st := range ss[1:] {
- n, err := strconv.ParseUint(st, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns = append(ns, n)
- }
-
- opStats := NFSOperationStats{
- Operation: strings.TrimSuffix(ss[0], ":"),
- Requests: ns[0],
- Transmissions: ns[1],
- MajorTimeouts: ns[2],
- BytesSent: ns[3],
- BytesReceived: ns[4],
- CumulativeQueueMilliseconds: ns[5],
- CumulativeTotalResponseMilliseconds: ns[6],
- CumulativeTotalRequestMilliseconds: ns[7],
- }
-
- if len(ns) > 8 {
- opStats.Errors = ns[8]
- }
-
- ops = append(ops, opStats)
- }
-
- return ops, s.Err()
-}
-
-// parseNFSTransportStats parses a NFSTransportStats line using an input set of
-// integer fields matched to a specific stats version.
-func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
- // Extract the protocol field. It is the only string value in the line
- protocol := ss[0]
- ss = ss[1:]
-
- switch statVersion {
- case statVersion10:
- var expectedLength int
- if protocol == "tcp" {
- expectedLength = fieldTransport10TCPLen
- } else if protocol == "udp" {
- expectedLength = fieldTransport10UDPLen
- } else {
- return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
- }
- if len(ss) != expectedLength {
- return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
- }
- case statVersion11:
- var expectedLength int
- if protocol == "tcp" {
- expectedLength = fieldTransport11TCPLen
- } else if protocol == "udp" {
- expectedLength = fieldTransport11UDPLen
- } else {
- return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
- }
- if len(ss) != expectedLength {
- return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
- }
- default:
- return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
- }
-
-	// Allocate enough room for v1.1 stats, since their zero values are
-	// acceptable in a v1.0 response. Because the stat length is bigger for
-	// TCP stats, we use the TCP length here.
- //
- // Note: slice length must be set to length of v1.1 stats to avoid a panic when
- // only v1.0 stats are present.
- // See: https://github.com/prometheus/node_exporter/issues/571.
- ns := make([]uint64, fieldTransport11TCPLen)
- for i, s := range ss {
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns[i] = n
- }
-
- // The fields differ depending on the transport protocol (TCP or UDP)
- // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
- //
- // For the udp RPC transport there is no connection count, connect idle time,
- // or idle time (fields #3, #4, and #5); all other fields are the same. So
- // we set them to 0 here.
- if protocol == "udp" {
- ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
- }
-
- return &NFSTransportStats{
- Protocol: protocol,
- Port: ns[0],
- Bind: ns[1],
- Connect: ns[2],
- ConnectIdleTime: ns[3],
- IdleTimeSeconds: ns[4],
- Sends: ns[5],
- Receives: ns[6],
- BadTransactionIDs: ns[7],
- CumulativeActiveRequests: ns[8],
- CumulativeBacklog: ns[9],
- MaximumRPCSlotsUsed: ns[10],
- CumulativeSendingQueue: ns[11],
- CumulativePendingQueue: ns[12],
- }, nil
-}
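Since this diff only removes vendored copies, a brief illustration of what downstream callers write against this API may be useful. The sketch below is hypothetical and assumes github.com/prometheus/procfs is pulled in as a regular module dependency; Self, MountStats, and MountStatsNFS are the names defined in the code deleted above (and in proc.go later in this diff).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Read mount statistics for the current process from /proc/self/mountstats.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// Stats is only populated for NFSv3/v4 mounts; type-assert to reach it.
		if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Printf("%s on %s: age=%s read=%d bytes\n",
				m.Device, m.Mount, nfs.Age, nfs.Bytes.Read)
		}
	}
}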
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
deleted file mode 100644
index 9964a3600b4..00000000000
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
-// and contains netfilter conntrack statistics at one CPU core
-type ConntrackStatEntry struct {
- Entries uint64
- Found uint64
- Invalid uint64
- Ignore uint64
- Insert uint64
- InsertFailed uint64
- Drop uint64
- EarlyDrop uint64
- SearchRestart uint64
-}
-
-// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
-func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
- return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
-}
-
-// Parses a slice of ConntrackStatEntries from the given filepath
-func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
- // This file is small and can be read with one syscall.
- b, err := util.ReadFileNoStat(path)
- if err != nil {
- // Do not wrap this error so the caller can detect os.IsNotExist and
- // similar conditions.
- return nil, err
- }
-
- stat, err := parseConntrackStat(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
- }
-
- return stat, nil
-}
-
-// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
-func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
- var entries []ConntrackStatEntry
-
- scanner := bufio.NewScanner(r)
- scanner.Scan()
- for scanner.Scan() {
- fields := strings.Fields(scanner.Text())
- conntrackEntry, err := parseConntrackStatEntry(fields)
- if err != nil {
- return nil, err
- }
- entries = append(entries, *conntrackEntry)
- }
-
- return entries, nil
-}
-
-// Parses a ConntrackStatEntry from the given slice of fields
-func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
- if len(fields) != 17 {
- return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
- }
- entry := &ConntrackStatEntry{}
-
- entries, err := parseConntrackStatField(fields[0])
- if err != nil {
- return nil, err
- }
- entry.Entries = entries
-
- found, err := parseConntrackStatField(fields[2])
- if err != nil {
- return nil, err
- }
- entry.Found = found
-
- invalid, err := parseConntrackStatField(fields[4])
- if err != nil {
- return nil, err
- }
- entry.Invalid = invalid
-
- ignore, err := parseConntrackStatField(fields[5])
- if err != nil {
- return nil, err
- }
- entry.Ignore = ignore
-
- insert, err := parseConntrackStatField(fields[8])
- if err != nil {
- return nil, err
- }
- entry.Insert = insert
-
- insertFailed, err := parseConntrackStatField(fields[9])
- if err != nil {
- return nil, err
- }
- entry.InsertFailed = insertFailed
-
- drop, err := parseConntrackStatField(fields[10])
- if err != nil {
- return nil, err
- }
- entry.Drop = drop
-
- earlyDrop, err := parseConntrackStatField(fields[11])
- if err != nil {
- return nil, err
- }
- entry.EarlyDrop = earlyDrop
-
- searchRestart, err := parseConntrackStatField(fields[16])
- if err != nil {
- return nil, err
- }
- entry.SearchRestart = searchRestart
-
- return entry, nil
-}
-
-// Parses a uint64 from given hex in string
-func parseConntrackStatField(field string) (uint64, error) {
- val, err := strconv.ParseUint(field, 16, 64)
- if err != nil {
- return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
- }
-	return val, nil
-}
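A similarly hedged sketch for the conntrack API removed here, again assuming a direct module dependency on github.com/prometheus/procfs:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// One entry per CPU core; the values are parsed from hex columns.
	entries, err := fs.ConntrackStat()
	if err != nil {
		log.Fatal(err)
	}
	for cpu, e := range entries {
		fmt.Printf("cpu%d: entries=%d drop=%d\n", cpu, e.Entries, e.Drop)
	}
}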
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
deleted file mode 100644
index 47a710befb9..00000000000
--- a/vendor/github.com/prometheus/procfs/net_dev.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "os"
- "sort"
- "strconv"
- "strings"
-)
-
-// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
-type NetDevLine struct {
- Name string `json:"name"` // The name of the interface.
- RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
- RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
- RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
- RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
- RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
- RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
- RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
- RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
- TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
- TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
- TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
- TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
- TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
- TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
- TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
- TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
-}
-
-// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
-// are interface names.
-type NetDev map[string]NetDevLine
-
-// NetDev returns kernel/system statistics read from /proc/net/dev.
-func (fs FS) NetDev() (NetDev, error) {
- return newNetDev(fs.proc.Path("net/dev"))
-}
-
-// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
-func (p Proc) NetDev() (NetDev, error) {
- return newNetDev(p.path("net/dev"))
-}
-
-// newNetDev creates a new NetDev from the contents of the given file.
-func newNetDev(file string) (NetDev, error) {
- f, err := os.Open(file)
- if err != nil {
- return NetDev{}, err
- }
- defer f.Close()
-
- netDev := NetDev{}
- s := bufio.NewScanner(f)
- for n := 0; s.Scan(); n++ {
- // Skip the 2 header lines.
- if n < 2 {
- continue
- }
-
- line, err := netDev.parseLine(s.Text())
- if err != nil {
- return netDev, err
- }
-
- netDev[line.Name] = *line
- }
-
- return netDev, s.Err()
-}
-
-// parseLine parses a single line from the /proc/net/dev file. Header lines
-// must be filtered prior to calling this method.
-func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
- parts := strings.SplitN(rawLine, ":", 2)
- if len(parts) != 2 {
- return nil, errors.New("invalid net/dev line, missing colon")
- }
- fields := strings.Fields(strings.TrimSpace(parts[1]))
-
- var err error
- line := &NetDevLine{}
-
- // Interface Name
- line.Name = strings.TrimSpace(parts[0])
- if line.Name == "" {
- return nil, errors.New("invalid net/dev line, empty interface name")
- }
-
- // RX
- line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
- if err != nil {
- return nil, err
- }
-
- // TX
- line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
- if err != nil {
- return nil, err
- }
-
- return line, nil
-}
-
-// Total aggregates the values across interfaces and returns a new NetDevLine.
-// The Name field will be a sorted comma separated list of interface names.
-func (netDev NetDev) Total() NetDevLine {
- total := NetDevLine{}
-
- names := make([]string, 0, len(netDev))
- for _, ifc := range netDev {
- names = append(names, ifc.Name)
- total.RxBytes += ifc.RxBytes
- total.RxPackets += ifc.RxPackets
- total.RxErrors += ifc.RxErrors
- total.RxDropped += ifc.RxDropped
- total.RxFIFO += ifc.RxFIFO
- total.RxFrame += ifc.RxFrame
- total.RxCompressed += ifc.RxCompressed
- total.RxMulticast += ifc.RxMulticast
- total.TxBytes += ifc.TxBytes
- total.TxPackets += ifc.TxPackets
- total.TxErrors += ifc.TxErrors
- total.TxDropped += ifc.TxDropped
- total.TxFIFO += ifc.TxFIFO
- total.TxCollisions += ifc.TxCollisions
- total.TxCarrier += ifc.TxCarrier
- total.TxCompressed += ifc.TxCompressed
- }
- sort.Strings(names)
- total.Name = strings.Join(names, ", ")
-
- return total
-}
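For the interface counters deleted above, a minimal consumer might look like the following (illustrative only; NewFS, NetDev, and Total are the names defined in this file and in the procfs FS type):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	nd, err := fs.NetDev() // map keyed by interface name
	if err != nil {
		log.Fatal(err)
	}
	total := nd.Total() // aggregate across all interfaces
	fmt.Printf("%s: rx=%d tx=%d bytes\n", total.Name, total.RxBytes, total.TxBytes)
}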
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
deleted file mode 100644
index ac01dd84753..00000000000
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "encoding/hex"
- "fmt"
- "io"
- "net"
- "os"
- "strconv"
- "strings"
-)
-
-const (
-	// readLimit is used by io.LimitReader while reading the content of the
-	// /proc/net/{t,u}dp{,6} files. The number of lines in such a file is
-	// dynamic, as each line represents a single used socket.
-	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
-	// At e.g. 150 bytes per line and the maximum of 65535 sockets,
-	// the reader needs to handle 150 bytes * 65535 =~ 10 MB for a single IP.
-	readLimit = 4294967296 // bytes -> 4 GiB
-)
-
-// This block contains generic data structures for both UDP and TCP sockets.
-type (
- // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
- NetIPSocket []*netIPSocketLine
-
- // NetIPSocketSummary provides already computed values like the total queue lengths or
- // the total number of used sockets. In contrast to NetIPSocket it does not collect
- // the parsed lines into a slice.
- NetIPSocketSummary struct {
- // TxQueueLength shows the total queue length of all parsed tx_queue lengths.
- TxQueueLength uint64
- // RxQueueLength shows the total queue length of all parsed rx_queue lengths.
- RxQueueLength uint64
- // UsedSockets shows the total number of parsed lines representing the
- // number of used sockets.
- UsedSockets uint64
- }
-
- // netIPSocketLine represents the fields parsed from a single line
- // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
- // For the proc file format details, see https://linux.die.net/man/5/proc.
- netIPSocketLine struct {
- Sl uint64
- LocalAddr net.IP
- LocalPort uint64
- RemAddr net.IP
- RemPort uint64
- St uint64
- TxQueue uint64
- RxQueue uint64
- UID uint64
- }
-)
-
-func newNetIPSocket(file string) (NetIPSocket, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- var netIPSocket NetIPSocket
-
- lr := io.LimitReader(f, readLimit)
- s := bufio.NewScanner(lr)
- s.Scan() // skip first line with headers
- for s.Scan() {
- fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields)
- if err != nil {
- return nil, err
- }
- netIPSocket = append(netIPSocket, line)
- }
- if err := s.Err(); err != nil {
- return nil, err
- }
- return netIPSocket, nil
-}
-
-// newNetIPSocketSummary creates a new NetIPSocketSummary from the contents of the given file.
-func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- var netIPSocketSummary NetIPSocketSummary
-
- lr := io.LimitReader(f, readLimit)
- s := bufio.NewScanner(lr)
- s.Scan() // skip first line with headers
- for s.Scan() {
- fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields)
- if err != nil {
- return nil, err
- }
- netIPSocketSummary.TxQueueLength += line.TxQueue
- netIPSocketSummary.RxQueueLength += line.RxQueue
- netIPSocketSummary.UsedSockets++
- }
- if err := s.Err(); err != nil {
- return nil, err
- }
- return &netIPSocketSummary, nil
-}
-
-// The /proc/net/{t,u}dp{,6} files are in network byte order for IPv4. For
-// IPv6 the address is four words of four bytes each, and within each of
-// those four words the four bytes are written in reverse order.
-
-func parseIP(hexIP string) (net.IP, error) {
- var byteIP []byte
- byteIP, err := hex.DecodeString(hexIP)
- if err != nil {
- return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
- }
- switch len(byteIP) {
- case 4:
- return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
- case 16:
- i := net.IP{
- byteIP[3], byteIP[2], byteIP[1], byteIP[0],
- byteIP[7], byteIP[6], byteIP[5], byteIP[4],
- byteIP[11], byteIP[10], byteIP[9], byteIP[8],
- byteIP[15], byteIP[14], byteIP[13], byteIP[12],
- }
- return i, nil
- default:
- return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
- }
-}
-
-// parseNetIPSocketLine parses a single line, represented by a list of fields.
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
- line := &netIPSocketLine{}
- if len(fields) < 8 {
- return nil, fmt.Errorf(
- "cannot parse net socket line as it has less then 8 columns %q",
- strings.Join(fields, " "),
- )
- }
- var err error // parse error
-
- // sl
- s := strings.Split(fields[0], ":")
- if len(s) != 2 {
- return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
- }
-
- if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
- }
- // local_address
- l := strings.Split(fields[1], ":")
- if len(l) != 2 {
- return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
- }
- if line.LocalAddr, err = parseIP(l[0]); err != nil {
- return nil, err
- }
- if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
- }
-
- // remote_address
- r := strings.Split(fields[2], ":")
- if len(r) != 2 {
- return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
- }
- if line.RemAddr, err = parseIP(r[0]); err != nil {
- return nil, err
- }
- if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
- }
-
- // st
- if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
- }
-
- // tx_queue and rx_queue
- q := strings.Split(fields[4], ":")
- if len(q) != 2 {
- return nil, fmt.Errorf(
- "cannot parse tx/rx queues in socket line as it has a missing colon %q",
- fields[4],
- )
- }
- if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
- }
- if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
- }
-
- // uid
- if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
- }
-
- return line, nil
-}
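The address byte order handled by parseIP is easy to get wrong, so here is a standalone sketch of the IPv4 case; decodeIPv4 and the sample value are illustrative helpers written for this note, not part of procfs:

package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"net"
)

// decodeIPv4 reverses the byte order used by /proc/net/{t,u}dp:
// the hex field "0100007F" decodes to 127.0.0.1.
func decodeIPv4(hexIP string) (net.IP, error) {
	b, err := hex.DecodeString(hexIP)
	if err != nil || len(b) != 4 {
		return nil, fmt.Errorf("not a hex IPv4 address: %q", hexIP)
	}
	return net.IP{b[3], b[2], b[1], b[0]}, nil
}

func main() {
	ip, err := decodeIPv4("0100007F")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ip) // 127.0.0.1
}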
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
deleted file mode 100644
index 8c6de3791ba..00000000000
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// NetProtocolStats stores the contents from /proc/net/protocols
-type NetProtocolStats map[string]NetProtocolStatLine
-
-// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
-// only care about the first six columns as the rest are not likely to change
-// and only serve to provide a set of capabilities for each protocol.
-type NetProtocolStatLine struct {
- Name string // 0 The name of the protocol
- Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
- Sockets int64 // 2 Number of sockets in use by this protocol
- Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol
-	Pressure int // 4 Either yes (1), no (0), or NI, i.e. not implemented (-1). For the sake of simplicity we treat NI as not experiencing memory pressure.
- MaxHeader uint64 // 5 Protocol specific max header size
- Slab bool // 6 Indicates whether or not memory is allocated from the SLAB
- ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
- Capabilities NetProtocolCapabilities
-}
-
-// NetProtocolCapabilities contains a list of capabilities for each protocol
-type NetProtocolCapabilities struct {
- Close bool // 8
- Connect bool // 9
- Disconnect bool // 10
- Accept bool // 11
- IoCtl bool // 12
- Init bool // 13
- Destroy bool // 14
- Shutdown bool // 15
- SetSockOpt bool // 16
- GetSockOpt bool // 17
- SendMsg bool // 18
- RecvMsg bool // 19
- SendPage bool // 20
- Bind bool // 21
- BacklogRcv bool // 22
- Hash bool // 23
- UnHash bool // 24
- GetPort bool // 25
- EnterMemoryPressure bool // 26
-}
-
-// NetProtocols reads stats from /proc/net/protocols and returns a map of
-// NetProtocolStatLine entries. As of this writing no official Linux
-// documentation exists; however, the source is fairly self-explanatory and the
-// format has been stable since its introduction in 2.6.12-rc2.
-// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
-// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
-func (fs FS) NetProtocols() (NetProtocolStats, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
- if err != nil {
- return NetProtocolStats{}, err
- }
- return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
-}
-
-func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
- nps := NetProtocolStats{}
-
- // Skip the header line
- s.Scan()
-
- for s.Scan() {
- line, err := nps.parseLine(s.Text())
- if err != nil {
- return NetProtocolStats{}, err
- }
-
- nps[line.Name] = *line
- }
- return nps, nil
-}
-
-func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
- line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
- var err error
- const enabled = "yes"
- const disabled = "no"
-
- fields := strings.Fields(rawLine)
- line.Name = fields[0]
- line.Size, err = strconv.ParseUint(fields[1], 10, 64)
- if err != nil {
- return nil, err
- }
- line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
- if err != nil {
- return nil, err
- }
- line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
- if err != nil {
- return nil, err
- }
- if fields[4] == enabled {
- line.Pressure = 1
- } else if fields[4] == disabled {
- line.Pressure = 0
- } else {
- line.Pressure = -1
- }
- line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
- if err != nil {
- return nil, err
- }
- if fields[6] == enabled {
- line.Slab = true
- } else if fields[6] == disabled {
- line.Slab = false
- } else {
- return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
- }
- line.ModuleName = fields[7]
-
- err = line.Capabilities.parseCapabilities(fields[8:])
- if err != nil {
- return nil, err
- }
-
- return line, nil
-}
-
-func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
- // The capabilities are all bools so we can loop over to map them
- capabilityFields := [...]*bool{
- &pc.Close,
- &pc.Connect,
- &pc.Disconnect,
- &pc.Accept,
- &pc.IoCtl,
- &pc.Init,
- &pc.Destroy,
- &pc.Shutdown,
- &pc.SetSockOpt,
- &pc.GetSockOpt,
- &pc.SendMsg,
- &pc.RecvMsg,
- &pc.SendPage,
- &pc.Bind,
- &pc.BacklogRcv,
- &pc.Hash,
- &pc.UnHash,
- &pc.GetPort,
- &pc.EnterMemoryPressure,
- }
-
- for i := 0; i < len(capabilities); i++ {
- if capabilities[i] == "y" {
- *capabilityFields[i] = true
- } else if capabilities[i] == "n" {
- *capabilityFields[i] = false
- } else {
- return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
- }
- }
- return nil
-}
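A short, assumed usage of the protocol stats reader removed above; the "TCP" key is an illustrative protocol name as it appears in /proc/net/protocols:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetProtocols()
	if err != nil {
		log.Fatal(err)
	}
	// Memory is reported in 4KB pages, per the struct comments above.
	tcp := stats["TCP"]
	fmt.Printf("TCP: sockets=%d memory=%d pages\n", tcp.Sockets, tcp.Memory)
}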
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
deleted file mode 100644
index e36f4872dd6..00000000000
--- a/vendor/github.com/prometheus/procfs/net_sockstat.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
-// respectively.
-type NetSockstat struct {
- // Used is non-nil for IPv4 sockstat results, but nil for IPv6.
- Used *int
- Protocols []NetSockstatProtocol
-}
-
-// A NetSockstatProtocol contains statistics about a given socket protocol.
-// Pointer fields indicate that the value may or may not be present on any
-// given protocol.
-type NetSockstatProtocol struct {
- Protocol string
- InUse int
- Orphan *int
- TW *int
- Alloc *int
- Mem *int
- Memory *int
-}
-
-// NetSockstat retrieves IPv4 socket statistics.
-func (fs FS) NetSockstat() (*NetSockstat, error) {
- return readSockstat(fs.proc.Path("net", "sockstat"))
-}
-
-// NetSockstat6 retrieves IPv6 socket statistics.
-//
-// If IPv6 is disabled on this kernel, the returned error can be checked with
-// os.IsNotExist.
-func (fs FS) NetSockstat6() (*NetSockstat, error) {
- return readSockstat(fs.proc.Path("net", "sockstat6"))
-}
-
-// readSockstat opens and parses a NetSockstat from the input file.
-func readSockstat(name string) (*NetSockstat, error) {
- // This file is small and can be read with one syscall.
- b, err := util.ReadFileNoStat(name)
- if err != nil {
- // Do not wrap this error so the caller can detect os.IsNotExist and
- // similar conditions.
- return nil, err
- }
-
- stat, err := parseSockstat(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
- }
-
- return stat, nil
-}
-
-// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
-func parseSockstat(r io.Reader) (*NetSockstat, error) {
- var stat NetSockstat
- s := bufio.NewScanner(r)
- for s.Scan() {
- // Expect a minimum of a protocol and one key/value pair.
- fields := strings.Split(s.Text(), " ")
- if len(fields) < 3 {
- return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
- }
-
- // The remaining fields are key/value pairs.
- kvs, err := parseSockstatKVs(fields[1:])
- if err != nil {
- return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
- }
-
- // The first field is the protocol. We must trim its colon suffix.
- proto := strings.TrimSuffix(fields[0], ":")
- switch proto {
- case "sockets":
- // Special case: IPv4 has a sockets "used" key/value pair that we
- // embed at the top level of the structure.
- used := kvs["used"]
- stat.Used = &used
- default:
- // Parse all other lines as individual protocols.
- nsp := parseSockstatProtocol(kvs)
- nsp.Protocol = proto
- stat.Protocols = append(stat.Protocols, nsp)
- }
- }
-
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- return &stat, nil
-}
-
-// parseSockstatKVs parses a string slice into a map of key/value pairs.
-func parseSockstatKVs(kvs []string) (map[string]int, error) {
- if len(kvs)%2 != 0 {
- return nil, errors.New("odd number of fields in key/value pairs")
- }
-
- // Iterate two values at a time to gather key/value pairs.
- out := make(map[string]int, len(kvs)/2)
- for i := 0; i < len(kvs); i += 2 {
- vp := util.NewValueParser(kvs[i+1])
- out[kvs[i]] = vp.Int()
-
- if err := vp.Err(); err != nil {
- return nil, err
- }
- }
-
- return out, nil
-}
-
-// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
-func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
- var nsp NetSockstatProtocol
- for k, v := range kvs {
- // Capture the range variable to ensure we get unique pointers for
- // each of the optional fields.
- v := v
- switch k {
- case "inuse":
- nsp.InUse = v
- case "orphan":
- nsp.Orphan = &v
- case "tw":
- nsp.TW = &v
- case "alloc":
- nsp.Alloc = &v
- case "mem":
- nsp.Mem = &v
- case "memory":
- nsp.Memory = &v
- }
- }
-
- return nsp
-}
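For completeness, a hypothetical caller of the sockstat API deleted here; the nil check on Used matters because the IPv6 variant omits that field:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.NetSockstat()
	if err != nil {
		log.Fatal(err)
	}
	if stat.Used != nil { // nil for the IPv6 variant
		fmt.Println("sockets used:", *stat.Used)
	}
	for _, p := range stat.Protocols {
		fmt.Printf("%s: inuse=%d\n", p.Protocol, p.InUse)
	}
}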
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
deleted file mode 100644
index 46f12c61d3e..00000000000
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// For the proc file format details,
-// See:
-// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
-// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
-
-// SoftnetStat contains a single row of data from /proc/net/softnet_stat
-type SoftnetStat struct {
- // Number of processed packets
- Processed uint32
- // Number of dropped packets
- Dropped uint32
- // Number of times processing packets ran out of quota
- TimeSqueezed uint32
-}
-
-var softNetProcFile = "net/softnet_stat"
-
-// NetSoftnetStat reads data from /proc/net/softnet_stat.
-func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
- if err != nil {
- return nil, err
- }
-
- entries, err := parseSoftnet(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
- }
-
- return entries, nil
-}
-
-func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
- const minColumns = 9
-
- s := bufio.NewScanner(r)
-
- var stats []SoftnetStat
- for s.Scan() {
- columns := strings.Fields(s.Text())
- width := len(columns)
-
- if width < minColumns {
- return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
- }
-
- // We only parse the first three columns at the moment.
- us, err := parseHexUint32s(columns[0:3])
- if err != nil {
- return nil, err
- }
-
- stats = append(stats, SoftnetStat{
- Processed: us[0],
- Dropped: us[1],
- TimeSqueezed: us[2],
- })
- }
-
- return stats, nil
-}
-
-func parseHexUint32s(ss []string) ([]uint32, error) {
- us := make([]uint32, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 16, 32)
- if err != nil {
- return nil, err
- }
-
- us = append(us, uint32(u))
- }
-
- return us, nil
-}
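And a minimal sketch for the softnet reader above, under the same module-dependency assumption:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetSoftnetStat() // one row per CPU
	if err != nil {
		log.Fatal(err)
	}
	for cpu, s := range stats {
		fmt.Printf("cpu%d: processed=%d dropped=%d squeezed=%d\n",
			cpu, s.Processed, s.Dropped, s.TimeSqueezed)
	}
}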
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
deleted file mode 100644
index 52776295572..00000000000
--- a/vendor/github.com/prometheus/procfs/net_tcp.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-type (
- // NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
- NetTCP []*netIPSocketLine
-
- // NetTCPSummary provides already computed values like the total queue lengths or
- // the total number of used sockets. In contrast to NetTCP it does not collect
- // the parsed lines into a slice.
- NetTCPSummary NetIPSocketSummary
-)
-
-// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
-// read from /proc/net/tcp.
-func (fs FS) NetTCP() (NetTCP, error) {
- return newNetTCP(fs.proc.Path("net/tcp"))
-}
-
-// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
-// read from /proc/net/tcp6.
-func (fs FS) NetTCP6() (NetTCP, error) {
- return newNetTCP(fs.proc.Path("net/tcp6"))
-}
-
-// NetTCPSummary returns already computed statistics like the total queue lengths
-// for TCP datagrams read from /proc/net/tcp.
-func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
- return newNetTCPSummary(fs.proc.Path("net/tcp"))
-}
-
-// NetTCP6Summary returns already computed statistics like the total queue lengths
-// for TCP datagrams read from /proc/net/tcp6.
-func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
- return newNetTCPSummary(fs.proc.Path("net/tcp6"))
-}
-
-// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
-func newNetTCP(file string) (NetTCP, error) {
- n, err := newNetIPSocket(file)
- n1 := NetTCP(n)
- return n1, err
-}
-
-func newNetTCPSummary(file string) (*NetTCPSummary, error) {
- n, err := newNetIPSocketSummary(file)
- if n == nil {
- return nil, err
- }
- n1 := NetTCPSummary(*n)
- return &n1, err
-}
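The summary variants exist so callers can total queue lengths without materializing every socket line; a hedged example of the TCP side:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	sum, err := fs.NetTCPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("tcp: sockets=%d txq=%d rxq=%d\n",
		sum.UsedSockets, sum.TxQueueLength, sum.RxQueueLength)
}

The net_udp.go wrappers deleted next are structurally identical, merely reading /proc/net/udp{,6} instead.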
diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go
deleted file mode 100644
index 9ac3daf2d4c..00000000000
--- a/vendor/github.com/prometheus/procfs/net_udp.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-type (
- // NetUDP represents the contents of /proc/net/udp{,6} file without the header.
- NetUDP []*netIPSocketLine
-
- // NetUDPSummary provides already computed values like the total queue lengths or
- // the total number of used sockets. In contrast to NetUDP it does not collect
- // the parsed lines into a slice.
- NetUDPSummary NetIPSocketSummary
-)
-
-// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
-// read from /proc/net/udp.
-func (fs FS) NetUDP() (NetUDP, error) {
- return newNetUDP(fs.proc.Path("net/udp"))
-}
-
-// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
-// read from /proc/net/udp6.
-func (fs FS) NetUDP6() (NetUDP, error) {
- return newNetUDP(fs.proc.Path("net/udp6"))
-}
-
-// NetUDPSummary returns already computed statistics like the total queue lengths
-// for UDP datagrams read from /proc/net/udp.
-func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
- return newNetUDPSummary(fs.proc.Path("net/udp"))
-}
-
-// NetUDP6Summary returns already computed statistics like the total queue lengths
-// for UDP datagrams read from /proc/net/udp6.
-func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
- return newNetUDPSummary(fs.proc.Path("net/udp6"))
-}
-
-// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
-func newNetUDP(file string) (NetUDP, error) {
- n, err := newNetIPSocket(file)
- n1 := NetUDP(n)
- return n1, err
-}
-
-func newNetUDPSummary(file string) (*NetUDPSummary, error) {
- n, err := newNetIPSocketSummary(file)
- if n == nil {
- return nil, err
- }
- n1 := NetUDPSummary(*n)
- return &n1, err
-}
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
deleted file mode 100644
index 98aa8e1c31c..00000000000
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-// For the proc file format details,
-// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
-// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
-
-// Constants for the various /proc/net/unix enumerations.
-// TODO: match against x/sys/unix or similar?
-const (
- netUnixTypeStream = 1
- netUnixTypeDgram = 2
- netUnixTypeSeqpacket = 5
-
- netUnixFlagDefault = 0
- netUnixFlagListen = 1 << 16
-
- netUnixStateUnconnected = 1
- netUnixStateConnecting = 2
- netUnixStateConnected = 3
- netUnixStateDisconnected = 4
-)
-
-// NetUNIXType is the type of the type field.
-type NetUNIXType uint64
-
-// NetUNIXFlags is the type of the flags field.
-type NetUNIXFlags uint64
-
-// NetUNIXState is the type of the state field.
-type NetUNIXState uint64
-
-// NetUNIXLine represents a line of /proc/net/unix.
-type NetUNIXLine struct {
- KernelPtr string
- RefCount uint64
- Protocol uint64
- Flags NetUNIXFlags
- Type NetUNIXType
- State NetUNIXState
- Inode uint64
- Path string
-}
-
-// NetUNIX holds the data read from /proc/net/unix.
-type NetUNIX struct {
- Rows []*NetUNIXLine
-}
-
-// NetUNIX returns data read from /proc/net/unix.
-func (fs FS) NetUNIX() (*NetUNIX, error) {
- return readNetUNIX(fs.proc.Path("net/unix"))
-}
-
-// readNetUNIX reads data in /proc/net/unix format from the specified file.
-func readNetUNIX(file string) (*NetUNIX, error) {
- // This file could be quite large and a streaming read is desirable versus
- // reading the entire contents at once.
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseNetUNIX(f)
-}
-
-// parseNetUNIX creates a NetUnix structure from the incoming stream.
-func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
- // Begin scanning by checking for the existence of Inode.
- s := bufio.NewScanner(r)
- s.Scan()
-
-	// According to the proc(5) man page the file does not contain an Inode
-	// field, but in practice it does. This code works for both cases.
- hasInode := strings.Contains(s.Text(), "Inode")
-
- // Expect a minimum number of fields, but Inode and Path are optional:
- // Num RefCount Protocol Flags Type St Inode Path
- minFields := 6
- if hasInode {
- minFields++
- }
-
- var nu NetUNIX
- for s.Scan() {
- line := s.Text()
- item, err := nu.parseLine(line, hasInode, minFields)
- if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
- }
-
- nu.Rows = append(nu.Rows, item)
- }
-
- if err := s.Err(); err != nil {
- return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
- }
-
- return &nu, nil
-}
-
-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
- fields := strings.Fields(line)
-
- l := len(fields)
- if l < min {
- return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
- }
-
- // Field offsets are as follows:
- // Num RefCount Protocol Flags Type St Inode Path
-
- kernelPtr := strings.TrimSuffix(fields[0], ":")
-
- users, err := u.parseUsers(fields[1])
- if err != nil {
- return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
- }
-
- flags, err := u.parseFlags(fields[3])
- if err != nil {
- return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
- }
-
- typ, err := u.parseType(fields[4])
- if err != nil {
- return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
- }
-
- state, err := u.parseState(fields[5])
- if err != nil {
- return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
- }
-
- var inode uint64
- if hasInode {
- inode, err = u.parseInode(fields[6])
- if err != nil {
- return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
- }
- }
-
- n := &NetUNIXLine{
- KernelPtr: kernelPtr,
- RefCount: users,
- Type: typ,
- Flags: flags,
- State: state,
- Inode: inode,
- }
-
- // Path field is optional.
- if l > min {
- // Path occurs at either index 6 or 7 depending on whether inode is
- // already present.
- pathIdx := 7
- if !hasInode {
- pathIdx--
- }
-
- n.Path = fields[pathIdx]
- }
-
- return n, nil
-}
-
-func (u NetUNIX) parseUsers(s string) (uint64, error) {
- return strconv.ParseUint(s, 16, 32)
-}
-
-func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
- typ, err := strconv.ParseUint(s, 16, 16)
- if err != nil {
- return 0, err
- }
-
- return NetUNIXType(typ), nil
-}
-
-func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
- flags, err := strconv.ParseUint(s, 16, 32)
- if err != nil {
- return 0, err
- }
-
- return NetUNIXFlags(flags), nil
-}
-
-func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
- st, err := strconv.ParseInt(s, 16, 8)
- if err != nil {
- return 0, err
- }
-
- return NetUNIXState(st), nil
-}
-
-func (u NetUNIX) parseInode(s string) (uint64, error) {
- return strconv.ParseUint(s, 10, 64)
-}
-
-func (t NetUNIXType) String() string {
- switch t {
- case netUnixTypeStream:
- return "stream"
- case netUnixTypeDgram:
- return "dgram"
- case netUnixTypeSeqpacket:
- return "seqpacket"
- }
- return "unknown"
-}
-
-func (f NetUNIXFlags) String() string {
- switch f {
- case netUnixFlagListen:
- return "listen"
- default:
- return "default"
- }
-}
-
-func (s NetUNIXState) String() string {
- switch s {
- case netUnixStateUnconnected:
- return "unconnected"
- case netUnixStateConnecting:
- return "connecting"
- case netUnixStateConnected:
- return "connected"
- case netUnixStateDisconnected:
- return "disconnected"
- }
- return "unknown"
-}
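Finally, a sketch of consuming the /proc/net/unix reader removed above; Type and State implement Stringer via the methods defined at the end of this file:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	nu, err := fs.NetUNIX()
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		// Prints e.g. "stream connected" for a typical connected socket.
		fmt.Printf("%s %s inode=%d path=%q\n", row.Type, row.State, row.Inode, row.Path)
	}
}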
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
deleted file mode 100644
index 28f696803f6..00000000000
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/fs"
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Proc provides information about a running process.
-type Proc struct {
- // The process ID.
- PID int
-
- fs fs.FS
-}
-
-// Procs represents a list of Proc structs.
-type Procs []Proc
-
-func (p Procs) Len() int { return len(p) }
-func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
-
-// Self returns a process for the current process read via /proc/self.
-func Self() (Proc, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Proc{}, err
- }
- return fs.Self()
-}
-
-// NewProc returns a process for the given pid under /proc.
-func NewProc(pid int) (Proc, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Proc{}, err
- }
- return fs.Proc(pid)
-}
-
-// AllProcs returns a list of all currently available processes under /proc.
-func AllProcs() (Procs, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Procs{}, err
- }
- return fs.AllProcs()
-}
-
-// Self returns a process for the current process.
-func (fs FS) Self() (Proc, error) {
- p, err := os.Readlink(fs.proc.Path("self"))
- if err != nil {
- return Proc{}, err
- }
- pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
- if err != nil {
- return Proc{}, err
- }
- return fs.Proc(pid)
-}
-
-// NewProc returns a process for the given pid.
-//
-// Deprecated: use fs.Proc() instead
-func (fs FS) NewProc(pid int) (Proc, error) {
- return fs.Proc(pid)
-}
-
-// Proc returns a process for the given pid.
-func (fs FS) Proc(pid int) (Proc, error) {
- if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
- return Proc{}, err
- }
- return Proc{PID: pid, fs: fs.proc}, nil
-}
-
-// AllProcs returns a list of all currently available processes.
-func (fs FS) AllProcs() (Procs, error) {
- d, err := os.Open(fs.proc.Path())
- if err != nil {
- return Procs{}, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
- }
-
- p := Procs{}
- for _, n := range names {
- pid, err := strconv.ParseInt(n, 10, 64)
- if err != nil {
- continue
- }
- p = append(p, Proc{PID: int(pid), fs: fs.proc})
- }
-
- return p, nil
-}
-
-// CmdLine returns the command line of a process.
-func (p Proc) CmdLine() ([]string, error) {
- data, err := util.ReadFileNoStat(p.path("cmdline"))
- if err != nil {
- return nil, err
- }
-
- if len(data) < 1 {
- return []string{}, nil
- }
-
-	return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
-}
-
-// Wchan returns the wchan (wait channel) of a process.
-func (p Proc) Wchan() (string, error) {
- f, err := os.Open(p.path("wchan"))
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- data, err := ioutil.ReadAll(f)
- if err != nil {
- return "", err
- }
-
- wchan := string(data)
- if wchan == "" || wchan == "0" {
- return "", nil
- }
-
- return wchan, nil
-}
-
-// Comm returns the command name of a process.
-func (p Proc) Comm() (string, error) {
- data, err := util.ReadFileNoStat(p.path("comm"))
- if err != nil {
- return "", err
- }
-
- return strings.TrimSpace(string(data)), nil
-}
-
-// Executable returns the absolute path of the executable command of a process.
-func (p Proc) Executable() (string, error) {
- exe, err := os.Readlink(p.path("exe"))
- if os.IsNotExist(err) {
- return "", nil
- }
-
- return exe, err
-}
-
-// Cwd returns the absolute path to the current working directory of the process.
-func (p Proc) Cwd() (string, error) {
- wd, err := os.Readlink(p.path("cwd"))
- if os.IsNotExist(err) {
- return "", nil
- }
-
- return wd, err
-}
-
-// RootDir returns the absolute path to the process's root directory (as set by chroot)
-func (p Proc) RootDir() (string, error) {
- rdir, err := os.Readlink(p.path("root"))
- if os.IsNotExist(err) {
- return "", nil
- }
-
- return rdir, err
-}
-
-// FileDescriptors returns the currently open file descriptors of a process.
-func (p Proc) FileDescriptors() ([]uintptr, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- fds := make([]uintptr, len(names))
- for i, n := range names {
- fd, err := strconv.ParseInt(n, 10, 32)
- if err != nil {
- return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
- }
- fds[i] = uintptr(fd)
- }
-
- return fds, nil
-}
-
-// FileDescriptorTargets returns the targets of all file descriptors of a process.
-// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
-func (p Proc) FileDescriptorTargets() ([]string, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- targets := make([]string, len(names))
-
- for i, name := range names {
- target, err := os.Readlink(p.path("fd", name))
- if err == nil {
- targets[i] = target
- }
- }
-
- return targets, nil
-}
-
-// FileDescriptorsLen returns the number of currently open file descriptors of
-// a process.
-func (p Proc) FileDescriptorsLen() (int, error) {
- fds, err := p.fileDescriptors()
- if err != nil {
- return 0, err
- }
-
- return len(fds), nil
-}
-
-// MountStats retrieves statistics and configuration for mount points in a
-// process's namespace.
-func (p Proc) MountStats() ([]*Mount, error) {
- f, err := os.Open(p.path("mountstats"))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseMountStats(f)
-}
-
-// MountInfo retrieves mount information for mount points in a
-// process's namespace.
-// It supplies information missing in `/proc/self/mounts` and
-// fixes various other problems with that file too.
-func (p Proc) MountInfo() ([]*MountInfo, error) {
- data, err := util.ReadFileNoStat(p.path("mountinfo"))
- if err != nil {
- return nil, err
- }
- return parseMountInfo(data)
-}
-
-func (p Proc) fileDescriptors() ([]string, error) {
- d, err := os.Open(p.path("fd"))
- if err != nil {
- return nil, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
- }
-
- return names, nil
-}
-
-func (p Proc) path(pa ...string) string {
- return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
-}
-
-// FileDescriptorsInfo retrieves information about all file descriptors of
-// the process.
-func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- var fdinfos ProcFDInfos
-
- for _, n := range names {
- fdinfo, err := p.FDInfo(n)
- if err != nil {
- continue
- }
- fdinfos = append(fdinfos, *fdinfo)
- }
-
- return fdinfos, nil
-}
-
-// Schedstat returns task scheduling information for the process.
-func (p Proc) Schedstat() (ProcSchedstat, error) {
- contents, err := ioutil.ReadFile(p.path("schedstat"))
- if err != nil {
- return ProcSchedstat{}, err
- }
- return parseProcSchedstat(string(contents))
-}
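
For context on what this vendor drop removes: a minimal sketch of how the Proc accessors above were typically used, assuming a Linux host with /proc mounted and the pre-removal github.com/prometheus/procfs API (illustrative only; this is not how buildah itself called the package).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// AllProcs walks /proc and keeps only the numeric directory names (PIDs).
	procs, err := procfs.AllProcs()
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range procs {
		comm, err := p.Comm() // reads /proc/<pid>/comm
		if err != nil {
			continue // the process may have exited since the listing
		}
		fmt.Printf("%d\t%s\n", p.PID, comm)
	}
}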
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
deleted file mode 100644
index 0094a13c05d..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
-// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
-// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
-// this hierarchy' (where == what path on the specific cgroupfs). By prefixing this path with the mount point of
-// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
-// in this hierarchy.
-//
-// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
-type Cgroup struct {
- // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
- // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
- HierarchyID int
- // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
- // Cgroups V2 this may be empty, as all active controllers use the same hierarchy
- Controllers []string
- // Path of this control group, relative to the mount point of the cgroupfs representing this specific
- // hierarchy
- Path string
-}
-
-// parseCgroupString parses each line of the /proc/[pid]/cgroup file
-// Line format is hierarchyID:[controller1,controller2]:path
-func parseCgroupString(cgroupStr string) (*Cgroup, error) {
- var err error
-
- fields := strings.SplitN(cgroupStr, ":", 3)
- if len(fields) < 3 {
- return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
- }
-
- cgroup := &Cgroup{
- Path: fields[2],
- Controllers: nil,
- }
- cgroup.HierarchyID, err = strconv.Atoi(fields[0])
- if err != nil {
- return nil, fmt.Errorf("failed to parse hierarchy ID")
- }
- if fields[1] != "" {
- ssNames := strings.Split(fields[1], ",")
- cgroup.Controllers = append(cgroup.Controllers, ssNames...)
- }
- return cgroup, nil
-}
-
-// parseCgroups reads each line of the /proc/[pid]/cgroup file
-func parseCgroups(data []byte) ([]Cgroup, error) {
- var cgroups []Cgroup
- scanner := bufio.NewScanner(bytes.NewReader(data))
- for scanner.Scan() {
- mountString := scanner.Text()
- parsedMounts, err := parseCgroupString(mountString)
- if err != nil {
- return nil, err
- }
- cgroups = append(cgroups, *parsedMounts)
- }
-
- err := scanner.Err()
- return cgroups, err
-}
-
-// Cgroups reads from /proc/<pid>/cgroup and returns a []Cgroup slice locating this PID in each process
-// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
-// so the length of the returned slice is equal to the number of active hierarchies on this system.
-func (p Proc) Cgroups() ([]Cgroup, error) {
- data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
- if err != nil {
- return nil, err
- }
- return parseCgroups(data)
-}
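
A short usage sketch of the Cgroups accessor being removed, illustrating the hierarchyID:controllers:path line format it parses (assumes a Linux host with cgroups mounted; illustrative only).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	cgroups, err := p.Cgroups() // parses each hierarchyID:controllers:path line
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgroups {
		// On a pure cgroup v2 host this is a single entry with HierarchyID 0
		// and no controllers.
		fmt.Println(cg.HierarchyID, cg.Controllers, cg.Path)
	}
}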
diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go
deleted file mode 100644
index 6134b3580c4..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_environ.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Environ reads the process's environment variables from /proc/<pid>/environ.
-func (p Proc) Environ() ([]string, error) {
- environments := make([]string, 0)
-
- data, err := util.ReadFileNoStat(p.path("environ"))
- if err != nil {
- return environments, err
- }
-
- environments = strings.Split(string(data), "\000")
- if len(environments) > 0 {
- environments = environments[:len(environments)-1]
- }
-
- return environments, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
deleted file mode 100644
index cf63227f064..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "regexp"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Regexp variables
-var (
- rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
- rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
- rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
- rInotify = regexp.MustCompile(`^inotify`)
- rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
-)
-
-// ProcFDInfo represents file descriptor information.
-type ProcFDInfo struct {
- // File descriptor
- FD string
- // File offset
- Pos string
- // File access mode and status flags
- Flags string
- // Mount point ID
- MntID string
- // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
- InotifyInfos []InotifyInfo
-}
-
-// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
-func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
- data, err := util.ReadFileNoStat(p.path("fdinfo", fd))
- if err != nil {
- return nil, err
- }
-
- var text, pos, flags, mntid string
- var inotify []InotifyInfo
-
- scanner := bufio.NewScanner(bytes.NewReader(data))
- for scanner.Scan() {
- text = scanner.Text()
- if rPos.MatchString(text) {
- pos = rPos.FindStringSubmatch(text)[1]
- } else if rFlags.MatchString(text) {
- flags = rFlags.FindStringSubmatch(text)[1]
- } else if rMntID.MatchString(text) {
- mntid = rMntID.FindStringSubmatch(text)[1]
- } else if rInotify.MatchString(text) {
- newInotify, err := parseInotifyInfo(text)
- if err != nil {
- return nil, err
- }
- inotify = append(inotify, *newInotify)
- }
- }
-
- i := &ProcFDInfo{
- FD: fd,
- Pos: pos,
- Flags: flags,
- MntID: mntid,
- InotifyInfos: inotify,
- }
-
- return i, nil
-}
-
-// InotifyInfo represents a single inotify line in the fdinfo file.
-type InotifyInfo struct {
- // Watch descriptor number
- WD string
- // Inode number
- Ino string
- // Device ID
- Sdev string
- // Mask of events being monitored
- Mask string
-}
-
-// InotifyInfo constructor. Only available on kernel 3.8+.
-func parseInotifyInfo(line string) (*InotifyInfo, error) {
- m := rInotifyParts.FindStringSubmatch(line)
- if len(m) >= 4 {
- var mask string
- if len(m) == 5 {
- mask = m[4]
- }
- i := &InotifyInfo{
- WD: m[1],
- Ino: m[2],
- Sdev: m[3],
- Mask: mask,
- }
- return i, nil
- }
- return nil, fmt.Errorf("invalid inode entry: %q", line)
-}
-
-// ProcFDInfos represents a list of ProcFDInfo structs.
-type ProcFDInfos []ProcFDInfo
-
-func (p ProcFDInfos) Len() int { return len(p) }
-func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
-
-// InotifyWatchLen returns the total number of inotify watches
-func (p ProcFDInfos) InotifyWatchLen() (int, error) {
- length := 0
- for _, f := range p {
- length += len(f.InotifyInfos)
- }
-
- return length, nil
-}
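
The FDInfo plumbing above aggregates per-descriptor metadata; a sketch of its intended use (the inotify lines need kernel 3.8+, as the comments note; illustrative only).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// One ProcFDInfo per readable entry under /proc/self/fdinfo.
	infos, err := p.FileDescriptorsInfo()
	if err != nil {
		log.Fatal(err)
	}
	// InotifyWatchLen sums the inotify lines across all descriptors.
	watches, _ := infos.InotifyWatchLen()
	fmt.Printf("%d open fds, %d inotify watches\n", len(infos), watches)
}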
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
deleted file mode 100644
index 776f3497173..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcIO models the content of /proc/<pid>/io.
-type ProcIO struct {
- // Chars read.
- RChar uint64
- // Chars written.
- WChar uint64
- // Read syscalls.
- SyscR uint64
- // Write syscalls.
- SyscW uint64
- // Bytes read.
- ReadBytes uint64
- // Bytes written.
- WriteBytes uint64
- // Bytes written, but taking into account truncation. See
- // Documentation/filesystems/proc.txt in the kernel sources for
- // detailed explanation.
- CancelledWriteBytes int64
-}
-
-// IO creates a new ProcIO instance from a given Proc instance.
-func (p Proc) IO() (ProcIO, error) {
- pio := ProcIO{}
-
- data, err := util.ReadFileNoStat(p.path("io"))
- if err != nil {
- return pio, err
- }
-
- ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
- "read_bytes: %d\nwrite_bytes: %d\n" +
- "cancelled_write_bytes: %d\n"
-
- _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
- &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
-
- return pio, err
-}
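
The Sscanf format string above mirrors the fixed layout of /proc/<pid>/io; a usage sketch (reading another PID's io file may require elevated privileges; illustrative only).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	io, err := p.IO()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes in %d syscalls, wrote %d bytes in %d syscalls\n",
		io.ReadBytes, io.SyscR, io.WriteBytes, io.SyscW)
}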
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
deleted file mode 100644
index dd20f198a30..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "regexp"
- "strconv"
-)
-
-// ProcLimits represents the soft limits for each of the process's resource
-// limits. For more information see getrlimit(2):
-// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
-type ProcLimits struct {
- // CPU time limit in seconds.
- CPUTime uint64
- // Maximum size of files that the process may create.
- FileSize uint64
- // Maximum size of the process's data segment (initialized data,
- // uninitialized data, and heap).
- DataSize uint64
- // Maximum size of the process stack in bytes.
- StackSize uint64
- // Maximum size of a core file.
- CoreFileSize uint64
- // Limit of the process's resident set in pages.
- ResidentSet uint64
- // Maximum number of processes that can be created for the real user ID of
- // the calling process.
- Processes uint64
- // Value one greater than the maximum file descriptor number that can be
- // opened by this process.
- OpenFiles uint64
- // Maximum number of bytes of memory that may be locked into RAM.
- LockedMemory uint64
- // Maximum size of the process's virtual memory address space in bytes.
- AddressSpace uint64
- // Limit on the combined number of flock(2) locks and fcntl(2) leases that
- // this process may establish.
- FileLocks uint64
- // Limit of signals that may be queued for the real user ID of the calling
- // process.
- PendingSignals uint64
- // Limit on the number of bytes that can be allocated for POSIX message
- // queues for the real user ID of the calling process.
- MsqqueueSize uint64
- // Limit of the nice priority set using setpriority(2) or nice(2).
- NicePriority uint64
- // Limit of the real-time priority set using sched_setscheduler(2) or
- // sched_setparam(2).
- RealtimePriority uint64
- // Limit (in microseconds) on the amount of CPU time that a process
- // scheduled under a real-time scheduling policy may consume without making
- // a blocking system call.
- RealtimeTimeout uint64
-}
-
-const (
- limitsFields = 4
- limitsUnlimited = "unlimited"
-)
-
-var (
- limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`)
-)
-
-// NewLimits returns the current soft limits of the process.
-//
-// Deprecated: use p.Limits() instead
-func (p Proc) NewLimits() (ProcLimits, error) {
- return p.Limits()
-}
-
-// Limits returns the current soft limits of the process.
-func (p Proc) Limits() (ProcLimits, error) {
- f, err := os.Open(p.path("limits"))
- if err != nil {
- return ProcLimits{}, err
- }
- defer f.Close()
-
- var (
- l = ProcLimits{}
- s = bufio.NewScanner(f)
- )
-
- s.Scan() // Skip limits header
-
- for s.Scan() {
- fields := limitsMatch.FindStringSubmatch(s.Text())
- if len(fields) != limitsFields {
- return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
- }
-
- switch fields[1] {
- case "Max cpu time":
- l.CPUTime, err = parseUint(fields[2])
- case "Max file size":
- l.FileSize, err = parseUint(fields[2])
- case "Max data size":
- l.DataSize, err = parseUint(fields[2])
- case "Max stack size":
- l.StackSize, err = parseUint(fields[2])
- case "Max core file size":
- l.CoreFileSize, err = parseUint(fields[2])
- case "Max resident set":
- l.ResidentSet, err = parseUint(fields[2])
- case "Max processes":
- l.Processes, err = parseUint(fields[2])
- case "Max open files":
- l.OpenFiles, err = parseUint(fields[2])
- case "Max locked memory":
- l.LockedMemory, err = parseUint(fields[2])
- case "Max address space":
- l.AddressSpace, err = parseUint(fields[2])
- case "Max file locks":
- l.FileLocks, err = parseUint(fields[2])
- case "Max pending signals":
- l.PendingSignals, err = parseUint(fields[2])
- case "Max msgqueue size":
- l.MsqqueueSize, err = parseUint(fields[2])
- case "Max nice priority":
- l.NicePriority, err = parseUint(fields[2])
- case "Max realtime priority":
- l.RealtimePriority, err = parseUint(fields[2])
- case "Max realtime timeout":
- l.RealtimeTimeout, err = parseUint(fields[2])
- }
- if err != nil {
- return ProcLimits{}, err
- }
- }
-
- return l, s.Err()
-}
-
-func parseUint(s string) (uint64, error) {
- if s == limitsUnlimited {
- return 18446744073709551615, nil
- }
- i, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
- }
- return i, nil
-}
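
Because parseUint maps the literal "unlimited" to the maximum uint64, callers compare against math.MaxUint64; a sketch (illustrative only).

package main

import (
	"fmt"
	"log"
	"math"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	limits, err := p.Limits()
	if err != nil {
		log.Fatal(err)
	}
	// "unlimited" soft limits come back as the maximum uint64 value.
	if limits.OpenFiles == math.MaxUint64 {
		fmt.Println("open files: unlimited")
	} else {
		fmt.Println("open files:", limits.OpenFiles)
	}
}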
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
deleted file mode 100644
index 1d7772d516a..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "strconv"
- "strings"
-
- "golang.org/x/sys/unix"
-)
-
-// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
-type ProcMapPermissions struct {
- // mapping has the [R]ead flag set
- Read bool
- // mapping has the [W]rite flag set
- Write bool
- // mapping has the [X]ecutable flag set
- Execute bool
- // mapping has the [S]hared flag set
- Shared bool
- // mapping is marked as [P]rivate (copy on write)
- Private bool
-}
-
-// ProcMap contains the process memory-mappings of the process,
-// read from /proc/[pid]/maps
-type ProcMap struct {
- // The start address of current mapping.
- StartAddr uintptr
- // The end address of the current mapping
- EndAddr uintptr
- // The permissions for this mapping
- Perms *ProcMapPermissions
- // The current offset into the file/fd (e.g., shared libs)
- Offset int64
- // Device owner of this mapping (major:minor) in Mkdev format.
- Dev uint64
- // The inode of the device above
- Inode uint64
-	// The file or pseudofile (or empty == anonymous)
- Pathname string
-}
-
-// parseDevice parses the device token of a line and converts it to a dev_t
-// (mkdev) like structure.
-func parseDevice(s string) (uint64, error) {
- toks := strings.Split(s, ":")
- if len(toks) < 2 {
- return 0, fmt.Errorf("unexpected number of fields")
- }
-
- major, err := strconv.ParseUint(toks[0], 16, 0)
- if err != nil {
- return 0, err
- }
-
- minor, err := strconv.ParseUint(toks[1], 16, 0)
- if err != nil {
- return 0, err
- }
-
- return unix.Mkdev(uint32(major), uint32(minor)), nil
-}
-
-// parseAddress just converts a hex-string to a uintptr
-func parseAddress(s string) (uintptr, error) {
- a, err := strconv.ParseUint(s, 16, 0)
- if err != nil {
- return 0, err
- }
-
- return uintptr(a), nil
-}
-
-// parseAddresses parses the start-end address
-func parseAddresses(s string) (uintptr, uintptr, error) {
- toks := strings.Split(s, "-")
- if len(toks) < 2 {
- return 0, 0, fmt.Errorf("invalid address")
- }
-
- saddr, err := parseAddress(toks[0])
- if err != nil {
- return 0, 0, err
- }
-
- eaddr, err := parseAddress(toks[1])
- if err != nil {
- return 0, 0, err
- }
-
- return saddr, eaddr, nil
-}
-
-// parsePermissions parses a token and returns any that are set.
-func parsePermissions(s string) (*ProcMapPermissions, error) {
- if len(s) < 4 {
- return nil, fmt.Errorf("invalid permissions token")
- }
-
- perms := ProcMapPermissions{}
- for _, ch := range s {
- switch ch {
- case 'r':
- perms.Read = true
- case 'w':
- perms.Write = true
- case 'x':
- perms.Execute = true
- case 'p':
- perms.Private = true
- case 's':
- perms.Shared = true
- }
- }
-
- return &perms, nil
-}
-
-// parseProcMap will attempt to parse a single line within a proc/[pid]/maps
-// buffer.
-func parseProcMap(text string) (*ProcMap, error) {
- fields := strings.Fields(text)
- if len(fields) < 5 {
- return nil, fmt.Errorf("truncated procmap entry")
- }
-
- saddr, eaddr, err := parseAddresses(fields[0])
- if err != nil {
- return nil, err
- }
-
- perms, err := parsePermissions(fields[1])
- if err != nil {
- return nil, err
- }
-
- offset, err := strconv.ParseInt(fields[2], 16, 0)
- if err != nil {
- return nil, err
- }
-
- device, err := parseDevice(fields[3])
- if err != nil {
- return nil, err
- }
-
- inode, err := strconv.ParseUint(fields[4], 10, 0)
- if err != nil {
- return nil, err
- }
-
- pathname := ""
-
- if len(fields) >= 5 {
- pathname = strings.Join(fields[5:], " ")
- }
-
- return &ProcMap{
- StartAddr: saddr,
- EndAddr: eaddr,
- Perms: perms,
- Offset: offset,
- Dev: device,
- Inode: inode,
- Pathname: pathname,
- }, nil
-}
-
-// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
-// process.
-func (p Proc) ProcMaps() ([]*ProcMap, error) {
- file, err := os.Open(p.path("maps"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- maps := []*ProcMap{}
- scan := bufio.NewScanner(file)
-
- for scan.Scan() {
- m, err := parseProcMap(scan.Text())
- if err != nil {
- return nil, err
- }
-
- maps = append(maps, m)
- }
-
- return maps, nil
-}
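
A sketch of ProcMaps, here used to list the executable mappings of the current process (illustrative only).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	maps, err := p.ProcMaps() // one ProcMap per line of /proc/self/maps
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range maps {
		if m.Perms.Execute {
			fmt.Printf("%x-%x %s\n", m.StartAddr, m.EndAddr, m.Pathname)
		}
	}
}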
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
deleted file mode 100644
index 391b4cbd11b..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_ns.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-// Namespace represents a single namespace of a process.
-type Namespace struct {
- Type string // Namespace type.
- Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
-}
-
-// Namespaces contains all of the namespaces that the process is contained in.
-type Namespaces map[string]Namespace
-
-// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
-// process is a member.
-func (p Proc) Namespaces() (Namespaces, error) {
- d, err := os.Open(p.path("ns"))
- if err != nil {
- return nil, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
- }
-
- ns := make(Namespaces, len(names))
- for _, name := range names {
- target, err := os.Readlink(p.path("ns", name))
- if err != nil {
- return nil, err
- }
-
- fields := strings.SplitN(target, ":", 2)
- if len(fields) != 2 {
- return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
- }
-
- typ := fields[0]
- inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
- if err != nil {
- return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
- }
-
- ns[name] = Namespace{typ, uint32(inode)}
- }
-
- return ns, nil
-}
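
Each /proc/<pid>/ns/* entry is a symlink of the form type:[inode], which is what the SplitN/Trim dance above decodes; a sketch (illustrative only).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	ns, err := p.Namespaces()
	if err != nil {
		log.Fatal(err)
	}
	// Two processes share a namespace exactly when these inodes match.
	for name, n := range ns {
		fmt.Printf("%s: type=%s inode=%d\n", name, n.Type, n.Inode)
	}
}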
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
deleted file mode 100644
index dc6c14f0a4c..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_psi.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-// The PSI / pressure interface is described at
-// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
-// Each resource (cpu, io, memory, ...) is exposed as a single file.
-// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
-// Each line contains several averages (over n seconds) and a total in µs.
-//
-// Example io pressure file:
-// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
-// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
-
-// PSILine is a single line of values as returned by /proc/pressure/*
-// The Avg entries are averages over n seconds, as a percentage
-// The Total line is in microseconds
-type PSILine struct {
- Avg10 float64
- Avg60 float64
- Avg300 float64
- Total uint64
-}
-
-// PSIStats represents pressure stall information from /proc/pressure/*
-// Some indicates the share of time in which at least some tasks are stalled
-// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
-type PSIStats struct {
- Some *PSILine
- Full *PSILine
-}
-
-// PSIStatsForResource reads pressure stall information for the specified
-// resource from /proc/pressure/<resource>. At the time of writing this can be
-// either "cpu", "memory" or "io".
-func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
- if err != nil {
- return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
- }
-
- return parsePSIStats(resource, bytes.NewReader(data))
-}
-
-// parsePSIStats parses the specified file for pressure stall information
-func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
- psiStats := PSIStats{}
-
- scanner := bufio.NewScanner(r)
- for scanner.Scan() {
- l := scanner.Text()
- prefix := strings.Split(l, " ")[0]
- switch prefix {
- case "some":
- psi := PSILine{}
- _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
- if err != nil {
- return PSIStats{}, err
- }
- psiStats.Some = &psi
- case "full":
- psi := PSILine{}
- _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
- if err != nil {
- return PSIStats{}, err
- }
- psiStats.Full = &psi
- default:
- // If we encounter a line with an unknown prefix, ignore it and move on
- // Should new measurement types be added in the future we'll simply ignore them instead
- // of erroring on retrieval
- continue
- }
- }
-
- return psiStats, nil
-}
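
A sketch of reading pressure stall information through this API; it assumes a kernel with PSI support (4.20+), and the read fails otherwise (illustrative only).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.PSIStatsForResource("io") // also "cpu" or "memory"
	if err != nil {
		log.Fatal(err) // e.g. kernel built without PSI support
	}
	if stats.Some != nil {
		fmt.Printf("io some: avg10=%.2f%% total=%dµs\n",
			stats.Some.Avg10, stats.Some.Total)
	}
}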
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
deleted file mode 100644
index a576a720a44..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "fmt"
- "os"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-var (
- // match the header line before each mapped zone in /proc/pid/smaps
- procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
-)
-
-type ProcSMapsRollup struct {
- // Amount of the mapping that is currently resident in RAM
- Rss uint64
- // Process's proportional share of this mapping
- Pss uint64
- // Size in bytes of clean shared pages
- SharedClean uint64
- // Size in bytes of dirty shared pages
- SharedDirty uint64
- // Size in bytes of clean private pages
- PrivateClean uint64
- // Size in bytes of dirty private pages
- PrivateDirty uint64
- // Amount of memory currently marked as referenced or accessed
- Referenced uint64
- // Amount of memory that does not belong to any file
- Anonymous uint64
-	// Amount of would-be-anonymous memory currently on swap
- Swap uint64
- // Process's proportional memory on swap
- SwapPss uint64
-}
-
-// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
-// process.
-//
-// If smaps_rollup does not exist (requires kernel >= 4.15), the content of /proc/[pid]/smaps
-// will be read and summed instead.
-func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
- data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
- if err != nil && os.IsNotExist(err) {
- return p.procSMapsRollupManual()
- }
- if err != nil {
- return ProcSMapsRollup{}, err
- }
-
- lines := strings.Split(string(data), "\n")
- smaps := ProcSMapsRollup{}
-
-	// skip the first line, which doesn't contain information we need
- lines = lines[1:]
- for _, line := range lines {
- if line == "" {
- continue
- }
-
- if err := smaps.parseLine(line); err != nil {
- return ProcSMapsRollup{}, err
- }
- }
-
- return smaps, nil
-}
-
-// Read /proc/pid/smaps and do the roll-up in Go code.
-func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
- file, err := os.Open(p.path("smaps"))
- if err != nil {
- return ProcSMapsRollup{}, err
- }
- defer file.Close()
-
- smaps := ProcSMapsRollup{}
- scan := bufio.NewScanner(file)
-
- for scan.Scan() {
- line := scan.Text()
-
- if procSMapsHeaderLine.MatchString(line) {
- continue
- }
-
- if err := smaps.parseLine(line); err != nil {
- return ProcSMapsRollup{}, err
- }
- }
-
- return smaps, nil
-}
-
-func (s *ProcSMapsRollup) parseLine(line string) error {
- kv := strings.SplitN(line, ":", 2)
- if len(kv) != 2 {
-		return errors.New("invalid smaps line, missing colon")
- }
-
- k := kv[0]
- if k == "VmFlags" {
- return nil
- }
-
- v := strings.TrimSpace(kv[1])
- v = strings.TrimRight(v, " kB")
-
- vKBytes, err := strconv.ParseUint(v, 10, 64)
- if err != nil {
- return err
- }
- vBytes := vKBytes * 1024
-
- s.addValue(k, v, vKBytes, vBytes)
-
- return nil
-}
-
-func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
- switch k {
- case "Rss":
- s.Rss += vUintBytes
- case "Pss":
- s.Pss += vUintBytes
- case "Shared_Clean":
- s.SharedClean += vUintBytes
- case "Shared_Dirty":
- s.SharedDirty += vUintBytes
- case "Private_Clean":
- s.PrivateClean += vUintBytes
- case "Private_Dirty":
- s.PrivateDirty += vUintBytes
- case "Referenced":
- s.Referenced += vUintBytes
- case "Anonymous":
- s.Anonymous += vUintBytes
- case "Swap":
- s.Swap += vUintBytes
- case "SwapPss":
- s.SwapPss += vUintBytes
- }
-}
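
A sketch of the rollup accessor; on kernels without smaps_rollup it transparently falls back to summing /proc/<pid>/smaps, as shown above (illustrative only).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	rollup, err := p.ProcSMapsRollup()
	if err != nil {
		log.Fatal(err)
	}
	// All values have already been converted from kB to bytes.
	fmt.Printf("rss=%d pss=%d swap=%d\n", rollup.Rss, rollup.Pss, rollup.Swap)
}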
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
deleted file mode 100644
index 67ca0e9fbc9..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bytes"
- "fmt"
- "os"
-
- "github.com/prometheus/procfs/internal/fs"
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
-// which required cgo. However, that caused a lot of problems regarding
-// cross-compilation. Alternatives such as running a binary to determine the
-// value, or trying to derive it in some other way were all problematic. After
-// much research it was determined that USER_HZ is actually hardcoded to 100 on
-// all Go-supported platforms as of the time of this writing. This is why we
-// decided to hardcode it here as well. It is not impossible that there could
-// be systems with exceptions, but they should be very exotic edge cases, and
-// in that case, the worst outcome will be two misreported metrics.
-//
-// See also the following discussions:
-//
-// - https://github.com/prometheus/node_exporter/issues/52
-// - https://github.com/prometheus/procfs/pull/2
-// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
-const userHZ = 100
-
-// ProcStat provides status information about the process,
-// read from /proc/[pid]/stat.
-type ProcStat struct {
- // The process ID.
- PID int
- // The filename of the executable.
- Comm string
- // The process state.
- State string
- // The PID of the parent of this process.
- PPID int
- // The process group ID of the process.
- PGRP int
- // The session ID of the process.
- Session int
- // The controlling terminal of the process.
- TTY int
- // The ID of the foreground process group of the controlling terminal of
- // the process.
- TPGID int
- // The kernel flags word of the process.
- Flags uint
- // The number of minor faults the process has made which have not required
- // loading a memory page from disk.
- MinFlt uint
- // The number of minor faults that the process's waited-for children have
- // made.
- CMinFlt uint
- // The number of major faults the process has made which have required
- // loading a memory page from disk.
- MajFlt uint
- // The number of major faults that the process's waited-for children have
- // made.
- CMajFlt uint
- // Amount of time that this process has been scheduled in user mode,
- // measured in clock ticks.
- UTime uint
- // Amount of time that this process has been scheduled in kernel mode,
- // measured in clock ticks.
- STime uint
- // Amount of time that this process's waited-for children have been
- // scheduled in user mode, measured in clock ticks.
- CUTime uint
- // Amount of time that this process's waited-for children have been
- // scheduled in kernel mode, measured in clock ticks.
- CSTime uint
- // For processes running a real-time scheduling policy, this is the negated
- // scheduling priority, minus one.
- Priority int
- // The nice value, a value in the range 19 (low priority) to -20 (high
- // priority).
- Nice int
- // Number of threads in this process.
- NumThreads int
- // The time the process started after system boot, the value is expressed
- // in clock ticks.
- Starttime uint64
- // Virtual memory size in bytes.
- VSize uint
- // Resident set size in pages.
- RSS int
-
- proc fs.FS
-}
-
-// NewStat returns the current status information of the process.
-//
-// Deprecated: use p.Stat() instead
-func (p Proc) NewStat() (ProcStat, error) {
- return p.Stat()
-}
-
-// Stat returns the current status information of the process.
-func (p Proc) Stat() (ProcStat, error) {
- data, err := util.ReadFileNoStat(p.path("stat"))
- if err != nil {
- return ProcStat{}, err
- }
-
- var (
- ignore int
-
- s = ProcStat{PID: p.PID, proc: p.fs}
- l = bytes.Index(data, []byte("("))
- r = bytes.LastIndex(data, []byte(")"))
- )
-
- if l < 0 || r < 0 {
- return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
- }
-
- s.Comm = string(data[l+1 : r])
- _, err = fmt.Fscan(
- bytes.NewBuffer(data[r+2:]),
- &s.State,
- &s.PPID,
- &s.PGRP,
- &s.Session,
- &s.TTY,
- &s.TPGID,
- &s.Flags,
- &s.MinFlt,
- &s.CMinFlt,
- &s.MajFlt,
- &s.CMajFlt,
- &s.UTime,
- &s.STime,
- &s.CUTime,
- &s.CSTime,
- &s.Priority,
- &s.Nice,
- &s.NumThreads,
- &ignore,
- &s.Starttime,
- &s.VSize,
- &s.RSS,
- )
- if err != nil {
- return ProcStat{}, err
- }
-
- return s, nil
-}
-
-// VirtualMemory returns the virtual memory size in bytes.
-func (s ProcStat) VirtualMemory() uint {
- return s.VSize
-}
-
-// ResidentMemory returns the resident memory size in bytes.
-func (s ProcStat) ResidentMemory() int {
- return s.RSS * os.Getpagesize()
-}
-
-// StartTime returns the unix timestamp of the process start in seconds.
-func (s ProcStat) StartTime() (float64, error) {
- fs := FS{proc: s.proc}
- stat, err := fs.Stat()
- if err != nil {
- return 0, err
- }
- return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
-}
-
-// CPUTime returns the total CPU user and system time in seconds.
-func (s ProcStat) CPUTime() float64 {
- return float64(s.UTime+s.STime) / userHZ
-}
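
A sketch tying the ProcStat helpers together, including the userHZ conversion discussed above (illustrative only).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	start, err := stat.StartTime() // needs /proc/stat for the boot time
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: cpu=%.2fs rss=%d bytes started=%.0f\n",
		stat.Comm, stat.CPUTime(), stat.ResidentMemory(), start)
}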
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
deleted file mode 100644
index 6edd8333b33..00000000000
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bytes"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcStatus provides status information about the process,
-// read from /proc/[pid]/status.
-type ProcStatus struct {
- // The process ID.
- PID int
- // The process name.
- Name string
-
- // Thread group ID.
- TGID int
-
- // Peak virtual memory size.
- VmPeak uint64 // nolint:golint
- // Virtual memory size.
- VmSize uint64 // nolint:golint
- // Locked memory size.
- VmLck uint64 // nolint:golint
- // Pinned memory size.
- VmPin uint64 // nolint:golint
- // Peak resident set size.
- VmHWM uint64 // nolint:golint
-	// Resident set size (sum of RssAnon, RssFile, and RssShmem).
- VmRSS uint64 // nolint:golint
- // Size of resident anonymous memory.
- RssAnon uint64 // nolint:golint
- // Size of resident file mappings.
- RssFile uint64 // nolint:golint
- // Size of resident shared memory.
- RssShmem uint64 // nolint:golint
- // Size of data segments.
- VmData uint64 // nolint:golint
- // Size of stack segments.
- VmStk uint64 // nolint:golint
- // Size of text segments.
- VmExe uint64 // nolint:golint
- // Shared library code size.
- VmLib uint64 // nolint:golint
- // Page table entries size.
- VmPTE uint64 // nolint:golint
- // Size of second-level page tables.
- VmPMD uint64 // nolint:golint
-	// Swapped-out virtual memory size of anonymous private pages.
- VmSwap uint64 // nolint:golint
- // Size of hugetlb memory portions
- HugetlbPages uint64
-
- // Number of voluntary context switches.
- VoluntaryCtxtSwitches uint64
- // Number of involuntary context switches.
- NonVoluntaryCtxtSwitches uint64
-
- // UIDs of the process (Real, effective, saved set, and filesystem UIDs)
- UIDs [4]string
- // GIDs of the process (Real, effective, saved set, and filesystem GIDs)
- GIDs [4]string
-}
-
-// NewStatus returns the current status information of the process.
-func (p Proc) NewStatus() (ProcStatus, error) {
- data, err := util.ReadFileNoStat(p.path("status"))
- if err != nil {
- return ProcStatus{}, err
- }
-
- s := ProcStatus{PID: p.PID}
-
- lines := strings.Split(string(data), "\n")
- for _, line := range lines {
- if !bytes.Contains([]byte(line), []byte(":")) {
- continue
- }
-
- kv := strings.SplitN(line, ":", 2)
-
- // removes spaces
- k := string(strings.TrimSpace(kv[0]))
- v := string(strings.TrimSpace(kv[1]))
- // removes "kB"
- v = string(bytes.Trim([]byte(v), " kB"))
-
-		// convert the value to an integer when possible; the error check can be
-		// skipped because vKBytes is not used when the value is a string
- vKBytes, _ := strconv.ParseUint(v, 10, 64)
- // convert kB to B
- vBytes := vKBytes * 1024
-
- s.fillStatus(k, v, vKBytes, vBytes)
- }
-
- return s, nil
-}
-
-func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
- switch k {
- case "Tgid":
- s.TGID = int(vUint)
- case "Name":
- s.Name = vString
- case "Uid":
- copy(s.UIDs[:], strings.Split(vString, "\t"))
- case "Gid":
- copy(s.GIDs[:], strings.Split(vString, "\t"))
- case "VmPeak":
- s.VmPeak = vUintBytes
- case "VmSize":
- s.VmSize = vUintBytes
- case "VmLck":
- s.VmLck = vUintBytes
- case "VmPin":
- s.VmPin = vUintBytes
- case "VmHWM":
- s.VmHWM = vUintBytes
- case "VmRSS":
- s.VmRSS = vUintBytes
- case "RssAnon":
- s.RssAnon = vUintBytes
- case "RssFile":
- s.RssFile = vUintBytes
- case "RssShmem":
- s.RssShmem = vUintBytes
- case "VmData":
- s.VmData = vUintBytes
- case "VmStk":
- s.VmStk = vUintBytes
- case "VmExe":
- s.VmExe = vUintBytes
- case "VmLib":
- s.VmLib = vUintBytes
- case "VmPTE":
- s.VmPTE = vUintBytes
- case "VmPMD":
- s.VmPMD = vUintBytes
- case "VmSwap":
- s.VmSwap = vUintBytes
- case "HugetlbPages":
- s.HugetlbPages = vUintBytes
- case "voluntary_ctxt_switches":
- s.VoluntaryCtxtSwitches = vUint
- case "nonvoluntary_ctxt_switches":
- s.NonVoluntaryCtxtSwitches = vUint
- }
-}
-
-// TotalCtxtSwitches returns the total number of context switches.
-func (s ProcStatus) TotalCtxtSwitches() uint64 {
- return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
-}
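
And a sketch of the status accessor, whose kB fields are exposed in bytes (illustrative only).

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	status, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s (tgid %d): VmRSS=%d bytes, ctx switches=%d\n",
		status.Name, status.TGID, status.VmRSS, status.TotalCtxtSwitches())
}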
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
deleted file mode 100644
index 28228164efb..00000000000
--- a/vendor/github.com/prometheus/procfs/schedstat.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "os"
- "regexp"
- "strconv"
-)
-
-var (
- cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
- procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
-)
-
-// Schedstat contains scheduler statistics from /proc/schedstat
-//
-// See
-// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
-// for a detailed description of what these numbers mean.
-//
-// Note the current kernel documentation claims some of the time units are in
-// jiffies when they are actually in nanoseconds since 2.6.23 with the
-// introduction of CFS. A fix to the documentation is pending. See
-// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
-type Schedstat struct {
- CPUs []*SchedstatCPU
-}
-
-// SchedstatCPU contains the values from one "cpu" line
-type SchedstatCPU struct {
- CPUNum string
-
- RunningNanoseconds uint64
- WaitingNanoseconds uint64
- RunTimeslices uint64
-}
-
-// ProcSchedstat contains the values from /proc/<pid>/schedstat
-type ProcSchedstat struct {
- RunningNanoseconds uint64
- WaitingNanoseconds uint64
- RunTimeslices uint64
-}
-
-// Schedstat reads data from /proc/schedstat
-func (fs FS) Schedstat() (*Schedstat, error) {
- file, err := os.Open(fs.proc.Path("schedstat"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- stats := &Schedstat{}
- scanner := bufio.NewScanner(file)
-
- for scanner.Scan() {
- match := cpuLineRE.FindStringSubmatch(scanner.Text())
- if match != nil {
- cpu := &SchedstatCPU{}
- cpu.CPUNum = match[1]
-
- cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
- if err != nil {
- continue
- }
-
- cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
- if err != nil {
- continue
- }
-
- cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
- if err != nil {
- continue
- }
-
- stats.CPUs = append(stats.CPUs, cpu)
- }
- }
-
- return stats, nil
-}
-
-func parseProcSchedstat(contents string) (ProcSchedstat, error) {
- var (
- stats ProcSchedstat
- err error
- )
- match := procLineRE.FindStringSubmatch(contents)
-
- if match != nil {
- stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
- if err != nil {
- return stats, err
- }
-
- stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
- if err != nil {
- return stats, err
- }
-
- stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
- return stats, err
- }
-
- return stats, errors.New("could not parse schedstat")
-}
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
deleted file mode 100644
index 7896fd72428..00000000000
--- a/vendor/github.com/prometheus/procfs/slab.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-var (
- slabSpace = regexp.MustCompile(`\s+`)
- slabVer = regexp.MustCompile(`slabinfo -`)
- slabHeader = regexp.MustCompile(`# name`)
-)
-
-// Slab represents a slab pool in the kernel.
-type Slab struct {
- Name string
- ObjActive int64
- ObjNum int64
- ObjSize int64
- ObjPerSlab int64
- PagesPerSlab int64
- // tunables
- Limit int64
- Batch int64
- SharedFactor int64
- SlabActive int64
- SlabNum int64
- SharedAvail int64
-}
-
-// SlabInfo represents info for all slabs.
-type SlabInfo struct {
- Slabs []*Slab
-}
-
-func shouldParseSlab(line string) bool {
- if slabVer.MatchString(line) {
- return false
- }
- if slabHeader.MatchString(line) {
- return false
- }
- return true
-}
-
-// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
-func parseV21SlabEntry(line string) (*Slab, error) {
- // First cleanup whitespace.
- l := slabSpace.ReplaceAllString(line, " ")
- s := strings.Split(l, " ")
- if len(s) != 16 {
- return nil, fmt.Errorf("unable to parse: %q", line)
- }
- var err error
- i := &Slab{Name: s[0]}
- i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
- if err != nil {
- return nil, err
- }
- i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
- if err != nil {
- return nil, err
- }
- i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
- if err != nil {
- return nil, err
- }
- i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
- if err != nil {
- return nil, err
- }
- i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
- if err != nil {
- return nil, err
- }
- i.Limit, err = strconv.ParseInt(s[8], 10, 64)
- if err != nil {
- return nil, err
- }
- i.Batch, err = strconv.ParseInt(s[9], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
- if err != nil {
- return nil, err
- }
- return i, nil
-}
-
-// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
-func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
- scanner := bufio.NewScanner(r)
- s := SlabInfo{Slabs: []*Slab{}}
- for scanner.Scan() {
- line := scanner.Text()
- if !shouldParseSlab(line) {
- continue
- }
- slab, err := parseV21SlabEntry(line)
- if err != nil {
- return s, err
- }
- s.Slabs = append(s.Slabs, slab)
- }
- return s, nil
-}
-
-// SlabInfo reads data from /proc/slabinfo
-func (fs FS) SlabInfo() (SlabInfo, error) {
- // TODO: Consider passing options to allow for parsing different
- // slabinfo versions. However, slabinfo 2.1 has been stable since
-	// kernel 2.6.10.
- data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
- if err != nil {
- return SlabInfo{}, err
- }
-
- return parseSlabInfo21(bytes.NewReader(data))
-}
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
deleted file mode 100644
index 6d8727541e4..00000000000
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/fs"
- "github.com/prometheus/procfs/internal/util"
-)
-
-// CPUStat shows how much time the CPU spent in various states.
-type CPUStat struct {
- User float64
- Nice float64
- System float64
- Idle float64
- Iowait float64
- IRQ float64
- SoftIRQ float64
- Steal float64
- Guest float64
- GuestNice float64
-}
-
-// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
-// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
-// It is possible to get per-cpu stats by reading /proc/softirqs
-type SoftIRQStat struct {
- Hi uint64
- Timer uint64
- NetTx uint64
- NetRx uint64
- Block uint64
- BlockIoPoll uint64
- Tasklet uint64
- Sched uint64
- Hrtimer uint64
- Rcu uint64
-}
-
-// Stat represents kernel/system statistics.
-type Stat struct {
- // Boot time in seconds since the Epoch.
- BootTime uint64
- // Summed up cpu statistics.
- CPUTotal CPUStat
- // Per-CPU statistics.
- CPU []CPUStat
- // Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
- IRQTotal uint64
- // Number of times a numbered IRQ was triggered.
- IRQ []uint64
- // Number of times a context switch happened.
- ContextSwitches uint64
- // Number of times a process was created.
- ProcessCreated uint64
- // Number of processes currently running.
- ProcessesRunning uint64
- // Number of processes currently blocked (waiting for IO).
- ProcessesBlocked uint64
- // Number of times a softirq was scheduled.
- SoftIRQTotal uint64
- // Detailed softirq statistics.
- SoftIRQ SoftIRQStat
-}
-
-// parseCPUStat parses a CPU statistics line and returns the CPUStat struct plus the CPU id (or -1 for the overall sum).
-func parseCPUStat(line string) (CPUStat, int64, error) {
- cpuStat := CPUStat{}
- var cpu string
-
- count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
- &cpu,
- &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
- &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
- &cpuStat.Guest, &cpuStat.GuestNice)
-
- if err != nil && err != io.EOF {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
- }
- if count == 0 {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
- }
-
- cpuStat.User /= userHZ
- cpuStat.Nice /= userHZ
- cpuStat.System /= userHZ
- cpuStat.Idle /= userHZ
- cpuStat.Iowait /= userHZ
- cpuStat.IRQ /= userHZ
- cpuStat.SoftIRQ /= userHZ
- cpuStat.Steal /= userHZ
- cpuStat.Guest /= userHZ
- cpuStat.GuestNice /= userHZ
-
- if cpu == "cpu" {
- return cpuStat, -1, nil
- }
-
- cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
- if err != nil {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
- }
-
- return cpuStat, cpuID, nil
-}
-
-// Parse a softirq line.
-func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
- softIRQStat := SoftIRQStat{}
- var total uint64
- var prefix string
-
- _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
- &prefix, &total,
- &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
- &softIRQStat.Block, &softIRQStat.BlockIoPoll,
- &softIRQStat.Tasklet, &softIRQStat.Sched,
- &softIRQStat.Hrtimer, &softIRQStat.Rcu)
-
- if err != nil {
- return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
- }
-
- return softIRQStat, total, nil
-}
-
-// NewStat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-//
-// Deprecated: use fs.Stat() instead
-func NewStat() (Stat, error) {
- fs, err := NewFS(fs.DefaultProcMountPoint)
- if err != nil {
- return Stat{}, err
- }
- return fs.Stat()
-}
-
-// NewStat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-//
-// Deprecated: use fs.Stat() instead
-func (fs FS) NewStat() (Stat, error) {
- return fs.Stat()
-}
-
-// Stat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) Stat() (Stat, error) {
- fileName := fs.proc.Path("stat")
- data, err := util.ReadFileNoStat(fileName)
- if err != nil {
- return Stat{}, err
- }
-
- stat := Stat{}
-
- scanner := bufio.NewScanner(bytes.NewReader(data))
- for scanner.Scan() {
- line := scanner.Text()
- parts := strings.Fields(scanner.Text())
-		// require at least <key> <value>
- if len(parts) < 2 {
- continue
- }
- switch {
- case parts[0] == "btime":
- if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
- }
- case parts[0] == "intr":
- if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
- }
- numberedIRQs := parts[2:]
- stat.IRQ = make([]uint64, len(numberedIRQs))
- for i, count := range numberedIRQs {
- if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
- }
- }
- case parts[0] == "ctxt":
- if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
- }
- case parts[0] == "processes":
- if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
- }
- case parts[0] == "procs_running":
- if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
- }
- case parts[0] == "procs_blocked":
- if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
- }
- case parts[0] == "softirq":
- softIRQStats, total, err := parseSoftIRQStat(line)
- if err != nil {
- return Stat{}, err
- }
- stat.SoftIRQTotal = total
- stat.SoftIRQ = softIRQStats
- case strings.HasPrefix(parts[0], "cpu"):
- cpuStat, cpuID, err := parseCPUStat(line)
- if err != nil {
- return Stat{}, err
- }
- if cpuID == -1 {
- stat.CPUTotal = cpuStat
- } else {
- for int64(len(stat.CPU)) <= cpuID {
- stat.CPU = append(stat.CPU, CPUStat{})
- }
- stat.CPU[cpuID] = cpuStat
- }
- }
- }
-
- if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
- }
-
- return stat, nil
-}
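
For reference, a minimal sketch of how callers consumed the Stat API deleted above, assuming the pre-removal github.com/prometheus/procfs package:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// CPU times are divided by userHZ during parsing, so they are in seconds.
	fmt.Printf("boot time: %d, context switches: %d, total user time: %.2fs\n",
		stat.BootTime, stat.ContextSwitches, stat.CPUTotal.User)
}
```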
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
deleted file mode 100644
index 15edc2212b6..00000000000
--- a/vendor/github.com/prometheus/procfs/swaps.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Swap represents an entry in /proc/swaps.
-type Swap struct {
- Filename string
- Type string
- Size int
- Used int
- Priority int
-}
-
-// Swaps returns a slice of all configured swap devices on the system.
-func (fs FS) Swaps() ([]*Swap, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
- if err != nil {
- return nil, err
- }
- return parseSwaps(data)
-}
-
-func parseSwaps(info []byte) ([]*Swap, error) {
- swaps := []*Swap{}
- scanner := bufio.NewScanner(bytes.NewReader(info))
- scanner.Scan() // ignore header line
- for scanner.Scan() {
- swapString := scanner.Text()
- parsedSwap, err := parseSwapString(swapString)
- if err != nil {
- return nil, err
- }
- swaps = append(swaps, parsedSwap)
- }
-
- err := scanner.Err()
- return swaps, err
-}
-
-func parseSwapString(swapString string) (*Swap, error) {
- var err error
-
- swapFields := strings.Fields(swapString)
- swapLength := len(swapFields)
- if swapLength < 5 {
- return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
- }
-
- swap := &Swap{
- Filename: swapFields[0],
- Type: swapFields[1],
- }
-
- swap.Size, err = strconv.Atoi(swapFields[2])
- if err != nil {
- return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
- }
- swap.Used, err = strconv.Atoi(swapFields[3])
- if err != nil {
- return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
- }
- swap.Priority, err = strconv.Atoi(swapFields[4])
- if err != nil {
- return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
- }
-
- return swap, nil
-}
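
A similar sketch for the Swaps API deleted above, under the same assumption that the pre-removal procfs package is available:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	swaps, err := fs.Swaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range swaps {
		// The kernel reports Size and Used in KiB in /proc/swaps.
		fmt.Printf("%s (%s): %d/%d KiB, prio %d\n",
			s.Filename, s.Type, s.Used, s.Size, s.Priority)
	}
}
```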
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
deleted file mode 100644
index 19ef02b8d4e..00000000000
--- a/vendor/github.com/prometheus/procfs/ttar
+++ /dev/null
@@ -1,413 +0,0 @@
-#!/usr/bin/env bash
-
-# Purpose: plain text tar format
-# Limitations: - only suitable for text files, directories, and symlinks
-# - stores only filename, content, and mode
-# - not designed for untrusted input
-#
-# Note: must work with bash version 3.2 (macOS)
-
-# Copyright 2017 Roger Luethi
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -o errexit -o nounset
-
-# Sanitize environment (for instance, standard sorting of glob matches)
-export LC_ALL=C
-
-path=""
-CMD=""
-ARG_STRING="$*"
-
-#------------------------------------------------------------------------------
-# Not all sed implementations can work on null bytes. In order to make ttar
-# work out of the box on macOS, use Python as a stream editor.
-
-USE_PYTHON=0
-
-PYTHON_CREATE_FILTER=$(cat << 'PCF'
-#!/usr/bin/env python
-
-import re
-import sys
-
-for line in sys.stdin:
- line = re.sub(r'EOF', r'\EOF', line)
- line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
- line = re.sub('\x00', r'NULLBYTE', line)
- sys.stdout.write(line)
-PCF
-)
-
-PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
-#!/usr/bin/env python
-
-import re
-import sys
-
-for line in sys.stdin:
-    line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
-    line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
-    line = re.sub(r'([^\\])EOF', r'\1', line)
-    line = re.sub(r'\\EOF', 'EOF', line)
-    sys.stdout.write(line)
-PEF
-)
-
-function test_environment {
-    if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
-        echo "WARNING sed unable to handle null bytes, using Python (slow)."
-        if ! which python >/dev/null; then
-            echo "ERROR Python not found. Aborting."
-            exit 2
-        fi
-        USE_PYTHON=1
-    fi
-}
-
-#------------------------------------------------------------------------------
-
-function usage {
- bname=$(basename "$0")
- cat << USAGE
-Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
-       $bname            -t -f <ARCHIVE>           (list archive contents)
-       $bname [-C <DIR>] -x -f <ARCHIVE>           (extract archive)
-
-Options:
-       -C <DIR>           (change directory)
- -v (verbose)
- --recursive-unlink (recursively delete existing directory if path
- collides with file or directory to extract)
-
-Example: Change to sysfs directory, create ttar file from fixtures directory
- $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
-USAGE
-exit "$1"
-}
-
-function vecho {
- if [ "${VERBOSE:-}" == "yes" ]; then
- echo >&7 "$@"
- fi
-}
-
-function set_cmd {
- if [ -n "$CMD" ]; then
- echo "ERROR: more than one command given"
- echo
- usage 2
- fi
- CMD=$1
-}
-
-unset VERBOSE
-unset RECURSIVE_UNLINK
-
-while getopts :cf:-:htxvC: opt; do
- case $opt in
- c)
- set_cmd "create"
- ;;
- f)
- ARCHIVE=$OPTARG
- ;;
- h)
- usage 0
- ;;
- t)
- set_cmd "list"
- ;;
- x)
- set_cmd "extract"
- ;;
- v)
- VERBOSE=yes
- exec 7>&1
- ;;
- C)
- CDIR=$OPTARG
- ;;
- -)
- case $OPTARG in
- recursive-unlink)
- RECURSIVE_UNLINK="yes"
- ;;
- *)
- echo -e "Error: invalid option -$OPTARG"
- echo
- usage 1
- ;;
- esac
- ;;
- *)
- echo >&2 "ERROR: invalid option -$OPTARG"
- echo
- usage 1
- ;;
- esac
-done
-
-# Remove processed options from arguments
-shift $(( OPTIND - 1 ));
-
-if [ "${CMD:-}" == "" ]; then
- echo >&2 "ERROR: no command given"
- echo
- usage 1
-elif [ "${ARCHIVE:-}" == "" ]; then
- echo >&2 "ERROR: no archive name given"
- echo
- usage 1
-fi
-
-function list {
- local path=""
- local size=0
- local line_no=0
- local ttar_file=$1
- if [ -n "${2:-}" ]; then
- echo >&2 "ERROR: too many arguments."
- echo
- usage 1
- fi
- if [ ! -e "$ttar_file" ]; then
- echo >&2 "ERROR: file not found ($ttar_file)"
- echo
- usage 1
- fi
- while read -r line; do
- line_no=$(( line_no + 1 ))
- if [ $size -gt 0 ]; then
- size=$(( size - 1 ))
- continue
- fi
- if [[ $line =~ ^Path:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
- size=${BASH_REMATCH[1]}
- echo "$path"
- elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- echo "$path/"
- elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
- echo "$path -> ${BASH_REMATCH[1]}"
- fi
- done < "$ttar_file"
-}
-
-function extract {
- local path=""
- local size=0
- local line_no=0
- local ttar_file=$1
- if [ -n "${2:-}" ]; then
- echo >&2 "ERROR: too many arguments."
- echo
- usage 1
- fi
- if [ ! -e "$ttar_file" ]; then
- echo >&2 "ERROR: file not found ($ttar_file)"
- echo
- usage 1
- fi
- while IFS= read -r line; do
- line_no=$(( line_no + 1 ))
- local eof_without_newline
- if [ "$size" -gt 0 ]; then
- if [[ "$line" =~ [^\\]EOF ]]; then
- # An EOF not preceded by a backslash indicates that the line
- # does not end with a newline
- eof_without_newline=1
- else
- eof_without_newline=0
- fi
- # Replace NULLBYTE with null byte if at beginning of line
- # Replace NULLBYTE with null byte unless preceded by backslash
- # Remove one backslash in front of NULLBYTE (if any)
- # Remove EOF unless preceded by backslash
- # Remove one backslash in front of EOF
- if [ $USE_PYTHON -eq 1 ]; then
- echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
- else
- # The repeated pattern makes up for sed's lack of negative
- # lookbehind assertions (for consecutive null bytes).
- echo -n "$line" | \
- sed -e 's/^NULLBYTE/\x0/g;
- s/\([^\\]\)NULLBYTE/\1\x0/g;
- s/\([^\\]\)NULLBYTE/\1\x0/g;
- s/\\NULLBYTE/NULLBYTE/g;
- s/\([^\\]\)EOF/\1/g;
- s/\\EOF/EOF/g;
- ' >> "$path"
- fi
- if [[ "$eof_without_newline" -eq 0 ]]; then
- echo >> "$path"
- fi
- size=$(( size - 1 ))
- continue
- fi
- if [[ $line =~ ^Path:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- if [ -L "$path" ]; then
- rm "$path"
- elif [ -d "$path" ]; then
- if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then
- rm -r "$path"
- else
- # Safe because symlinks to directories are dealt with above
- rmdir "$path"
- fi
- elif [ -e "$path" ]; then
- rm "$path"
- fi
- elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
- size=${BASH_REMATCH[1]}
- # Create file even if it is zero-length.
- touch "$path"
- vecho " $path"
- elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
- mode=${BASH_REMATCH[1]}
- chmod "$mode" "$path"
- vecho "$mode"
- elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- mkdir -p "$path"
- vecho " $path/"
- elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
- ln -s "${BASH_REMATCH[1]}" "$path"
- vecho " $path -> ${BASH_REMATCH[1]}"
- elif [[ $line =~ ^# ]]; then
- # Ignore comments between files
- continue
- else
- echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
- exit 1
- fi
- done < "$ttar_file"
-}
-
-function div {
- echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
- "- - - - - -"
-}
-
-function get_mode {
- local mfile=$1
- if [ -z "${STAT_OPTION:-}" ]; then
- if stat -c '%a' "$mfile" >/dev/null 2>&1; then
- # GNU stat
- STAT_OPTION='-c'
- STAT_FORMAT='%a'
- else
- # BSD stat
- STAT_OPTION='-f'
- # Octal output, user/group/other (omit file type, sticky bit)
- STAT_FORMAT='%OLp'
- fi
- fi
- stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
-}
-
-function _create {
- shopt -s nullglob
- local mode
- local eof_without_newline
- while (( "$#" )); do
- file=$1
- if [ -L "$file" ]; then
- echo "Path: $file"
- symlinkTo=$(readlink "$file")
- echo "SymlinkTo: $symlinkTo"
- vecho " $file -> $symlinkTo"
- div
- elif [ -d "$file" ]; then
- # Strip trailing slash (if there is one)
- file=${file%/}
- echo "Directory: $file"
- mode=$(get_mode "$file")
- echo "Mode: $mode"
- vecho "$mode $file/"
- div
- # Find all files and dirs, including hidden/dot files
- for x in "$file/"{*,.[^.]*}; do
- _create "$x"
- done
- elif [ -f "$file" ]; then
- echo "Path: $file"
- lines=$(wc -l "$file"|awk '{print $1}')
- eof_without_newline=0
- if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
- [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
- eof_without_newline=1
- lines=$((lines+1))
- fi
- echo "Lines: $lines"
- # Add backslash in front of EOF
- # Add backslash in front of NULLBYTE
- # Replace null byte with NULLBYTE
- if [ $USE_PYTHON -eq 1 ]; then
- < "$file" python -c "$PYTHON_CREATE_FILTER"
- else
- < "$file" \
- sed 's/EOF/\\EOF/g;
- s/NULLBYTE/\\NULLBYTE/g;
- s/\x0/NULLBYTE/g;
- '
- fi
- if [[ "$eof_without_newline" -eq 1 ]]; then
- # Finish line with EOF to indicate that the original line did
- # not end with a linefeed
- echo "EOF"
- fi
- mode=$(get_mode "$file")
- echo "Mode: $mode"
- vecho "$mode $file"
- div
- else
- echo >&2 "ERROR: file not found ($file in $(pwd))"
- exit 2
- fi
- shift
- done
-}
-
-function create {
- ttar_file=$1
- shift
- if [ -z "${1:-}" ]; then
- echo >&2 "ERROR: missing arguments."
- echo
- usage 1
- fi
- if [ -e "$ttar_file" ]; then
- rm "$ttar_file"
- fi
- exec > "$ttar_file"
- echo "# Archive created by ttar $ARG_STRING"
- _create "$@"
-}
-
-test_environment
-
-if [ -n "${CDIR:-}" ]; then
- if [[ "$ARCHIVE" != /* ]]; then
- # Relative path: preserve the archive's location before changing
- # directory
- ARCHIVE="$(pwd)/$ARCHIVE"
- fi
- cd "$CDIR"
-fi
-
-"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
deleted file mode 100644
index cb13891414b..00000000000
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package procfs
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// The VM interface is described at
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
-// Each setting is exposed as a single file.
-// Each file contains one line with a single numerical value, except lowmem_reserve_ratio,
-// which holds an array, and numa_zonelist_order (deprecated), which is a string.
-type VM struct {
- AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
- BlockDump *int64 // /proc/sys/vm/block_dump
- CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed
- DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes
- DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio
- DirtyBytes *int64 // /proc/sys/vm/dirty_bytes
- DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs
- DirtyRatio *int64 // /proc/sys/vm/dirty_ratio
- DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds
- DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs
- DropCaches *int64 // /proc/sys/vm/drop_caches
- ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold
- HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group
- LaptopMode *int64 // /proc/sys/vm/laptop_mode
- LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout
- LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio
- MaxMapCount *int64 // /proc/sys/vm/max_map_count
- MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill
- MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery
- MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes
- MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio
- MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio
- MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr
- NrHugepages *int64 // /proc/sys/vm/nr_hugepages
- NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy
- NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages
- NumaStat *int64 // /proc/sys/vm/numa_stat
- NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order
- OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks
- OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task
- OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes
- OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory
- OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio
- PageCluster *int64 // /proc/sys/vm/page-cluster
- PanicOnOom *int64 // /proc/sys/vm/panic_on_oom
- PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction
- StatInterval *int64 // /proc/sys/vm/stat_interval
- Swappiness *int64 // /proc/sys/vm/swappiness
- UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes
- VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure
- WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor
- WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor
- ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode
-}
-
-// VM reads the VM statistics from the specified `proc` filesystem.
-func (fs FS) VM() (*VM, error) {
- path := fs.proc.Path("sys/vm")
- file, err := os.Stat(path)
- if err != nil {
- return nil, err
- }
- if !file.Mode().IsDir() {
- return nil, fmt.Errorf("%s is not a directory", path)
- }
-
- files, err := ioutil.ReadDir(path)
- if err != nil {
- return nil, err
- }
-
- var vm VM
- for _, f := range files {
- if f.IsDir() {
- continue
- }
-
- name := filepath.Join(path, f.Name())
-		// ignore errors on read, as there are some write-only
-		// files in /proc/sys/vm
- value, err := util.SysReadFile(name)
- if err != nil {
- continue
- }
- vp := util.NewValueParser(value)
-
- switch f.Name() {
- case "admin_reserve_kbytes":
- vm.AdminReserveKbytes = vp.PInt64()
- case "block_dump":
- vm.BlockDump = vp.PInt64()
- case "compact_unevictable_allowed":
- vm.CompactUnevictableAllowed = vp.PInt64()
- case "dirty_background_bytes":
- vm.DirtyBackgroundBytes = vp.PInt64()
- case "dirty_background_ratio":
- vm.DirtyBackgroundRatio = vp.PInt64()
- case "dirty_bytes":
- vm.DirtyBytes = vp.PInt64()
- case "dirty_expire_centisecs":
- vm.DirtyExpireCentisecs = vp.PInt64()
- case "dirty_ratio":
- vm.DirtyRatio = vp.PInt64()
- case "dirtytime_expire_seconds":
- vm.DirtytimeExpireSeconds = vp.PInt64()
- case "dirty_writeback_centisecs":
- vm.DirtyWritebackCentisecs = vp.PInt64()
- case "drop_caches":
- vm.DropCaches = vp.PInt64()
- case "extfrag_threshold":
- vm.ExtfragThreshold = vp.PInt64()
- case "hugetlb_shm_group":
- vm.HugetlbShmGroup = vp.PInt64()
- case "laptop_mode":
- vm.LaptopMode = vp.PInt64()
- case "legacy_va_layout":
- vm.LegacyVaLayout = vp.PInt64()
- case "lowmem_reserve_ratio":
- stringSlice := strings.Fields(value)
- pint64Slice := make([]*int64, 0, len(stringSlice))
- for _, value := range stringSlice {
- vp := util.NewValueParser(value)
- pint64Slice = append(pint64Slice, vp.PInt64())
- }
- vm.LowmemReserveRatio = pint64Slice
- case "max_map_count":
- vm.MaxMapCount = vp.PInt64()
- case "memory_failure_early_kill":
- vm.MemoryFailureEarlyKill = vp.PInt64()
- case "memory_failure_recovery":
- vm.MemoryFailureRecovery = vp.PInt64()
- case "min_free_kbytes":
- vm.MinFreeKbytes = vp.PInt64()
- case "min_slab_ratio":
- vm.MinSlabRatio = vp.PInt64()
- case "min_unmapped_ratio":
- vm.MinUnmappedRatio = vp.PInt64()
- case "mmap_min_addr":
- vm.MmapMinAddr = vp.PInt64()
- case "nr_hugepages":
- vm.NrHugepages = vp.PInt64()
- case "nr_hugepages_mempolicy":
- vm.NrHugepagesMempolicy = vp.PInt64()
- case "nr_overcommit_hugepages":
- vm.NrOvercommitHugepages = vp.PInt64()
- case "numa_stat":
- vm.NumaStat = vp.PInt64()
- case "numa_zonelist_order":
- vm.NumaZonelistOrder = value
- case "oom_dump_tasks":
- vm.OomDumpTasks = vp.PInt64()
- case "oom_kill_allocating_task":
- vm.OomKillAllocatingTask = vp.PInt64()
- case "overcommit_kbytes":
- vm.OvercommitKbytes = vp.PInt64()
- case "overcommit_memory":
- vm.OvercommitMemory = vp.PInt64()
- case "overcommit_ratio":
- vm.OvercommitRatio = vp.PInt64()
- case "page-cluster":
- vm.PageCluster = vp.PInt64()
- case "panic_on_oom":
- vm.PanicOnOom = vp.PInt64()
- case "percpu_pagelist_fraction":
- vm.PercpuPagelistFraction = vp.PInt64()
- case "stat_interval":
- vm.StatInterval = vp.PInt64()
- case "swappiness":
- vm.Swappiness = vp.PInt64()
- case "user_reserve_kbytes":
- vm.UserReserveKbytes = vp.PInt64()
- case "vfs_cache_pressure":
- vm.VfsCachePressure = vp.PInt64()
- case "watermark_boost_factor":
- vm.WatermarkBoostFactor = vp.PInt64()
- case "watermark_scale_factor":
- vm.WatermarkScaleFactor = vp.PInt64()
- case "zone_reclaim_mode":
- vm.ZoneReclaimMode = vp.PInt64()
- }
- if err := vp.Err(); err != nil {
- return nil, err
- }
- }
-
- return &vm, nil
-}
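
For reference, a minimal sketch of consuming the VM API deleted above; the fields are *int64 precisely because individual tunables may be absent or write-only, so callers nil-check:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	vm, err := fs.VM()
	if err != nil {
		log.Fatal(err)
	}
	if vm.Swappiness != nil { // nil when the tunable could not be read
		fmt.Println("vm.swappiness =", *vm.Swappiness)
	}
}
```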
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
deleted file mode 100644
index eed07c7d774..00000000000
--- a/vendor/github.com/prometheus/procfs/xfrm.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2017 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-// XfrmStat models the contents of /proc/net/xfrm_stat.
-type XfrmStat struct {
- // All errors which are not matched by other
- XfrmInError int
- // No buffer is left
- XfrmInBufferError int
- // Header Error
- XfrmInHdrError int
- // No state found
- // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
- XfrmInNoStates int
- // Transformation protocol specific error
- // e.g. SA Key is wrong
- XfrmInStateProtoError int
- // Transformation mode specific error
- XfrmInStateModeError int
- // Sequence error
- // e.g. sequence number is out of window
- XfrmInStateSeqError int
- // State is expired
- XfrmInStateExpired int
- // State has mismatch option
- // e.g. UDP encapsulation type is mismatched
- XfrmInStateMismatch int
- // State is invalid
- XfrmInStateInvalid int
- // No matching template for states
- // e.g. Inbound SAs are correct but SP rule is wrong
- XfrmInTmplMismatch int
- // No policy is found for states
- // e.g. Inbound SAs are correct but no SP is found
- XfrmInNoPols int
- // Policy discards
- XfrmInPolBlock int
- // Policy error
- XfrmInPolError int
- // All errors which are not matched by others
- XfrmOutError int
- // Bundle generation error
- XfrmOutBundleGenError int
- // Bundle check error
- XfrmOutBundleCheckError int
- // No state was found
- XfrmOutNoStates int
- // Transformation protocol specific error
- XfrmOutStateProtoError int
-	// Transformation mode specific error
- XfrmOutStateModeError int
- // Sequence error
-	// i.e. sequence number overflow
- XfrmOutStateSeqError int
- // State is expired
- XfrmOutStateExpired int
-	// Policy discards
- XfrmOutPolBlock int
- // Policy is dead
- XfrmOutPolDead int
- // Policy Error
- XfrmOutPolError int
- XfrmFwdHdrError int
- XfrmOutStateInvalid int
- XfrmAcquireError int
-}
-
-// NewXfrmStat reads the xfrm_stat statistics.
-func NewXfrmStat() (XfrmStat, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return XfrmStat{}, err
- }
-
- return fs.NewXfrmStat()
-}
-
-// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
-func (fs FS) NewXfrmStat() (XfrmStat, error) {
- file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
- if err != nil {
- return XfrmStat{}, err
- }
- defer file.Close()
-
- var (
- x = XfrmStat{}
- s = bufio.NewScanner(file)
- )
-
- for s.Scan() {
- fields := strings.Fields(s.Text())
-
- if len(fields) != 2 {
- return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
- }
-
- name := fields[0]
- value, err := strconv.Atoi(fields[1])
- if err != nil {
- return XfrmStat{}, err
- }
-
- switch name {
- case "XfrmInError":
- x.XfrmInError = value
- case "XfrmInBufferError":
- x.XfrmInBufferError = value
- case "XfrmInHdrError":
- x.XfrmInHdrError = value
- case "XfrmInNoStates":
- x.XfrmInNoStates = value
- case "XfrmInStateProtoError":
- x.XfrmInStateProtoError = value
- case "XfrmInStateModeError":
- x.XfrmInStateModeError = value
- case "XfrmInStateSeqError":
- x.XfrmInStateSeqError = value
- case "XfrmInStateExpired":
- x.XfrmInStateExpired = value
- case "XfrmInStateInvalid":
- x.XfrmInStateInvalid = value
- case "XfrmInTmplMismatch":
- x.XfrmInTmplMismatch = value
- case "XfrmInNoPols":
- x.XfrmInNoPols = value
- case "XfrmInPolBlock":
- x.XfrmInPolBlock = value
- case "XfrmInPolError":
- x.XfrmInPolError = value
- case "XfrmOutError":
- x.XfrmOutError = value
- case "XfrmInStateMismatch":
- x.XfrmInStateMismatch = value
- case "XfrmOutBundleGenError":
- x.XfrmOutBundleGenError = value
- case "XfrmOutBundleCheckError":
- x.XfrmOutBundleCheckError = value
- case "XfrmOutNoStates":
- x.XfrmOutNoStates = value
- case "XfrmOutStateProtoError":
- x.XfrmOutStateProtoError = value
- case "XfrmOutStateModeError":
- x.XfrmOutStateModeError = value
- case "XfrmOutStateSeqError":
- x.XfrmOutStateSeqError = value
- case "XfrmOutStateExpired":
- x.XfrmOutStateExpired = value
- case "XfrmOutPolBlock":
- x.XfrmOutPolBlock = value
- case "XfrmOutPolDead":
- x.XfrmOutPolDead = value
- case "XfrmOutPolError":
- x.XfrmOutPolError = value
- case "XfrmFwdHdrError":
- x.XfrmFwdHdrError = value
- case "XfrmOutStateInvalid":
- x.XfrmOutStateInvalid = value
- case "XfrmAcquireError":
- x.XfrmAcquireError = value
- }
-
- }
-
- return x, s.Err()
-}
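
And a matching sketch for the XfrmStat API deleted above, again assuming the pre-removal procfs package:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	x, err := fs.NewXfrmStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("XfrmInError=%d XfrmInNoStates=%d\n", x.XfrmInError, x.XfrmInNoStates)
}
```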
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
deleted file mode 100644
index 0b9bb6796b3..00000000000
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package procfs
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "regexp"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Zoneinfo holds info parsed from /proc/zoneinfo.
-type Zoneinfo struct {
- Node string
- Zone string
- NrFreePages *int64
- Min *int64
- Low *int64
- High *int64
- Scanned *int64
- Spanned *int64
- Present *int64
- Managed *int64
- NrActiveAnon *int64
- NrInactiveAnon *int64
- NrIsolatedAnon *int64
- NrAnonPages *int64
- NrAnonTransparentHugepages *int64
- NrActiveFile *int64
- NrInactiveFile *int64
- NrIsolatedFile *int64
- NrFilePages *int64
- NrSlabReclaimable *int64
- NrSlabUnreclaimable *int64
- NrMlockStack *int64
- NrKernelStack *int64
- NrMapped *int64
- NrDirty *int64
- NrWriteback *int64
- NrUnevictable *int64
- NrShmem *int64
- NrDirtied *int64
- NrWritten *int64
- NumaHit *int64
- NumaMiss *int64
- NumaForeign *int64
- NumaInterleave *int64
- NumaLocal *int64
- NumaOther *int64
- Protection []*int64
-}
-
-var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
-
-// Zoneinfo parses a zoneinfo file (/proc/zoneinfo) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
-func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
- if err != nil {
- return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
- }
- zoneinfo, err := parseZoneinfo(data)
- if err != nil {
- return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
- }
- return zoneinfo, nil
-}
-
-func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
-
- zoneinfo := []Zoneinfo{}
-
- zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
- for _, block := range zoneinfoBlocks {
- var zoneinfoElement Zoneinfo
- lines := strings.Split(string(block), "\n")
- for _, line := range lines {
-
- if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
- zoneinfoElement.Node = nodeZone[1]
- zoneinfoElement.Zone = nodeZone[2]
- continue
- }
- if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
- zoneinfoElement.Zone = ""
- continue
- }
- parts := strings.Fields(strings.TrimSpace(line))
- if len(parts) < 2 {
- continue
- }
- vp := util.NewValueParser(parts[1])
- switch parts[0] {
- case "nr_free_pages":
- zoneinfoElement.NrFreePages = vp.PInt64()
- case "min":
- zoneinfoElement.Min = vp.PInt64()
- case "low":
- zoneinfoElement.Low = vp.PInt64()
- case "high":
- zoneinfoElement.High = vp.PInt64()
- case "scanned":
- zoneinfoElement.Scanned = vp.PInt64()
- case "spanned":
- zoneinfoElement.Spanned = vp.PInt64()
- case "present":
- zoneinfoElement.Present = vp.PInt64()
- case "managed":
- zoneinfoElement.Managed = vp.PInt64()
- case "nr_active_anon":
- zoneinfoElement.NrActiveAnon = vp.PInt64()
- case "nr_inactive_anon":
- zoneinfoElement.NrInactiveAnon = vp.PInt64()
- case "nr_isolated_anon":
- zoneinfoElement.NrIsolatedAnon = vp.PInt64()
- case "nr_anon_pages":
- zoneinfoElement.NrAnonPages = vp.PInt64()
- case "nr_anon_transparent_hugepages":
- zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
- case "nr_active_file":
- zoneinfoElement.NrActiveFile = vp.PInt64()
- case "nr_inactive_file":
- zoneinfoElement.NrInactiveFile = vp.PInt64()
- case "nr_isolated_file":
- zoneinfoElement.NrIsolatedFile = vp.PInt64()
- case "nr_file_pages":
- zoneinfoElement.NrFilePages = vp.PInt64()
- case "nr_slab_reclaimable":
- zoneinfoElement.NrSlabReclaimable = vp.PInt64()
- case "nr_slab_unreclaimable":
- zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
- case "nr_mlock_stack":
- zoneinfoElement.NrMlockStack = vp.PInt64()
- case "nr_kernel_stack":
- zoneinfoElement.NrKernelStack = vp.PInt64()
- case "nr_mapped":
- zoneinfoElement.NrMapped = vp.PInt64()
- case "nr_dirty":
- zoneinfoElement.NrDirty = vp.PInt64()
- case "nr_writeback":
- zoneinfoElement.NrWriteback = vp.PInt64()
- case "nr_unevictable":
- zoneinfoElement.NrUnevictable = vp.PInt64()
- case "nr_shmem":
- zoneinfoElement.NrShmem = vp.PInt64()
- case "nr_dirtied":
- zoneinfoElement.NrDirtied = vp.PInt64()
- case "nr_written":
- zoneinfoElement.NrWritten = vp.PInt64()
- case "numa_hit":
- zoneinfoElement.NumaHit = vp.PInt64()
- case "numa_miss":
- zoneinfoElement.NumaMiss = vp.PInt64()
- case "numa_foreign":
- zoneinfoElement.NumaForeign = vp.PInt64()
- case "numa_interleave":
- zoneinfoElement.NumaInterleave = vp.PInt64()
- case "numa_local":
- zoneinfoElement.NumaLocal = vp.PInt64()
- case "numa_other":
- zoneinfoElement.NumaOther = vp.PInt64()
- case "protection:":
- protectionParts := strings.Split(line, ":")
- protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
- protectionValues = strings.Replace(protectionValues, ")", "", 1)
- protectionValues = strings.TrimSpace(protectionValues)
- protectionStringMap := strings.Split(protectionValues, ", ")
- val, err := util.ParsePInt64s(protectionStringMap)
- if err == nil {
- zoneinfoElement.Protection = val
- }
- }
-
- }
-
- zoneinfo = append(zoneinfo, zoneinfoElement)
- }
- return zoneinfo, nil
-}
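
Finally, a minimal sketch of walking the per-zone records returned by the Zoneinfo API deleted above; most counters are *int64 and may be nil:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	zones, err := fs.Zoneinfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range zones {
		if z.NrFreePages != nil {
			fmt.Printf("node %s zone %-8s free pages: %d\n", z.Node, z.Zone, *z.NrFreePages)
		}
	}
}
```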
diff --git a/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml b/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml
new file mode 100644
index 00000000000..7df8aa19838
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml
@@ -0,0 +1,4 @@
+# For documentation, see https://golangci-lint.run/usage/configuration/
+linters:
+ enable:
+ - gofumpt
diff --git a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml b/vendor/github.com/seccomp/libseccomp-golang/.travis.yml
deleted file mode 100644
index feef144d130..00000000000
--- a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-# Travis CI configuration for libseccomp-golang
-
-# https://docs.travis-ci.com/user/reference/bionic
-# https://wiki.ubuntu.com/Releases
-
-dist: bionic
-sudo: false
-
-notifications:
- email:
- on_success: always
- on_failure: always
-
-arch:
- - amd64
-
-os:
- - linux
-
-language: go
-
-addons:
- apt:
- packages:
- - build-essential
- # TODO: use the main libseccomp git repo instead of a distro package
- - libseccomp2
- - libseccomp-dev
-
-install:
- - go get -u golang.org/x/lint/golint
-
-# run all of the tests independently, fail if any of the tests error
-script:
- - make check-syntax
- - make lint
- - make check
diff --git a/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md b/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md
index d6862cbd5f9..c2fc80d5af6 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md
+++ b/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md
@@ -1,31 +1,23 @@
-How to Submit Patches to the libseccomp Project
+How to Submit Patches to the libseccomp-golang Project
===============================================================================
https://github.com/seccomp/libseccomp-golang
This document is intended to act as a guide to help you contribute to the
-libseccomp project. It is not perfect, and there will always be exceptions
-to the rules described here, but by following the instructions below you
-should have a much easier time getting your work merged with the upstream
+libseccomp-golang project. It is not perfect, and there will always be
+exceptions to the rules described here, but by following the instructions below
+you should have a much easier time getting your work merged with the upstream
project.
## Test Your Code Using Existing Tests
-There are two possible tests you can run to verify your code. The first
-test is used to check the formatting and coding style of your changes, you
-can run the test with the following command:
-
- # make check-syntax
-
-... if there are any problems with your changes a diff/patch will be shown
-which indicates the problems and how to fix them.
-
-The second possible test is used to ensure the sanity of your code changes
-and to test these changes against the included tests. You can run the test
-with the following command:
+A number of tests and lint related recipes are provided in the Makefile; if
+you want to run the standard regression tests, you can execute the following:
# make check
-... if there are any faults or errors they will be displayed.
+In order to use it, the 'golangci-lint' tool is needed, which can be found at:
+
+* https://github.com/golangci/golangci-lint
## Add New Tests for New Functionality
diff --git a/vendor/github.com/seccomp/libseccomp-golang/Makefile b/vendor/github.com/seccomp/libseccomp-golang/Makefile
index 1ff4cc89859..530f5b4adbc 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/Makefile
+++ b/vendor/github.com/seccomp/libseccomp-golang/Makefile
@@ -4,7 +4,7 @@
all: check-build
-check: vet test
+check: lint test
check-build:
go build
@@ -16,11 +16,16 @@ fix-syntax:
gofmt -w .
vet:
- go vet -v
+ go vet -v ./...
+
+# Previous bugs have made the tests freeze until the timeout. Golang default
+# timeout for tests is 10 minutes, which is too long, considering current tests
+# can be executed in less than 1 second. Reduce the timeout, so problems can
+# be noticed earlier in the CI.
+TEST_TIMEOUT=10s
test:
- go test -v
+ go test -v -timeout $(TEST_TIMEOUT)
lint:
- @$(if $(shell which golint),true,$(error "install golint and include it in your PATH"))
- golint -set_exit_status
+ golangci-lint run .
diff --git a/vendor/github.com/seccomp/libseccomp-golang/README.md b/vendor/github.com/seccomp/libseccomp-golang/README.md
index 27423f2d97f..6430f1c9e25 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/README.md
+++ b/vendor/github.com/seccomp/libseccomp-golang/README.md
@@ -2,7 +2,9 @@
===============================================================================
https://github.com/seccomp/libseccomp-golang
-[![Build Status](https://travis-ci.org/seccomp/libseccomp-golang.svg?branch=master)](https://travis-ci.org/seccomp/libseccomp-golang)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/seccomp/libseccomp-golang)](https://pkg.go.dev/github.com/seccomp/libseccomp-golang)
+[![validate](https://github.com/seccomp/libseccomp-golang/actions/workflows/validate.yml/badge.svg)](https://github.com/seccomp/libseccomp-golang/actions/workflows/validate.yml)
+[![test](https://github.com/seccomp/libseccomp-golang/actions/workflows/test.yml/badge.svg)](https://github.com/seccomp/libseccomp-golang/actions/workflows/test.yml)
The libseccomp library provides an easy to use, platform independent, interface
to the Linux Kernel's syscall filtering mechanism. The libseccomp API is
@@ -26,26 +28,14 @@ list.
* https://groups.google.com/d/forum/libseccomp
-Documentation is also available at:
+Documentation for this package is also available at:
-* https://godoc.org/github.com/seccomp/libseccomp-golang
+* https://pkg.go.dev/github.com/seccomp/libseccomp-golang
## Installing the package
-The libseccomp-golang bindings require at least Go v1.2.1 and GCC v4.8.4;
-earlier versions may yield unpredictable results. If you meet these
-requirements you can install this package using the command below:
-
# go get github.com/seccomp/libseccomp-golang
-## Testing the Library
-
-A number of tests and lint related recipes are provided in the Makefile, if
-you want to run the standard regression tests, you can execute the following:
-
- # make check
-
-In order to execute the 'make lint' recipe the 'golint' tool is needed, it
-can be found at:
+## Contributing
-* https://github.com/golang/lint
+See [CONTRIBUTING.md](CONTRIBUTING.md).
diff --git a/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md b/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md
new file mode 100644
index 00000000000..c448faa8e80
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md
@@ -0,0 +1,47 @@
+The libseccomp-golang Security Vulnerability Handling Process
+===============================================================================
+https://github.com/seccomp/libseccomp-golang
+
+This document attempts to describe the processes through which
+sensitive security relevant bugs can be responsibly disclosed to the
+libseccomp-golang project and how the project maintainers should handle these
+reports. Just like the other libseccomp-golang process documents, this
+document should be treated as a guiding document and not a hard, unyielding set
+of regulations; the bug reporters and project maintainers are encouraged to
+work together to address the issues as best they can, in a manner which works
+best for all parties involved.
+
+### Reporting Problems
+
+Problems with the libseccomp-golang library that are not suitable for immediate
+public disclosure should be emailed to the current libseccomp-golang
+maintainers; the list is below. We typically request at most a 90-day time
+period to address the issue before it is made public, but we will make every
+effort to address the issue as quickly as possible and shorten the disclosure
+window.
+
+* Paul Moore, paul@paul-moore.com
+* Tom Hromatka, tom.hromatka@oracle.com
+
+### Resolving Sensitive Security Issues
+
+Upon disclosure of a bug, the maintainers should work together to investigate
+the problem and decide on a solution. In order to prevent an early disclosure
+of the problem, those working on the solution should do so privately and
+outside of the traditional libseccomp-golang development practices. One
+possible solution to this is to leverage the GitHub "Security" functionality to
+create a private development fork that can be shared among the maintainers, and
+optionally the reporter. A placeholder GitHub issue may be created, but
+details should remain extremely limited until such time as the problem has been
+fixed and responsibly disclosed. If a CVE, or other tag, has been assigned to
+the problem, the GitHub issue title should include the vulnerability tag once
+the problem has been disclosed.
+
+### Public Disclosure
+
+Whenever possible, responsible reporting and patching practices should be
+followed, including notification to the linux-distros and oss-security mailing
+lists.
+
+* https://oss-security.openwall.org/wiki/mailing-lists/distros
+* https://oss-security.openwall.org/wiki/mailing-lists/oss-security
diff --git a/vendor/github.com/seccomp/libseccomp-golang/go.sum b/vendor/github.com/seccomp/libseccomp-golang/go.sum
index 72ae1611149..e69de29bb2d 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/go.sum
+++ b/vendor/github.com/seccomp/libseccomp-golang/go.sum
@@ -1,23 +0,0 @@
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200313205530-4303120df7d8 h1:gkI/wGGwpcG5W4hLCzZNGxA4wzWBGGDStRI1MrjDl2Q=
-golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
index e489b9ebd4a..8dad12fdbb9 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
@@ -1,5 +1,3 @@
-// +build linux
-
// Public API specification for libseccomp Go bindings
// Contains public API for the bindings
@@ -18,35 +16,36 @@ import (
"unsafe"
)
-// C wrapping code
-
-// #cgo pkg-config: libseccomp
 // #include <stdlib.h>
 // #include <seccomp.h>
import "C"
// Exported types
-// VersionError denotes that the system libseccomp version is incompatible
-// with this package.
+// VersionError represents an error when either the system libseccomp version
+// or the kernel version is too old to perform the operation requested.
type VersionError struct {
- message string
- minimum string
+ op string // operation that failed or would fail
+ major, minor, micro uint // minimally required libseccomp version
+ curAPI, minAPI uint // current and minimally required API versions
+}
+
+func init() {
+ // This forces the cgo libseccomp to initialize its internal API support state,
+ // which is necessary on older versions of libseccomp in order to work
+ // correctly.
+ _, _ = getAPI()
}
func (e VersionError) Error() string {
- format := "Libseccomp version too low: "
- if e.message != "" {
- format += e.message + ": "
+ if e.minAPI != 0 {
+ return fmt.Sprintf("%s requires libseccomp >= %d.%d.%d and API level >= %d "+
+ "(current version: %d.%d.%d, API level: %d)",
+ e.op, e.major, e.minor, e.micro, e.minAPI,
+ verMajor, verMinor, verMicro, e.curAPI)
}
- format += "minimum supported is "
- if e.minimum != "" {
- format += e.minimum + ": "
- } else {
- format += "2.2.0: "
- }
- format += "detected %d.%d.%d"
- return fmt.Sprintf(format, verMajor, verMinor, verMicro)
+ return fmt.Sprintf("%s requires libseccomp >= %d.%d.%d (current version: %d.%d.%d)",
+ e.op, e.major, e.minor, e.micro, verMajor, verMinor, verMicro)
}
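
Seen from calling code, the reworked error now names the failing operation and both the required and detected versions. A minimal sketch, assuming the vendored package above; the exact message depends on the host's libseccomp, and nothing prints when the requested API level is supported:

```go
package main

import (
	"fmt"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	// On a host with an old libseccomp this prints something like:
	//   set API level requires libseccomp >= 2.4.0 (current version: 2.3.1)
	if err := seccomp.SetAPI(6); err != nil {
		fmt.Println(err)
	}
}
```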
// ScmpArch represents a CPU architecture. Seccomp can restrict syscalls on a
@@ -69,9 +68,61 @@ type ScmpCondition struct {
Operand2 uint64 `json:"operand_two,omitempty"`
}
-// ScmpSyscall represents a Linux System Call
+// Seccomp userspace notification structures associated with filters that use the ActNotify action.
+
+// ScmpSyscall identifies a Linux System Call by its number.
type ScmpSyscall int32
+// ScmpFd represents a file-descriptor used for seccomp userspace notifications.
+type ScmpFd int32
+
+// ScmpNotifData describes the system call context that triggered a notification.
+//
+// Syscall: the syscall number
+// Arch: the filter architecture
+// InstrPointer: address of the instruction that triggered a notification
+// Args: arguments (up to 6) for the syscall
+//
+type ScmpNotifData struct {
+ Syscall ScmpSyscall `json:"syscall,omitempty"`
+ Arch ScmpArch `json:"arch,omitempty"`
+ InstrPointer uint64 `json:"instr_pointer,omitempty"`
+ Args []uint64 `json:"args,omitempty"`
+}
+
+// ScmpNotifReq represents a seccomp userspace notification. See NotifReceive() for
+// info on how to pull such a notification.
+//
+// ID: notification ID
+// Pid: process that triggered the notification event
+// Flags: filter flags (see seccomp(2))
+// Data: system call context that triggered the notification
+//
+type ScmpNotifReq struct {
+ ID uint64 `json:"id,omitempty"`
+ Pid uint32 `json:"pid,omitempty"`
+ Flags uint32 `json:"flags,omitempty"`
+ Data ScmpNotifData `json:"data,omitempty"`
+}
+
+// ScmpNotifResp represents a seccomp userspace notification response. See NotifRespond()
+// for info on how to push such a response.
+//
+// ID: notification ID (must match the corresponding ScmpNotifReq ID)
+// Error: must be 0 if no error occurred, or an error constant from package
+// syscall (e.g., syscall.EPERM, etc). In the latter case, it's used
+// as an error return from the syscall that created the notification.
+// Val: return value for the syscall that created the notification. Only
+// relevant if Error is 0.
+// Flags: userspace notification response flag (e.g., NotifRespFlagContinue)
+//
+type ScmpNotifResp struct {
+ ID uint64 `json:"id,omitempty"`
+ Error int32 `json:"error,omitempty"`
+ Val uint64 `json:"val,omitempty"`
+ Flags uint32 `json:"flags,omitempty"`
+}
+
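+// The structures above are the package's surface for seccomp userspace
+// notifications. A hedged sketch of the receive/respond loop they support
+// follows; the choice of mkdir and the allow-everything policy are invented
+// for illustration, this needs libseccomp API level 6, and in practice the
+// supervisor loop runs in a separate process or thread from the filtered task:
+//
+//	package main
+//
+//	import (
+//		"log"
+//
+//		seccomp "github.com/seccomp/libseccomp-golang"
+//	)
+//
+//	func main() {
+//		filter, err := seccomp.NewFilter(seccomp.ActAllow)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		sc, err := seccomp.GetSyscallFromName("mkdir")
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		if err := filter.AddRule(sc, seccomp.ActNotify); err != nil {
+//			log.Fatal(err)
+//		}
+//		if err := filter.Load(); err != nil { // installs the filter
+//			log.Fatal(err)
+//		}
+//		fd, err := filter.GetNotifFd()
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for {
+//			req, err := seccomp.NotifReceive(fd)
+//			if err != nil {
+//				log.Fatal(err)
+//			}
+//			// Let every intercepted mkdir proceed; a real supervisor
+//			// would inspect req.Data before deciding.
+//			resp := &seccomp.ScmpNotifResp{
+//				ID:    req.ID,
+//				Flags: seccomp.NotifRespFlagContinue,
+//			}
+//			if err := seccomp.NotifRespond(fd, resp); err != nil {
+//				log.Fatal(err)
+//			}
+//		}
+//	}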
// Exported Constants
const (
@@ -83,40 +134,46 @@ const (
// variables are invalid
ArchInvalid ScmpArch = iota
// ArchNative is the native architecture of the kernel
- ArchNative ScmpArch = iota
+ ArchNative
// ArchX86 represents 32-bit x86 syscalls
- ArchX86 ScmpArch = iota
+ ArchX86
// ArchAMD64 represents 64-bit x86-64 syscalls
- ArchAMD64 ScmpArch = iota
+ ArchAMD64
// ArchX32 represents 64-bit x86-64 syscalls (32-bit pointers)
- ArchX32 ScmpArch = iota
+ ArchX32
// ArchARM represents 32-bit ARM syscalls
- ArchARM ScmpArch = iota
+ ArchARM
// ArchARM64 represents 64-bit ARM syscalls
- ArchARM64 ScmpArch = iota
+ ArchARM64
// ArchMIPS represents 32-bit MIPS syscalls
- ArchMIPS ScmpArch = iota
+ ArchMIPS
// ArchMIPS64 represents 64-bit MIPS syscalls
- ArchMIPS64 ScmpArch = iota
+ ArchMIPS64
// ArchMIPS64N32 represents 64-bit MIPS syscalls (32-bit pointers)
- ArchMIPS64N32 ScmpArch = iota
+ ArchMIPS64N32
// ArchMIPSEL represents 32-bit MIPS syscalls (little endian)
- ArchMIPSEL ScmpArch = iota
+ ArchMIPSEL
// ArchMIPSEL64 represents 64-bit MIPS syscalls (little endian)
- ArchMIPSEL64 ScmpArch = iota
+ ArchMIPSEL64
// ArchMIPSEL64N32 represents 64-bit MIPS syscalls (little endian,
// 32-bit pointers)
- ArchMIPSEL64N32 ScmpArch = iota
+ ArchMIPSEL64N32
// ArchPPC represents 32-bit POWERPC syscalls
- ArchPPC ScmpArch = iota
+ ArchPPC
// ArchPPC64 represents 64-bit POWER syscalls (big endian)
- ArchPPC64 ScmpArch = iota
+ ArchPPC64
// ArchPPC64LE represents 64-bit POWER syscalls (little endian)
- ArchPPC64LE ScmpArch = iota
+ ArchPPC64LE
// ArchS390 represents 31-bit System z/390 syscalls
- ArchS390 ScmpArch = iota
+ ArchS390
// ArchS390X represents 64-bit System z/390 syscalls
- ArchS390X ScmpArch = iota
+ ArchS390X
+ // ArchPARISC represents 32-bit PA-RISC
+ ArchPARISC
+ // ArchPARISC64 represents 64-bit PA-RISC
+ ArchPARISC64
+ // ArchRISCV64 represents RISCV64
+ ArchRISCV64
)
const (
@@ -125,31 +182,36 @@ const (
// ActInvalid is a placeholder to ensure uninitialized ScmpAction
// variables are invalid
ActInvalid ScmpAction = iota
- // ActKill kills the thread that violated the rule. It is the same as ActKillThread.
+ // ActKillThread kills the thread that violated the rule.
// All other threads from the same thread group will continue to execute.
- ActKill ScmpAction = iota
+ ActKillThread
// ActTrap throws SIGSYS
- ActTrap ScmpAction = iota
+ ActTrap
+ // ActNotify triggers a userspace notification. This action is only usable when
+ // libseccomp API level 6 or higher is supported.
+ ActNotify
// ActErrno causes the syscall to return a negative error code. This
// code can be set with the SetReturnCode method
- ActErrno ScmpAction = iota
+ ActErrno
// ActTrace causes the syscall to notify tracing processes with the
// given error code. This code can be set with the SetReturnCode method
- ActTrace ScmpAction = iota
+ ActTrace
// ActAllow permits the syscall to continue execution
- ActAllow ScmpAction = iota
+ ActAllow
// ActLog permits the syscall to continue execution after logging it.
// This action is only usable when libseccomp API level 3 or higher is
// supported.
- ActLog ScmpAction = iota
- // ActKillThread kills the thread that violated the rule. It is the same as ActKill.
- // All other threads from the same thread group will continue to execute.
- ActKillThread ScmpAction = iota
+ ActLog
// ActKillProcess kills the process that violated the rule.
// All threads in the thread group are also terminated.
// This action is only usable when libseccomp API level 3 or higher is
// supported.
- ActKillProcess ScmpAction = iota
+ ActKillProcess
+ // ActKill kills the thread that violated the rule.
+ // All other threads from the same thread group will continue to execute.
+ //
+ // Deprecated: use ActKillThread
+ ActKill = ActKillThread
)
const (
@@ -162,23 +224,37 @@ const (
CompareInvalid ScmpCompareOp = iota
// CompareNotEqual returns true if the argument is not equal to the
// given value
- CompareNotEqual ScmpCompareOp = iota
+ CompareNotEqual
// CompareLess returns true if the argument is less than the given value
- CompareLess ScmpCompareOp = iota
+ CompareLess
// CompareLessOrEqual returns true if the argument is less than or equal
// to the given value
- CompareLessOrEqual ScmpCompareOp = iota
+ CompareLessOrEqual
// CompareEqual returns true if the argument is equal to the given value
- CompareEqual ScmpCompareOp = iota
+ CompareEqual
// CompareGreaterEqual returns true if the argument is greater than or
// equal to the given value
- CompareGreaterEqual ScmpCompareOp = iota
+ CompareGreaterEqual
// CompareGreater returns true if the argument is greater than the given
// value
- CompareGreater ScmpCompareOp = iota
- // CompareMaskedEqual returns true if the argument is equal to the given
- // value, when masked (bitwise &) against the second given value
- CompareMaskedEqual ScmpCompareOp = iota
+ CompareGreater
+ // CompareMaskedEqual returns true if the masked argument value is
+ // equal to the masked datum value. Mask is the first argument, and
+ // datum is the second one.
+ CompareMaskedEqual
+)
+
+// ErrSyscallDoesNotExist represents an error condition where
+// libseccomp is unable to resolve the syscall
+var ErrSyscallDoesNotExist = fmt.Errorf("could not resolve syscall name")
+
+const (
+ // Userspace notification response flags
+
+ // NotifRespFlagContinue tells the kernel to continue executing the system
+ // call that triggered the notification. Must only be used when the notification
+ // response's error is 0.
+ NotifRespFlagContinue uint32 = 1
)
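
The clarified CompareMaskedEqual doc pins down the operand order: the first value is the mask, the second the datum compared after masking. A small sketch under that reading; the personality(2) rule is a made-up example, and AddRuleConditional is assumed from elsewhere in this package:

```go
package main

import (
	"log"
	"syscall"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	// Allow personality(2) only when (arg0 & 0xffffffff) == 0x0 (PER_LINUX);
	// everything else in the filter fails with EPERM.
	cond, err := seccomp.MakeCondition(0, seccomp.CompareMaskedEqual, 0xffffffff, 0x0)
	if err != nil {
		log.Fatal(err)
	}
	filter, err := seccomp.NewFilter(seccomp.ActErrno.SetReturnCode(int16(syscall.EPERM)))
	if err != nil {
		log.Fatal(err)
	}
	sc, err := seccomp.GetSyscallFromName("personality")
	if err != nil {
		log.Fatal(err)
	}
	if err := filter.AddRuleConditional(sc, seccomp.ActAllow, []seccomp.ScmpCondition{cond}); err != nil {
		log.Fatal(err)
	}
}
```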
// Helpers for types
@@ -223,6 +299,12 @@ func GetArchFromString(arch string) (ScmpArch, error) {
return ArchS390, nil
case "s390x":
return ArchS390X, nil
+ case "parisc":
+ return ArchPARISC, nil
+ case "parisc64":
+ return ArchPARISC64, nil
+ case "riscv64":
+ return ArchRISCV64, nil
default:
return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %q", arch)
}
@@ -263,6 +345,12 @@ func (a ScmpArch) String() string {
return "s390"
case ArchS390X:
return "s390x"
+ case ArchPARISC:
+ return "parisc"
+ case ArchPARISC64:
+ return "parisc64"
+ case ArchRISCV64:
+ return "riscv64"
case ArchNative:
return "native"
case ArchInvalid:
@@ -299,7 +387,7 @@ func (a ScmpCompareOp) String() string {
// String returns a string representation of a seccomp match action
func (a ScmpAction) String() string {
switch a & 0xFFFF {
- case ActKill, ActKillThread:
+ case ActKillThread:
return "Action: Kill thread"
case ActKillProcess:
return "Action: Kill process"
@@ -310,6 +398,8 @@ func (a ScmpAction) String() string {
case ActTrace:
return fmt.Sprintf("Action: Notify tracing processes with code %d",
(a >> 16))
+ case ActNotify:
+ return "Action: Notify userspace"
case ActLog:
return "Action: Log system call"
case ActAllow:
@@ -349,7 +439,7 @@ func GetLibraryVersion() (major, minor, micro uint) {
// Returns a positive int containing the API level, or 0 with an error if the
// API level could not be detected due to the library being older than v2.4.0.
// See the seccomp_api_get(3) man page for details on available API levels:
-// https://github.com/seccomp/libseccomp/blob/master/doc/man/man3/seccomp_api_get.3
+// https://github.com/seccomp/libseccomp/blob/main/doc/man/man3/seccomp_api_get.3
func GetAPI() (uint, error) {
return getAPI()
}
@@ -359,7 +449,7 @@ func GetAPI() (uint, error) {
// Returns an error if the API level could not be set. An error is always
// returned if the library is older than v2.4.0
// See the seccomp_api_get(3) man page for details on available API levels:
-// https://github.com/seccomp/libseccomp/blob/master/doc/man/man3/seccomp_api_get.3
+// https://github.com/seccomp/libseccomp/blob/main/doc/man/man3/seccomp_api_get.3
func SetAPI(api uint) error {
return setAPI(api)
}
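
A small sketch of how callers typically gate features on the API level documented here, using only GetAPI from this package:

package example

import seccomp "github.com/seccomp/libseccomp-golang"

// notifyAvailable reports whether ActNotify can be used (API level 6).
// GetAPI returns 0 with an error on libseccomp older than v2.4.0.
func notifyAvailable() bool {
	level, err := seccomp.GetAPI()
	return err == nil && level >= 6
}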
@@ -386,7 +476,7 @@ func (s ScmpSyscall) GetNameByArch(arch ScmpArch) (string, error) {
cString := C.seccomp_syscall_resolve_num_arch(arch.toNative(), C.int(s))
if cString == nil {
- return "", fmt.Errorf("could not resolve syscall name for %#x", int32(s))
+ return "", ErrSyscallDoesNotExist
}
defer C.free(unsafe.Pointer(cString))
@@ -409,7 +499,7 @@ func GetSyscallFromName(name string) (ScmpSyscall, error) {
result := C.seccomp_syscall_resolve_name(cString)
if result == scmpError {
- return 0, fmt.Errorf("could not resolve name to syscall: %q", name)
+ return 0, ErrSyscallDoesNotExist
}
return ScmpSyscall(result), nil
@@ -433,7 +523,7 @@ func GetSyscallFromNameByArch(name string, arch ScmpArch) (ScmpSyscall, error) {
result := C.seccomp_syscall_resolve_name_arch(arch.toNative(), cString)
if result == scmpError {
- return 0, fmt.Errorf("could not resolve name to syscall: %q on %v", name, arch)
+ return 0, ErrSyscallDoesNotExist
}
return ScmpSyscall(result), nil
@@ -459,8 +549,8 @@ func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCo
return condStruct, err
}
- if comparison == CompareInvalid {
- return condStruct, fmt.Errorf("invalid comparison operator")
+ if err := sanitizeCompareOp(comparison); err != nil {
+ return condStruct, err
} else if arg > 5 {
return condStruct, fmt.Errorf("syscalls only have up to 6 arguments (%d given)", arg)
} else if len(values) > 2 {
@@ -506,11 +596,10 @@ type ScmpFilter struct {
lock sync.Mutex
}
-// NewFilter creates and returns a new filter context.
-// Accepts a default action to be taken for syscalls which match no rules in
-// the filter.
-// Returns a reference to a valid filter context, or nil and an error if the
-// filter context could not be created or an invalid default action was given.
+// NewFilter creates and returns a new filter context. Accepts a default action to be
+// taken for syscalls which match no rules in the filter.
+// Returns a reference to a valid filter context, or nil and an error
+// if the filter context could not be created or an invalid default action was given.
func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) {
if err := ensureSupportedVersion(); err != nil {
return nil, err
@@ -530,8 +619,8 @@ func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) {
filter.valid = true
runtime.SetFinalizer(filter, filterFinalizer)
- // Enable TSync so all goroutines will receive the same rules
- // If the kernel does not support TSYNC, allow us to continue without error
+ // Enable TSync so all goroutines will receive the same rules.
+ // If the kernel does not support TSYNC, allow us to continue without error.
if err := filter.setFilterAttr(filterAttrTsync, 0x1); err != nil && err != syscall.ENOTSUP {
filter.Release()
return nil, fmt.Errorf("could not create filter - error setting tsync bit: %v", err)
@@ -778,9 +867,8 @@ func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) {
func (f *ScmpFilter) GetLogBit() (bool, error) {
log, err := f.getFilterAttr(filterAttrLog)
if err != nil {
- api, apiErr := getAPI()
- if (apiErr != nil && api == 0) || (apiErr == nil && api < 3) {
- return false, fmt.Errorf("getting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher")
+ if e := checkAPI("GetLogBit", 3, 2, 4, 0); e != nil {
+ err = e
}
return false, err
@@ -793,6 +881,65 @@ func (f *ScmpFilter) GetLogBit() (bool, error) {
return true, nil
}
+// GetSSB returns the current state the SSB bit will be set to on the filter
+// being loaded, or an error if an issue was encountered retrieving the value.
+// The SSB bit tells the kernel that a seccomp user is not interested in enabling
+// Speculative Store Bypass mitigation.
+// The SSB bit is only usable when libseccomp API level 4 or higher is
+// supported.
+func (f *ScmpFilter) GetSSB() (bool, error) {
+ ssb, err := f.getFilterAttr(filterAttrSSB)
+ if err != nil {
+ if e := checkAPI("GetSSB", 4, 2, 5, 0); e != nil {
+ err = e
+ }
+
+ return false, err
+ }
+
+ if ssb == 0 {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// GetOptimize returns the current optimization level of the filter,
+// or an error if an issue was encountered retrieving the value.
+// See SetOptimize for more details.
+func (f *ScmpFilter) GetOptimize() (int, error) {
+ level, err := f.getFilterAttr(filterAttrOptimize)
+ if err != nil {
+ if e := checkAPI("GetOptimize", 4, 2, 5, 0); e != nil {
+ err = e
+ }
+
+ return 0, err
+ }
+
+ return int(level), nil
+}
+
+// GetRawRC returns the current state of the RawRC flag, or an error
+// if an issue was encountered retrieving the value.
+// See SetRawRC for more details.
+func (f *ScmpFilter) GetRawRC() (bool, error) {
+ rawrc, err := f.getFilterAttr(filterAttrRawRC)
+ if err != nil {
+ if e := checkAPI("GetRawRC", 4, 2, 5, 0); e != nil {
+ err = e
+ }
+
+ return false, err
+ }
+
+ if rawrc == 0 {
+ return false, nil
+ }
+
+ return true, nil
+}
+
// SetBadArchAction sets the default action taken on a syscall for an
// architecture not in the filter, or an error if an issue was encountered
// setting the value.
@@ -832,9 +979,73 @@ func (f *ScmpFilter) SetLogBit(state bool) error {
err := f.setFilterAttr(filterAttrLog, toSet)
if err != nil {
- api, apiErr := getAPI()
- if (apiErr != nil && api == 0) || (apiErr == nil && api < 3) {
- return fmt.Errorf("setting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher")
+ if e := checkAPI("SetLogBit", 3, 2, 4, 0); e != nil {
+ err = e
+ }
+ }
+
+ return err
+}
+
+// SetSSB sets the state of the SSB bit, which will be applied on filter
+// load, or an error if an issue was encountered setting the value.
+// The SSB bit is only usable when libseccomp API level 4 or higher is
+// supported.
+func (f *ScmpFilter) SetSSB(state bool) error {
+ var toSet C.uint32_t = 0x0
+
+ if state {
+ toSet = 0x1
+ }
+
+ err := f.setFilterAttr(filterAttrSSB, toSet)
+ if err != nil {
+ if e := checkAPI("SetSSB", 4, 2, 5, 0); e != nil {
+ err = e
+ }
+ }
+
+ return err
+}
+
+// SetOptimize sets optimization level of the seccomp filter. By default
+// libseccomp generates a set of sequential "if" statements for each rule in
+// the filter. SetSyscallPriority can be used to prioritize the order for the
+// default case. The binary tree optimization sorts by syscall numbers and
+// generates consistent O(log n) filter traversal for every rule in the filter.
+// The binary tree may be advantageous for large filters. Note that
+// SetSyscallPriority is ignored when level == 2.
+//
+// The different optimization levels are:
+// 0: Reserved value, not currently used.
+// 1: Rules sorted by priority and complexity (DEFAULT).
+// 2: Binary tree sorted by syscall number.
+func (f *ScmpFilter) SetOptimize(level int) error {
+ cLevel := C.uint32_t(level)
+
+ err := f.setFilterAttr(filterAttrOptimize, cLevel)
+ if err != nil {
+ if e := checkAPI("SetOptimize", 4, 2, 5, 0); e != nil {
+ err = e
+ }
+ }
+
+ return err
+}
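
An illustrative sketch of the level-2 trade-off described in the SetOptimize comment: the binary tree helps large allowlists, at the cost of SetSyscallPriority being ignored. The error note assumes the *VersionError type used elsewhere in this file:

package example

import seccomp "github.com/seccomp/libseccomp-golang"

// buildLargeFilter allowlists many syscalls and opts into binary-tree lookup.
func buildLargeFilter(calls []seccomp.ScmpSyscall) (*seccomp.ScmpFilter, error) {
	f, err := seccomp.NewFilter(seccomp.ActErrno)
	if err != nil {
		return nil, err
	}
	// Level 2: sort rules into a binary tree by syscall number.
	if err := f.SetOptimize(2); err != nil {
		f.Release()
		return nil, err // a *VersionError on libseccomp < v2.5.0 / API < 4
	}
	for _, c := range calls {
		if err := f.AddRule(c, seccomp.ActAllow); err != nil {
			f.Release()
			return nil, err
		}
	}
	return f, nil
}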
+
+// SetRawRC sets whether libseccomp should pass system error codes back to the
+// caller, instead of the default ECANCELED. Defaults to false.
+func (f *ScmpFilter) SetRawRC(state bool) error {
+ var toSet C.uint32_t = 0x0
+
+ if state {
+ toSet = 0x1
+ }
+
+ err := f.setFilterAttr(filterAttrRawRC, toSet)
+ if err != nil {
+ if e := checkAPI("SetRawRC", 4, 2, 5, 0); e != nil {
+ err = e
}
}
@@ -885,9 +1096,6 @@ func (f *ScmpFilter) AddRuleExact(call ScmpSyscall, action ScmpAction) error {
// AddRuleConditional adds a single rule for a conditional action on a syscall.
// Returns an error if an issue was encountered adding the rule.
// All conditions must match for the rule to match.
-// There is a bug in library versions below v2.2.1 which can, in some cases,
-// cause conditions to be lost when more than one are used. Consequently,
-// AddRuleConditional is disabled on library versions lower than v2.2.1
func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
return f.addRuleGeneric(call, action, false, conds)
}
@@ -899,9 +1107,6 @@ func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, con
// The rule will function exactly as described, but it may not function identically
// (or be able to be applied to) all architectures.
// Returns an error if an issue was encountered adding the rule.
-// There is a bug in library versions below v2.2.1 which can, in some cases,
-// cause conditions to be lost when more than one are used. Consequently,
-// AddRuleConditionalExact is disabled on library versions lower than v2.2.1
func (f *ScmpFilter) AddRuleConditionalExact(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
return f.addRuleGeneric(call, action, true, conds)
}
@@ -947,3 +1152,36 @@ func (f *ScmpFilter) ExportBPF(file *os.File) error {
return nil
}
+
+// Userspace Notification API
+
+// GetNotifFd returns the userspace notification file descriptor associated with the given
+// filter context. Such a file descriptor is only valid after the filter has been loaded
+// and only when the filter uses the ActNotify action. The file descriptor can be used to
+// retrieve and respond to notifications associated with the filter (see NotifReceive(),
+// NotifRespond(), and NotifIDValid()).
+func (f *ScmpFilter) GetNotifFd() (ScmpFd, error) {
+ return f.getNotifFd()
+}
+
+// NotifReceive retrieves a seccomp userspace notification from a filter whose ActNotify
+// action has triggered. The caller is expected to process the notification and return a
+// response via NotifRespond(). Each invocation of this function returns one
+// notification. As multiple notifications may be pending at any time, this function is
+// normally called within a polling loop.
+func NotifReceive(fd ScmpFd) (*ScmpNotifReq, error) {
+ return notifReceive(fd)
+}
+
+// NotifRespond responds to a notification retrieved via NotifReceive(). The response Id
+// must match that of the corresponding notification retrieved via NotifReceive().
+func NotifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error {
+ return notifRespond(fd, scmpResp)
+}
+
+// NotifIDValid checks if a notification is still valid. A return value of nil means the
+// notification is still valid; any other return value means it is not. This can be used
+// to mitigate time-of-check-time-of-use (TOCTOU) attacks as described in seccomp_notify_id_valid(2).
+func NotifIDValid(fd ScmpFd, id uint64) error {
+ return notifIDValid(fd, id)
+}
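
Tying the four functions above together, a minimal supervisor loop under the usual assumptions (the filter was built with ActNotify and has already been loaded); the blanket EPERM response is illustrative:

package example

import (
	"syscall"

	seccomp "github.com/seccomp/libseccomp-golang"
)

// supervise receives notifications and denies every trapped syscall.
func supervise(filter *seccomp.ScmpFilter) error {
	fd, err := filter.GetNotifFd()
	if err != nil {
		return err
	}
	for {
		req, err := seccomp.NotifReceive(fd)
		if err != nil {
			return err
		}
		// Mitigate TOCTOU: confirm the notification is still valid before
		// acting on anything derived from it.
		if err := seccomp.NotifIDValid(fd, req.ID); err != nil {
			continue // the requesting task is gone; drop the notification
		}
		resp := &seccomp.ScmpNotifResp{
			ID:    req.ID,
			Error: int32(syscall.EPERM), // negated for the kernel by the bindings
		}
		if err := seccomp.NotifRespond(fd, resp); err != nil {
			return err
		}
	}
}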
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
index 0982e930f9e..df4dfb7eba8 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
@@ -1,11 +1,10 @@
-// +build linux
-
// Internal functions for libseccomp Go bindings
// No exported functions
package seccomp
import (
+ "errors"
"fmt"
"syscall"
)
@@ -14,16 +13,23 @@ import (
// Get the seccomp header in scope
// Need stdlib.h for free() on cstrings
+// To compile libseccomp-golang against a specific version of libseccomp:
+// cd ../libseccomp && mkdir -p prefix
+// ./configure --prefix=$PWD/prefix && make && make install
+// cd ../libseccomp-golang
+// PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make
+// LD_PRELOAD=$PWD/../libseccomp/prefix/lib/libseccomp.so.2.5.0 PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make test
+
// #cgo pkg-config: libseccomp
/*
#include <errno.h>
#include <stdlib.h>
#include <seccomp.h>
-#if SCMP_VER_MAJOR < 2
-#error Minimum supported version of Libseccomp is v2.2.0
-#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2
-#error Minimum supported version of Libseccomp is v2.2.0
+#if (SCMP_VER_MAJOR < 2) || \
+ (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 3) || \
+ (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR == 3 && SCMP_VER_MICRO < 1)
+#error This package requires libseccomp >= v2.3.1
#endif
#define ARCH_BAD ~0
@@ -50,6 +56,18 @@ const uint32_t C_ARCH_BAD = ARCH_BAD;
#define SCMP_ARCH_S390X ARCH_BAD
#endif
+#ifndef SCMP_ARCH_PARISC
+#define SCMP_ARCH_PARISC ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_PARISC64
+#define SCMP_ARCH_PARISC64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_RISCV64
+#define SCMP_ARCH_RISCV64 ARCH_BAD
+#endif
+
const uint32_t C_ARCH_NATIVE = SCMP_ARCH_NATIVE;
const uint32_t C_ARCH_X86 = SCMP_ARCH_X86;
const uint32_t C_ARCH_X86_64 = SCMP_ARCH_X86_64;
@@ -67,6 +85,9 @@ const uint32_t C_ARCH_PPC64 = SCMP_ARCH_PPC64;
const uint32_t C_ARCH_PPC64LE = SCMP_ARCH_PPC64LE;
const uint32_t C_ARCH_S390 = SCMP_ARCH_S390;
const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X;
+const uint32_t C_ARCH_PARISC = SCMP_ARCH_PARISC;
+const uint32_t C_ARCH_PARISC64 = SCMP_ARCH_PARISC64;
+const uint32_t C_ARCH_RISCV64 = SCMP_ARCH_RISCV64;
#ifndef SCMP_ACT_LOG
#define SCMP_ACT_LOG 0x7ffc0000U
@@ -80,6 +101,10 @@ const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X;
#define SCMP_ACT_KILL_THREAD 0x00000000U
#endif
+#ifndef SCMP_ACT_NOTIFY
+#define SCMP_ACT_NOTIFY 0x7fc00000U
+#endif
+
const uint32_t C_ACT_KILL = SCMP_ACT_KILL;
const uint32_t C_ACT_KILL_PROCESS = SCMP_ACT_KILL_PROCESS;
const uint32_t C_ACT_KILL_THREAD = SCMP_ACT_KILL_THREAD;
@@ -88,19 +113,29 @@ const uint32_t C_ACT_ERRNO = SCMP_ACT_ERRNO(0);
const uint32_t C_ACT_TRACE = SCMP_ACT_TRACE(0);
const uint32_t C_ACT_LOG = SCMP_ACT_LOG;
const uint32_t C_ACT_ALLOW = SCMP_ACT_ALLOW;
+const uint32_t C_ACT_NOTIFY = SCMP_ACT_NOTIFY;
// The libseccomp SCMP_FLTATR_CTL_LOG member of the scmp_filter_attr enum was
// added in v2.4.0
-#if (SCMP_VER_MAJOR < 2) || \
- (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4)
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4
#define SCMP_FLTATR_CTL_LOG _SCMP_FLTATR_MIN
#endif
-const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
-const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
-const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP;
-const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC;
-const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG;
+// The following SCMP_FLTATR_* were added in libseccomp v2.5.0.
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5
+#define SCMP_FLTATR_CTL_SSB _SCMP_FLTATR_MIN
+#define SCMP_FLTATR_CTL_OPTIMIZE _SCMP_FLTATR_MIN
+#define SCMP_FLTATR_API_SYSRAWRC _SCMP_FLTATR_MIN
+#endif
+
+const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
+const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
+const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP;
+const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC;
+const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG;
+const uint32_t C_ATTRIBUTE_SSB = (uint32_t)SCMP_FLTATR_CTL_SSB;
+const uint32_t C_ATTRIBUTE_OPTIMIZE = (uint32_t)SCMP_FLTATR_CTL_OPTIMIZE;
+const uint32_t C_ATTRIBUTE_SYSRAWRC = (uint32_t)SCMP_FLTATR_API_SYSRAWRC;
const int C_CMP_NE = (int)SCMP_CMP_NE;
const int C_CMP_LT = (int)SCMP_CMP_LT;
@@ -147,8 +182,7 @@ unsigned int get_micro_version()
#endif
// The libseccomp API level functions were added in v2.4.0
-#if (SCMP_VER_MAJOR < 2) || \
- (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4)
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4
const unsigned int seccomp_api_get(void)
{
// libseccomp-golang requires libseccomp v2.2.0, at a minimum, which
@@ -189,6 +223,50 @@ void add_struct_arg_cmp(
return;
}
+
+// The seccomp notify API functions were added in v2.5.0
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5
+
+struct seccomp_data {
+ int nr;
+ __u32 arch;
+ __u64 instruction_pointer;
+ __u64 args[6];
+};
+
+struct seccomp_notif {
+ __u64 id;
+ __u32 pid;
+ __u32 flags;
+ struct seccomp_data data;
+};
+
+struct seccomp_notif_resp {
+ __u64 id;
+ __s64 val;
+ __s32 error;
+ __u32 flags;
+};
+
+int seccomp_notify_alloc(struct seccomp_notif **req, struct seccomp_notif_resp **resp) {
+ return -EOPNOTSUPP;
+}
+int seccomp_notify_fd(const scmp_filter_ctx ctx) {
+ return -EOPNOTSUPP;
+}
+void seccomp_notify_free(struct seccomp_notif *req, struct seccomp_notif_resp *resp) {
+}
+int seccomp_notify_id_valid(int fd, uint64_t id) {
+ return -EOPNOTSUPP;
+}
+int seccomp_notify_receive(int fd, struct seccomp_notif *req) {
+ return -EOPNOTSUPP;
+}
+int seccomp_notify_respond(int fd, struct seccomp_notif_resp *resp) {
+ return -EOPNOTSUPP;
+}
+
+#endif
*/
import "C"
@@ -199,10 +277,13 @@ type scmpFilterAttr uint32
const (
filterAttrActDefault scmpFilterAttr = iota
- filterAttrActBadArch scmpFilterAttr = iota
- filterAttrNNP scmpFilterAttr = iota
- filterAttrTsync scmpFilterAttr = iota
- filterAttrLog scmpFilterAttr = iota
+ filterAttrActBadArch
+ filterAttrNNP
+ filterAttrTsync
+ filterAttrLog
+ filterAttrSSB
+ filterAttrOptimize
+ filterAttrRawRC
)
const (
@@ -210,9 +291,9 @@ const (
scmpError C.int = -1
// Comparison boundaries to check for architecture validity
archStart ScmpArch = ArchNative
- archEnd ScmpArch = ArchS390X
+ archEnd ScmpArch = ArchRISCV64
// Comparison boundaries to check for action validity
- actionStart ScmpAction = ActKill
+ actionStart ScmpAction = ActKillThread
actionEnd ScmpAction = ActKillProcess
// Comparison boundaries to check for comparison operator validity
compareOpStart ScmpCompareOp = CompareNotEqual
@@ -220,8 +301,9 @@ const (
)
var (
- // Error thrown on bad filter context
- errBadFilter = fmt.Errorf("filter is invalid or uninitialized")
+ // errBadFilter is thrown on bad filter context.
+ errBadFilter = errors.New("filter is invalid or uninitialized")
+ errDefAction = errors.New("requested action matches default action of filter")
// Constants representing library major, minor, and micro versions
verMajor = uint(C.get_major_version())
verMinor = uint(C.get_minor_version())
@@ -230,19 +312,28 @@ var (
// Nonexported functions
-// Check if library version is greater than or equal to the given one
-func checkVersionAbove(major, minor, micro uint) bool {
- return (verMajor > major) ||
+// checkVersion returns an error if the libseccomp version being used
+// is less than the one specified by major, minor, and micro arguments.
+// Argument op is an arbitrary non-empty operation description, which
+// is used as a part of the error message returned.
+//
+// Most users should use checkAPI instead.
+func checkVersion(op string, major, minor, micro uint) error {
+ if (verMajor > major) ||
(verMajor == major && verMinor > minor) ||
- (verMajor == major && verMinor == minor && verMicro >= micro)
+ (verMajor == major && verMinor == minor && verMicro >= micro) {
+ return nil
+ }
+ return &VersionError{
+ op: op,
+ major: major,
+ minor: minor,
+ micro: micro,
+ }
}
-// Ensure that the library is supported, i.e. >= 2.2.0.
func ensureSupportedVersion() error {
- if !checkVersionAbove(2, 2, 0) {
- return VersionError{}
- }
- return nil
+ return checkVersion("seccomp", 2, 3, 1)
}
// Get the API level
@@ -334,8 +425,10 @@ func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact b
switch e := errRc(retCode); e {
case syscall.EFAULT:
return fmt.Errorf("unrecognized syscall %#x", int32(call))
- case syscall.EPERM:
- return fmt.Errorf("requested action matches default action of filter")
+ // libseccomp >= v2.5.0 returns EACCES, older versions return EPERM.
+ // TODO: remove EPERM once libseccomp < v2.5.0 is not supported.
+ case syscall.EPERM, syscall.EACCES:
+ return errDefAction
case syscall.EINVAL:
return fmt.Errorf("two checks on same syscall argument")
default:
@@ -360,14 +453,6 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b
return err
}
} else {
- // We don't support conditional filtering in library version v2.1
- if !checkVersionAbove(2, 2, 1) {
- return VersionError{
- message: "conditional filtering is not supported",
- minimum: "2.2.1",
- }
- }
-
argsArr := C.make_arg_cmp_array(C.uint(len(conds)))
if argsArr == nil {
return fmt.Errorf("error allocating memory for conditions")
@@ -460,6 +545,12 @@ func archFromNative(a C.uint32_t) (ScmpArch, error) {
return ArchS390, nil
case C.C_ARCH_S390X:
return ArchS390X, nil
+ case C.C_ARCH_PARISC:
+ return ArchPARISC, nil
+ case C.C_ARCH_PARISC64:
+ return ArchPARISC64, nil
+ case C.C_ARCH_RISCV64:
+ return ArchRISCV64, nil
default:
return 0x0, fmt.Errorf("unrecognized architecture %#x", uint32(a))
}
@@ -500,6 +591,12 @@ func (a ScmpArch) toNative() C.uint32_t {
return C.C_ARCH_S390
case ArchS390X:
return C.C_ARCH_S390X
+ case ArchPARISC:
+ return C.C_ARCH_PARISC
+ case ArchPARISC64:
+ return C.C_ARCH_PARISC64
+ case ArchRISCV64:
+ return C.C_ARCH_RISCV64
case ArchNative:
return C.C_ARCH_NATIVE
default:
@@ -532,8 +629,6 @@ func (a ScmpCompareOp) toNative() C.int {
func actionFromNative(a C.uint32_t) (ScmpAction, error) {
aTmp := a & 0xFFFF
switch a & 0xFFFF0000 {
- case C.C_ACT_KILL:
- return ActKill, nil
case C.C_ACT_KILL_PROCESS:
return ActKillProcess, nil
case C.C_ACT_KILL_THREAD:
@@ -548,6 +643,8 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) {
return ActLog, nil
case C.C_ACT_ALLOW:
return ActAllow, nil
+ case C.C_ACT_NOTIFY:
+ return ActNotify, nil
default:
return 0x0, fmt.Errorf("unrecognized action %#x", uint32(a))
}
@@ -556,8 +653,6 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) {
// Only use with sanitized actions, no error handling
func (a ScmpAction) toNative() C.uint32_t {
switch a & 0xFFFF {
- case ActKill:
- return C.C_ACT_KILL
case ActKillProcess:
return C.C_ACT_KILL_PROCESS
case ActKillThread:
@@ -572,6 +667,8 @@ func (a ScmpAction) toNative() C.uint32_t {
return C.C_ACT_LOG
case ActAllow:
return C.C_ACT_ALLOW
+ case ActNotify:
+ return C.C_ACT_NOTIFY
default:
return 0x0
}
@@ -590,7 +687,197 @@ func (a scmpFilterAttr) toNative() uint32 {
return uint32(C.C_ATTRIBUTE_TSYNC)
case filterAttrLog:
return uint32(C.C_ATTRIBUTE_LOG)
+ case filterAttrSSB:
+ return uint32(C.C_ATTRIBUTE_SSB)
+ case filterAttrOptimize:
+ return uint32(C.C_ATTRIBUTE_OPTIMIZE)
+ case filterAttrRawRC:
+ return uint32(C.C_ATTRIBUTE_SYSRAWRC)
default:
return 0x0
}
}
+
+func syscallFromNative(a C.int) ScmpSyscall {
+ return ScmpSyscall(a)
+}
+
+func notifReqFromNative(req *C.struct_seccomp_notif) (*ScmpNotifReq, error) {
+ scmpArgs := make([]uint64, 6)
+ for i := 0; i < len(scmpArgs); i++ {
+ scmpArgs[i] = uint64(req.data.args[i])
+ }
+
+ arch, err := archFromNative(req.data.arch)
+ if err != nil {
+ return nil, err
+ }
+
+ scmpData := ScmpNotifData{
+ Syscall: syscallFromNative(req.data.nr),
+ Arch: arch,
+ InstrPointer: uint64(req.data.instruction_pointer),
+ Args: scmpArgs,
+ }
+
+ scmpReq := &ScmpNotifReq{
+ ID: uint64(req.id),
+ Pid: uint32(req.pid),
+ Flags: uint32(req.flags),
+ Data: scmpData,
+ }
+
+ return scmpReq, nil
+}
+
+func (scmpResp *ScmpNotifResp) toNative(resp *C.struct_seccomp_notif_resp) {
+ resp.id = C.__u64(scmpResp.ID)
+ resp.val = C.__s64(scmpResp.Val)
+ resp.error = (C.__s32(scmpResp.Error) * -1) // kernel requires a negated value
+ resp.flags = C.__u32(scmpResp.Flags)
+}
+
+// checkAPI checks that both the API level and the seccomp version is equal to
+// or greater than the specified minLevel and major, minor, micro,
+// respectively, and returns an error otherwise. Argument op is an arbitrary
+// non-empty operation description, used as a part of the error message
+// returned.
+func checkAPI(op string, minLevel uint, major, minor, micro uint) error {
+ // Ignore error from getAPI, as it returns level == 0 in case of error.
+ level, _ := getAPI()
+ if level >= minLevel {
+ return checkVersion(op, major, minor, micro)
+ }
+ return &VersionError{
+ op: op,
+ curAPI: level,
+ minAPI: minLevel,
+ major: major,
+ minor: minor,
+ micro: micro,
+ }
+}
+
+// Userspace Notification API
+// Calls to C.seccomp_notify* hidden from seccomp.go
+
+func notifSupported() error {
+ return checkAPI("seccomp notification", 6, 2, 5, 0)
+}
+
+func (f *ScmpFilter) getNotifFd() (ScmpFd, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return -1, errBadFilter
+ }
+ if err := notifSupported(); err != nil {
+ return -1, err
+ }
+
+ fd := C.seccomp_notify_fd(f.filterCtx)
+
+ return ScmpFd(fd), nil
+}
+
+func notifReceive(fd ScmpFd) (*ScmpNotifReq, error) {
+ var req *C.struct_seccomp_notif
+ var resp *C.struct_seccomp_notif_resp
+
+ if err := notifSupported(); err != nil {
+ return nil, err
+ }
+
+ // we only use the request here; the response is unused
+ if retCode := C.seccomp_notify_alloc(&req, &resp); retCode != 0 {
+ return nil, errRc(retCode)
+ }
+
+ defer func() {
+ C.seccomp_notify_free(req, resp)
+ }()
+
+ for {
+ retCode, errno := C.seccomp_notify_receive(C.int(fd), req)
+ if retCode == 0 {
+ break
+ }
+
+ if errno == syscall.EINTR {
+ continue
+ }
+
+ if errno == syscall.ENOENT {
+ return nil, errno
+ }
+
+ return nil, errRc(retCode)
+ }
+
+ return notifReqFromNative(req)
+}
+
+func notifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error {
+ var req *C.struct_seccomp_notif
+ var resp *C.struct_seccomp_notif_resp
+
+ if err := notifSupported(); err != nil {
+ return err
+ }
+
+ // we only use the response here; the request is discarded
+ if retCode := C.seccomp_notify_alloc(&req, &resp); retCode != 0 {
+ return errRc(retCode)
+ }
+
+ defer func() {
+ C.seccomp_notify_free(req, resp)
+ }()
+
+ scmpResp.toNative(resp)
+
+ for {
+ retCode, errno := C.seccomp_notify_respond(C.int(fd), resp)
+ if retCode == 0 {
+ break
+ }
+
+ if errno == syscall.EINTR {
+ continue
+ }
+
+ if errno == syscall.ENOENT {
+ return errno
+ }
+
+ return errRc(retCode)
+ }
+
+ return nil
+}
+
+func notifIDValid(fd ScmpFd, id uint64) error {
+ if err := notifSupported(); err != nil {
+ return err
+ }
+
+ for {
+ retCode, errno := C.seccomp_notify_id_valid(C.int(fd), C.uint64_t(id))
+ if retCode == 0 {
+ break
+ }
+
+ if errno == syscall.EINTR {
+ continue
+ }
+
+ if errno == syscall.ENOENT {
+ return errno
+ }
+
+ return errRc(retCode)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt b/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt
new file mode 100644
index 00000000000..7a01c849864
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt
@@ -0,0 +1,14 @@
+
+Copyright 2021 The Sigstore Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/sigstore/sigstore/LICENSE
similarity index 99%
rename from vendor/github.com/prometheus/client_model/LICENSE
rename to vendor/github.com/sigstore/sigstore/LICENSE
index 261eeb9e9f8..d6456956733 100644
--- a/vendor/github.com/prometheus/client_model/LICENSE
+++ b/vendor/github.com/sigstore/sigstore/LICENSE
@@ -1,3 +1,4 @@
+
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go
new file mode 100644
index 00000000000..348f47bdc84
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go
@@ -0,0 +1,173 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cryptoutils implements support for working with encoded certificates, public keys, and private keys
+package cryptoutils
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "time"
+)
+
+const (
+ // CertificatePEMType is the string "CERTIFICATE" to be used during PEM encoding and decoding
+ CertificatePEMType PEMType = "CERTIFICATE"
+)
+
+// MarshalCertificateToPEM converts the provided X509 certificate into PEM format
+func MarshalCertificateToPEM(cert *x509.Certificate) ([]byte, error) {
+ if cert == nil {
+ return nil, errors.New("nil certificate provided")
+ }
+ return PEMEncode(CertificatePEMType, cert.Raw), nil
+}
+
+// MarshalCertificatesToPEM converts the provided X509 certificates into PEM format
+func MarshalCertificatesToPEM(certs []*x509.Certificate) ([]byte, error) {
+ buf := bytes.Buffer{}
+ for _, cert := range certs {
+ pemBytes, err := MarshalCertificateToPEM(cert)
+ if err != nil {
+ return nil, err
+ }
+ _, _ = buf.Write(pemBytes)
+ }
+ return buf.Bytes(), nil
+}
+
+// UnmarshalCertificatesFromPEM extracts one or more X509 certificates from the provided
+// byte slice, which is assumed to be in PEM-encoded format.
+func UnmarshalCertificatesFromPEM(pemBytes []byte) ([]*x509.Certificate, error) {
+ result := []*x509.Certificate{}
+ remaining := pemBytes
+ remaining = bytes.TrimSpace(remaining)
+
+ for len(remaining) > 0 {
+ var certDer *pem.Block
+ certDer, remaining = pem.Decode(remaining)
+
+ if certDer == nil {
+ return nil, errors.New("error during PEM decoding")
+ }
+
+ cert, err := x509.ParseCertificate(certDer.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, cert)
+ }
+ return result, nil
+}
+
+// UnmarshalCertificatesFromPEMLimited extracts one or more X509 certificates from the provided
+// byte slice, which is assumed to be in PEM-encoded format. It fails if the input
+// contains more than the specified number of certificates; 10 is a reasonable limit.
+func UnmarshalCertificatesFromPEMLimited(pemBytes []byte, iterations int) ([]*x509.Certificate, error) {
+ result := []*x509.Certificate{}
+ remaining := pemBytes
+ remaining = bytes.TrimSpace(remaining)
+
+ count := 0
+ for len(remaining) > 0 {
+ if count == iterations {
+ return nil, errors.New("too many certificates specified in PEM block")
+ }
+ var certDer *pem.Block
+ certDer, remaining = pem.Decode(remaining)
+
+ if certDer == nil {
+ return nil, errors.New("error during PEM decoding")
+ }
+
+ cert, err := x509.ParseCertificate(certDer.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, cert)
+ count++
+ }
+ return result, nil
+}
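
A usage sketch for the two unmarshal helpers above; the file name is hypothetical, and the limit of 10 follows the guidance in the comment:

package main

import (
	"fmt"
	"os"

	"github.com/sigstore/sigstore/pkg/cryptoutils"
)

func main() {
	pemBytes, err := os.ReadFile("chain.pem") // hypothetical input
	if err != nil {
		panic(err)
	}
	// The limited variant caps the number of certificates parsed,
	// guarding against oversized untrusted inputs.
	certs, err := cryptoutils.UnmarshalCertificatesFromPEMLimited(pemBytes, 10)
	if err != nil {
		panic(err)
	}
	for _, c := range certs {
		fmt.Println(c.Subject)
	}
}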
+
+// LoadCertificatesFromPEM extracts one or more X509 certificates from the provided
+// io.Reader.
+func LoadCertificatesFromPEM(pem io.Reader) ([]*x509.Certificate, error) {
+ fileBytes, err := io.ReadAll(pem)
+ if err != nil {
+ return nil, err
+ }
+ return UnmarshalCertificatesFromPEM(fileBytes)
+}
+
+func formatTime(t time.Time) string {
+ return t.UTC().Format(time.RFC3339)
+}
+
+// CheckExpiration verifies that epoch is during the validity period of
+// the certificate provided.
+//
+// It returns nil if issueTime < epoch < expirationTime, and an error otherwise.
+func CheckExpiration(cert *x509.Certificate, epoch time.Time) error {
+ if cert == nil {
+ return errors.New("certificate is nil")
+ }
+ if cert.NotAfter.Before(epoch) {
+ return fmt.Errorf("certificate expiration time %s is before %s", formatTime(cert.NotAfter), formatTime(epoch))
+ }
+ if cert.NotBefore.After(epoch) {
+ return fmt.Errorf("certificate issued time %s is before %s", formatTime(cert.NotBefore), formatTime(epoch))
+ }
+ return nil
+}
+
+// ParseCSR parses a PKCS#10 PEM-encoded CSR.
+func ParseCSR(csr []byte) (*x509.CertificateRequest, error) {
+ derBlock, _ := pem.Decode(csr)
+ if derBlock == nil || derBlock.Bytes == nil {
+ return nil, errors.New("no CSR found while decoding")
+ }
+ correctType := false
+ acceptedHeaders := []string{"CERTIFICATE REQUEST", "NEW CERTIFICATE REQUEST"}
+ for _, v := range acceptedHeaders {
+ if derBlock.Type == v {
+ correctType = true
+ }
+ }
+ if !correctType {
+ return nil, fmt.Errorf("DER type %v is not of any type %v for CSR", derBlock.Type, acceptedHeaders)
+ }
+
+ return x509.ParseCertificateRequest(derBlock.Bytes)
+}
+
+// GenerateSerialNumber creates a compliant serial number as per RFC 5280 4.1.2.2.
+// Serial numbers must be positive, and can be no longer than 20 bytes.
+// The serial number is generated with 159 bits, so that the first bit will always
+// be 0, resulting in a positive serial number.
+func GenerateSerialNumber() (*big.Int, error) {
+ // Pick a random number from 0 to 2^159.
+ serial, err := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil))
+ if err != nil {
+ return nil, errors.New("error generating serial number")
+ }
+ return serial, nil
+}
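
A sketch of how GenerateSerialNumber and CheckExpiration compose when building a certificate template; the one-day validity window is illustrative:

package example

import (
	"crypto/x509"
	"time"

	"github.com/sigstore/sigstore/pkg/cryptoutils"
)

// newTemplate returns a template whose 159-bit serial is guaranteed to be
// positive and at most 20 bytes, as RFC 5280 requires.
func newTemplate() (*x509.Certificate, error) {
	serial, err := cryptoutils.GenerateSerialNumber()
	if err != nil {
		return nil, err
	}
	now := time.Now()
	cert := &x509.Certificate{
		SerialNumber: serial,
		NotBefore:    now,
		NotAfter:     now.Add(24 * time.Hour),
	}
	// Sanity-check that a moment inside the window validates.
	if err := cryptoutils.CheckExpiration(cert, now.Add(time.Minute)); err != nil {
		return nil, err
	}
	return cert, nil
}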
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go
similarity index 71%
rename from vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
rename to vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go
index 6068bd571c2..4e7e73d551b 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go
@@ -1,9 +1,11 @@
-// Copyright 2020 The Prometheus Authors
+//
+// Copyright 2022 The Sigstore Authors.
+//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -11,9 +13,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux
-// +build ppc64 ppc64le
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoPPC
+// Package cryptoutils contains utilities related to handling cryptographic materials.
+package cryptoutils
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go
similarity index 52%
rename from vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
rename to vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go
index bd55b45377d..3fa6e7ba5b5 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go
@@ -1,9 +1,11 @@
-// Copyright 2019 The Prometheus Authors
+//
+// Copyright 2021 The Sigstore Authors.
+//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -11,16 +13,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux,appengine !linux
-
-package util
+package cryptoutils
import (
- "fmt"
+ "encoding/pem"
)
-// SysReadFile is here implemented as a noop for builds that do not support
-// the read syscall. For example Windows, or Linux on Google App Engine.
-func SysReadFile(file string) (string, error) {
- return "", fmt.Errorf("not supported on this platform")
+// PEMType is a specific type for string constants used during PEM encoding and decoding
+type PEMType string
+
+// PEMEncode encodes the specified byte slice in PEM format using the provided type string
+func PEMEncode(typeStr PEMType, bytes []byte) []byte {
+ return pem.EncodeToMemory(&pem.Block{
+ Type: string(typeStr),
+ Bytes: bytes,
+ })
}
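
A one-function sketch of PEMEncode with one of the predefined PEMType constants from this package:

package example

import "github.com/sigstore/sigstore/pkg/cryptoutils"

// encodeCertDER wraps raw DER bytes in "-----BEGIN CERTIFICATE-----" framing.
func encodeCertDER(der []byte) []byte {
	return cryptoutils.PEMEncode(cryptoutils.CertificatePEMType, der)
}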
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go
new file mode 100644
index 00000000000..89dd05e0170
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go
@@ -0,0 +1,94 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cryptoutils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ "golang.org/x/term"
+)
+
+// PassFunc is a type of function that takes a boolean (representing whether confirmation is desired) and returns the password as read, along with an error if one occurred
+type PassFunc func(bool) ([]byte, error)
+
+// Read is for fuzzing
+var Read = readPasswordFn
+
+// readPasswordFn reads the password from the following sources, in order of preference:
+//
+// - COSIGN_PASSWORD environment variable
+//
+// - user input from the terminal (if present)
+//
+// - a password piped in via stdin
+func readPasswordFn() func() ([]byte, error) {
+ if pw, ok := os.LookupEnv("COSIGN_PASSWORD"); ok {
+ return func() ([]byte, error) {
+ return []byte(pw), nil
+ }
+ }
+ if term.IsTerminal(0) {
+ return func() ([]byte, error) {
+ return term.ReadPassword(0)
+ }
+ }
+ // Handle piped in passwords.
+ return func() ([]byte, error) {
+ return io.ReadAll(os.Stdin)
+ }
+}
+
+// StaticPasswordFunc returns a PassFunc which returns the provided password.
+func StaticPasswordFunc(pw []byte) PassFunc {
+ return func(bool) ([]byte, error) {
+ return pw, nil
+ }
+}
+
+// SkipPassword is a PassFunc that does not interact with a user, but
+// simply returns nil for both the password and the error.
+func SkipPassword(_ bool) ([]byte, error) {
+ return nil, nil
+}
+
+// GetPasswordFromStdIn gathers the password from stdin with an
+// optional confirmation step.
+func GetPasswordFromStdIn(confirm bool) ([]byte, error) {
+ read := Read()
+ fmt.Fprint(os.Stderr, "Enter password for private key: ")
+ pw1, err := read()
+ fmt.Fprintln(os.Stderr)
+ if err != nil {
+ return nil, err
+ }
+ if !confirm {
+ return pw1, nil
+ }
+ fmt.Fprint(os.Stderr, "Enter again: ")
+ pw2, err := read()
+ fmt.Fprintln(os.Stderr)
+ if err != nil {
+ return nil, err
+ }
+
+ if string(pw1) != string(pw2) {
+ return nil, errors.New("passwords do not match")
+ }
+ return pw1, nil
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go
new file mode 100644
index 00000000000..b1a0dad05ea
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go
@@ -0,0 +1,152 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cryptoutils
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+
+ "github.com/theupdateframework/go-tuf/encrypted"
+)
+
+const (
+ // PrivateKeyPEMType is the string "PRIVATE KEY" to be used during PEM encoding and decoding
+ PrivateKeyPEMType PEMType = "PRIVATE KEY"
+ // ECPrivateKeyPEMType is the string "EC PRIVATE KEY" used to parse SEC 1 EC private keys
+ ECPrivateKeyPEMType PEMType = "EC PRIVATE KEY"
+ // PKCS1PrivateKeyPEMType is the string "RSA PRIVATE KEY" used to parse PKCS#1-encoded private keys
+ PKCS1PrivateKeyPEMType PEMType = "RSA PRIVATE KEY"
+ encryptedCosignPrivateKeyPEMType PEMType = "ENCRYPTED COSIGN PRIVATE KEY"
+ // EncryptedSigstorePrivateKeyPEMType is the string "ENCRYPTED SIGSTORE PRIVATE KEY" to be used during PEM encoding and decoding
+ EncryptedSigstorePrivateKeyPEMType PEMType = "ENCRYPTED SIGSTORE PRIVATE KEY"
+)
+
+func pemEncodeKeyPair(priv crypto.PrivateKey, pub crypto.PublicKey, pf PassFunc) (privPEM, pubPEM []byte, err error) {
+ pubPEM, err = MarshalPublicKeyToPEM(pub)
+ if err != nil {
+ return nil, nil, err
+ }
+ derBytes, err := MarshalPrivateKeyToDER(priv)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if pf == nil {
+ return PEMEncode(PrivateKeyPEMType, derBytes), pubPEM, nil
+ }
+ password, err := pf(true)
+ if err != nil {
+ return nil, nil, err
+ }
+ if password == nil {
+ return PEMEncode(PrivateKeyPEMType, derBytes), pubPEM, nil
+ }
+ if derBytes, err = encrypted.Encrypt(derBytes, password); err != nil {
+ return nil, nil, err
+ }
+ return PEMEncode(EncryptedSigstorePrivateKeyPEMType, derBytes), pubPEM, nil
+}
+
+// GeneratePEMEncodedECDSAKeyPair generates an ECDSA keypair, optionally password encrypted using a provided PassFunc, and PEM encoded.
+func GeneratePEMEncodedECDSAKeyPair(curve elliptic.Curve, pf PassFunc) (privPEM, pubPEM []byte, err error) {
+ priv, err := ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, nil, err
+ }
+ return pemEncodeKeyPair(priv, priv.Public(), pf)
+}
+
+// GeneratePEMEncodedRSAKeyPair generates an RSA keypair, optionally password encrypted using a provided PassFunc, and PEM encoded.
+func GeneratePEMEncodedRSAKeyPair(keyLengthBits int, pf PassFunc) (privPEM, pubPEM []byte, err error) {
+ priv, err := rsa.GenerateKey(rand.Reader, keyLengthBits)
+ if err != nil {
+ return nil, nil, err
+ }
+ return pemEncodeKeyPair(priv, priv.Public(), pf)
+}
+
+// MarshalPrivateKeyToEncryptedDER marshals the private key and encrypts the DER-encoded value using the specified password function
+func MarshalPrivateKeyToEncryptedDER(priv crypto.PrivateKey, pf PassFunc) ([]byte, error) {
+ derKey, err := MarshalPrivateKeyToDER(priv)
+ if err != nil {
+ return nil, err
+ }
+ password, err := pf(true)
+ if err != nil {
+ return nil, err
+ }
+ if password == nil {
+ return nil, errors.New("password was nil")
+ }
+ return encrypted.Encrypt(derKey, password)
+}
+
+// UnmarshalPEMToPrivateKey converts a PEM-encoded byte slice into a crypto.PrivateKey
+func UnmarshalPEMToPrivateKey(pemBytes []byte, pf PassFunc) (crypto.PrivateKey, error) {
+ derBlock, _ := pem.Decode(pemBytes)
+ if derBlock == nil {
+ return nil, errors.New("PEM decoding failed")
+ }
+ switch derBlock.Type {
+ case string(PrivateKeyPEMType):
+ return x509.ParsePKCS8PrivateKey(derBlock.Bytes)
+ case string(PKCS1PrivateKeyPEMType):
+ return x509.ParsePKCS1PrivateKey(derBlock.Bytes)
+ case string(ECPrivateKeyPEMType):
+ return x509.ParseECPrivateKey(derBlock.Bytes)
+ case string(EncryptedSigstorePrivateKeyPEMType), string(encryptedCosignPrivateKeyPEMType):
+ derBytes := derBlock.Bytes
+ if pf != nil {
+ password, err := pf(false)
+ if err != nil {
+ return nil, err
+ }
+ if password != nil {
+ derBytes, err = encrypted.Decrypt(derBytes, password)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return x509.ParsePKCS8PrivateKey(derBytes)
+ }
+ return nil, fmt.Errorf("unknown private key PEM file type: %v", derBlock.Type)
+}
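
A round-trip sketch tying the PassFunc helpers from password.go to the generate and parse routines above; the password is illustrative:

package main

import (
	"crypto/elliptic"

	"github.com/sigstore/sigstore/pkg/cryptoutils"
)

func main() {
	pf := cryptoutils.StaticPasswordFunc([]byte("example-passphrase")) // illustrative
	// Produces an "ENCRYPTED SIGSTORE PRIVATE KEY" PEM block because pf
	// returns a non-nil password.
	privPEM, pubPEM, err := cryptoutils.GeneratePEMEncodedECDSAKeyPair(elliptic.P256(), pf)
	if err != nil {
		panic(err)
	}
	// The same PassFunc decrypts the block on the way back in.
	priv, err := cryptoutils.UnmarshalPEMToPrivateKey(privPEM, pf)
	if err != nil {
		panic(err)
	}
	_, _ = priv, pubPEM
}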
+
+// MarshalPrivateKeyToDER converts a crypto.PrivateKey into a PKCS8 ASN.1 DER byte slice
+func MarshalPrivateKeyToDER(priv crypto.PrivateKey) ([]byte, error) {
+ if priv == nil {
+ return nil, errors.New("empty key")
+ }
+ return x509.MarshalPKCS8PrivateKey(priv)
+}
+
+// MarshalPrivateKeyToPEM converts a crypto.PrivateKey into a PKCS#8 PEM-encoded byte slice
+func MarshalPrivateKeyToPEM(priv crypto.PrivateKey) ([]byte, error) {
+ derBytes, err := MarshalPrivateKeyToDER(priv)
+ if err != nil {
+ return nil, err
+ }
+ return PEMEncode(PrivateKeyPEMType, derBytes), nil
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
new file mode 100644
index 00000000000..d2b94d4d93f
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
@@ -0,0 +1,184 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cryptoutils
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/sha1" // nolint:gosec
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "fmt"
+
+ "github.com/letsencrypt/boulder/goodkey"
+)
+
+const (
+ // PublicKeyPEMType is the string "PUBLIC KEY" to be used during PEM encoding and decoding
+ PublicKeyPEMType PEMType = "PUBLIC KEY"
+ // PKCS1PublicKeyPEMType is the string "RSA PUBLIC KEY" used to parse PKCS#1-encoded public keys
+ PKCS1PublicKeyPEMType PEMType = "RSA PUBLIC KEY"
+)
+
+// subjectPublicKeyInfo is used to construct a subject key ID.
+// https://tools.ietf.org/html/rfc5280#section-4.1.2.7
+type subjectPublicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ SubjectPublicKey asn1.BitString
+}
+
+// UnmarshalPEMToPublicKey converts a PEM-encoded byte slice into a crypto.PublicKey
+func UnmarshalPEMToPublicKey(pemBytes []byte) (crypto.PublicKey, error) {
+ derBytes, _ := pem.Decode(pemBytes)
+ if derBytes == nil {
+ return nil, errors.New("PEM decoding failed")
+ }
+ switch derBytes.Type {
+ case string(PublicKeyPEMType):
+ return x509.ParsePKIXPublicKey(derBytes.Bytes)
+ case string(PKCS1PublicKeyPEMType):
+ return x509.ParsePKCS1PublicKey(derBytes.Bytes)
+ default:
+ return nil, fmt.Errorf("unknown Public key PEM file type: %v. Are you passing the correct public key?",
+ derBytes.Type)
+ }
+}
+
+// MarshalPublicKeyToDER converts a crypto.PublicKey into a PKIX, ASN.1 DER byte slice
+func MarshalPublicKeyToDER(pub crypto.PublicKey) ([]byte, error) {
+ if pub == nil {
+ return nil, errors.New("empty key")
+ }
+ return x509.MarshalPKIXPublicKey(pub)
+}
+
+// MarshalPublicKeyToPEM converts a crypto.PublicKey into a PEM-encoded byte slice
+func MarshalPublicKeyToPEM(pub crypto.PublicKey) ([]byte, error) {
+ derBytes, err := MarshalPublicKeyToDER(pub)
+ if err != nil {
+ return nil, err
+ }
+ return PEMEncode(PublicKeyPEMType, derBytes), nil
+}
+
+// SKID generates a 160-bit SHA-1 hash of the value of the BIT STRING
+// subjectPublicKey (excluding the tag, length, and number of unused bits).
+// https://tools.ietf.org/html/rfc5280#section-4.2.1.2
+func SKID(pub crypto.PublicKey) ([]byte, error) {
+ derPubBytes, err := x509.MarshalPKIXPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ var spki subjectPublicKeyInfo
+ if _, err := asn1.Unmarshal(derPubBytes, &spki); err != nil {
+ return nil, err
+ }
+ skid := sha1.Sum(spki.SubjectPublicKey.Bytes) // nolint:gosec
+ return skid[:], nil
+}
+
+// EqualKeys compares two public keys. Supports RSA, ECDSA and ED25519.
+// If not equal, the error message contains hex-encoded SHA1 hashes of the DER-encoded keys
+func EqualKeys(first, second crypto.PublicKey) error {
+ switch pub := first.(type) {
+ case *rsa.PublicKey:
+ if !pub.Equal(second) {
+ return fmt.Errorf(genErrMsg(first, second, "rsa"))
+ }
+ case *ecdsa.PublicKey:
+ if !pub.Equal(second) {
+ return fmt.Errorf(genErrMsg(first, second, "ecdsa"))
+ }
+ case ed25519.PublicKey:
+ if !pub.Equal(second) {
+ return fmt.Errorf(genErrMsg(first, second, "ed25519"))
+ }
+ default:
+ return errors.New("unsupported key type")
+ }
+ return nil
+}
+
+// genErrMsg generates an error message for EqualKeys
+func genErrMsg(first, second crypto.PublicKey, keyType string) string {
+ msg := fmt.Sprintf("%s public keys are not equal", keyType)
+ // Calculate SKID to include in error message
+ firstSKID, err := SKID(first)
+ if err != nil {
+ return msg
+ }
+ secondSKID, err := SKID(second)
+ if err != nil {
+ return msg
+ }
+ return fmt.Sprintf("%s (%s, %s)", msg, hex.EncodeToString(firstSKID), hex.EncodeToString(secondSKID))
+}
+
+// ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key.
+func ValidatePubKey(pub crypto.PublicKey) error {
+ switch pk := pub.(type) {
+ case *rsa.PublicKey:
+ // goodkey policy enforces:
+ // * Size of key: 2048 <= size <= 4096, size % 8 = 0
+ // * Exponent E = 65537 (Default exponent for OpenSSL and Golang)
+ // * Small primes check for modulus
+ // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17)
+ // * Key is easily factored with Fermat's factorization method
+ p, err := goodkey.NewKeyPolicy(&goodkey.Config{FermatRounds: 100}, nil)
+ if err != nil {
+ // Should not occur, only chances to return errors are if fermat rounds
+ // are <0 or when loading blocked/weak keys from disk (not used here)
+ return errors.New("unable to initialize key policy")
+ }
+ // ctx is unused
+ return p.GoodKey(context.Background(), pub)
+ case *ecdsa.PublicKey:
+ // Unable to use goodkey policy because P-521 curve is not supported
+ return validateEcdsaKey(pk)
+ case ed25519.PublicKey:
+ return validateEd25519Key(pk)
+ }
+ return errors.New("unsupported public key type")
+}
+
+// Enforce that the ECDSA key curve is one of:
+// * NIST P-256 (secp256r1, prime256v1)
+// * NIST P-384
+// * NIST P-521.
+// Other EC curves, like secp256k1, are not supported by Go.
+func validateEcdsaKey(pub *ecdsa.PublicKey) error {
+ switch pub.Curve {
+ case elliptic.P224():
+ return fmt.Errorf("unsupported ec curve, expected NIST P-256, P-384, or P-521")
+ case elliptic.P256(), elliptic.P384(), elliptic.P521():
+ return nil
+ default:
+ return fmt.Errorf("unexpected ec curve")
+ }
+}
+
+// No validations currently, ED25519 supports only one key size.
+func validateEd25519Key(pub ed25519.PublicKey) error {
+ return nil
+}
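
A short sketch exercising the validation helpers above; P-256 satisfies the curve allowlist, and comparing a key with itself trivially passes EqualKeys:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"

	"github.com/sigstore/sigstore/pkg/cryptoutils"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	if err := cryptoutils.ValidatePubKey(priv.Public()); err != nil {
		panic(err) // P-224, for example, would be rejected here
	}
	// On mismatch the error carries hex-encoded SKIDs of both keys.
	if err := cryptoutils.EqualKeys(priv.Public(), priv.Public()); err != nil {
		panic(err)
	}
}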
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go
similarity index 72%
rename from vendor/github.com/prometheus/procfs/cpuinfo_armx.go
rename to vendor/github.com/sigstore/sigstore/pkg/signature/doc.go
index 44b590ed38f..dbd3314f6e7 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go
@@ -1,9 +1,11 @@
-// Copyright 2020 The Prometheus Authors
+//
+// Copyright 2021 The Sigstore Authors.
+//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -11,9 +13,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux
-// +build arm arm64
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoARM
+// Package signature contains types and utilities related to Sigstore signatures.
+package signature
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go
new file mode 100644
index 00000000000..dfbd5793d1b
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go
@@ -0,0 +1,244 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// checked on LoadSigner, LoadVerifier and SignMessage
+var ecdsaSupportedHashFuncs = []crypto.Hash{
+ crypto.SHA256,
+ crypto.SHA512,
+ crypto.SHA384,
+ crypto.SHA224,
+}
+
+// checked on VerifySignature. Supports SHA1 verification.
+var ecdsaSupportedVerifyHashFuncs = []crypto.Hash{
+ crypto.SHA256,
+ crypto.SHA512,
+ crypto.SHA384,
+ crypto.SHA224,
+ crypto.SHA1,
+}
+
+// ECDSASigner is a signature.Signer that uses an Elliptic Curve DSA algorithm
+type ECDSASigner struct {
+ hashFunc crypto.Hash
+ priv *ecdsa.PrivateKey
+}
+
+// LoadECDSASigner returns a Signer that calculates signatures using the specified
+// private key and hash algorithm.
+//
+// hf must not be crypto.Hash(0).
+func LoadECDSASigner(priv *ecdsa.PrivateKey, hf crypto.Hash) (*ECDSASigner, error) {
+ if priv == nil {
+ return nil, errors.New("invalid ECDSA private key specified")
+ }
+
+ if !isSupportedAlg(hf, ecdsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &ECDSASigner{
+ priv: priv,
+ hashFunc: hf,
+ }, nil
+}
+
+// SignMessage signs the provided message. Unless a precomputed digest is supplied
+// via the WithDigest() option, this method computes the digest of the message
+// according to the hash function specified when the ECDSASigner was created.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithRand()
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (e ECDSASigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
+ digest, _, err := ComputeDigestForSigning(message, e.hashFunc, ecdsaSupportedHashFuncs, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ rand := selectRandFromOpts(opts...)
+
+ return ecdsa.SignASN1(rand, e.priv, digest)
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (e ECDSASigner) Public() crypto.PublicKey {
+ if e.priv == nil {
+ return nil
+ }
+
+ return e.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ECDSASigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.Public(), nil
+}
+
+// Sign computes the signature for the specified digest. If a source of entropy is
+// given in rand, it will be used instead of the default value (rand.Reader from crypto/rand).
+//
+// If opts are specified, the hash function in opts.Hash should be the one used to compute
+// digest. If opts are not specified, the value provided when the signer was created will be used instead.
+func (e ECDSASigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+ ecdsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)}
+ if opts != nil {
+ ecdsaOpts = append(ecdsaOpts, options.WithCryptoSignerOpts(opts))
+ }
+
+ return e.SignMessage(nil, ecdsaOpts...)
+}
+
+// ECDSAVerifier is a signature.Verifier that uses an Elliptic Curve DSA algorithm
+type ECDSAVerifier struct {
+ publicKey *ecdsa.PublicKey
+ hashFunc crypto.Hash
+}
+
+// LoadECDSAVerifier returns a Verifier that verifies signatures using the specified
+// ECDSA public key and hash algorithm.
+//
+// hf must not be crypto.Hash(0).
+func LoadECDSAVerifier(pub *ecdsa.PublicKey, hashFunc crypto.Hash) (*ECDSAVerifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid ECDSA public key specified")
+ }
+
+ if !isSupportedAlg(hashFunc, ecdsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &ECDSAVerifier{
+ publicKey: pub,
+ hashFunc: hashFunc,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ECDSAVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message. Unless provided
+// in an option, the digest of the message will be computed using the hash function specified
+// when the ECDSAVerifier was created.
+//
+// This function returns nil if the verification succeeded, and an error message otherwise.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithDigest()
+//
+// All other options are ignored if specified.
+func (e ECDSAVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
+ digest, _, err := ComputeDigestForVerifying(message, e.hashFunc, ecdsaSupportedVerifyHashFuncs, opts...)
+ if err != nil {
+ return err
+ }
+
+ if signature == nil {
+ return errors.New("nil signature passed to VerifySignature")
+ }
+
+ sigBytes, err := io.ReadAll(signature)
+ if err != nil {
+ return fmt.Errorf("reading signature: %w", err)
+ }
+
+ if !ecdsa.VerifyASN1(e.publicKey, digest, sigBytes) {
+ return errors.New("invalid signature when validating ASN.1 encoded signature")
+ }
+
+ return nil
+}
+
+// ECDSASignerVerifier is a signature.SignerVerifier that uses an Elliptic Curve DSA algorithm
+type ECDSASignerVerifier struct {
+ *ECDSASigner
+ *ECDSAVerifier
+}
+
+// LoadECDSASignerVerifier creates a combined signer and verifier. This is a convenience object
+// that simply wraps an instance of ECDSASigner and ECDSAVerifier.
+func LoadECDSASignerVerifier(priv *ecdsa.PrivateKey, hf crypto.Hash) (*ECDSASignerVerifier, error) {
+ signer, err := LoadECDSASigner(priv, hf)
+ if err != nil {
+ return nil, fmt.Errorf("initializing signer: %w", err)
+ }
+ verifier, err := LoadECDSAVerifier(&priv.PublicKey, hf)
+ if err != nil {
+ return nil, fmt.Errorf("initializing verifier: %w", err)
+ }
+
+ return &ECDSASignerVerifier{
+ ECDSASigner: signer,
+ ECDSAVerifier: verifier,
+ }, nil
+}
+
+// NewDefaultECDSASignerVerifier creates a combined signer and verifier using ECDSA.
+//
+// This creates a new ECDSA key using the P-256 curve and uses the SHA256 hashing algorithm.
+func NewDefaultECDSASignerVerifier() (*ECDSASignerVerifier, *ecdsa.PrivateKey, error) {
+ return NewECDSASignerVerifier(elliptic.P256(), rand.Reader, crypto.SHA256)
+}
+
+// NewECDSASignerVerifier creates a combined signer and verifier using ECDSA.
+//
+// This creates a new ECDSA key using the specified elliptic curve, entropy source, and hashing function.
+func NewECDSASignerVerifier(curve elliptic.Curve, rand io.Reader, hashFunc crypto.Hash) (*ECDSASignerVerifier, *ecdsa.PrivateKey, error) {
+ priv, err := ecdsa.GenerateKey(curve, rand)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sv, err := LoadECDSASignerVerifier(priv, hashFunc)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return sv, priv, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ECDSASignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.publicKey, nil
+}
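
Review note: the ECDSA helpers above form a complete sign/verify round trip. A minimal usage sketch, not part of the vendored code, using only the APIs added in this file:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// Generate a fresh P-256 key and a combined signer/verifier (SHA-256).
	sv, _, err := signature.NewDefaultECDSASignerVerifier()
	if err != nil {
		panic(err)
	}

	msg := "hello, sigstore"

	// SignMessage hashes the message with the configured hash function
	// and returns an ASN.1-encoded ECDSA signature.
	sig, err := sv.SignMessage(strings.NewReader(msg))
	if err != nil {
		panic(err)
	}

	// VerifySignature recomputes the digest and checks the signature.
	if err := sv.VerifySignature(bytes.NewReader(sig), strings.NewReader(msg)); err != nil {
		panic(err)
	}
	fmt.Println("ECDSA signature verified")
}
```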
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go
new file mode 100644
index 00000000000..23a8638ff55
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go
@@ -0,0 +1,197 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+)
+
+var ed25519SupportedHashFuncs = []crypto.Hash{
+ crypto.Hash(0),
+}
+
+// ED25519Signer is a signature.Signer that uses the Ed25519 public-key signature system
+type ED25519Signer struct {
+ priv ed25519.PrivateKey
+}
+
+// LoadED25519Signer calculates signatures using the specified private key.
+func LoadED25519Signer(priv ed25519.PrivateKey) (*ED25519Signer, error) {
+ if priv == nil {
+ return nil, errors.New("invalid ED25519 private key specified")
+ }
+
+	// check the key length to avoid a panic and return an error gracefully
+ if len(priv) != ed25519.PrivateKeySize {
+ return nil, errors.New("invalid size for ED25519 key")
+ }
+
+ return &ED25519Signer{
+ priv: priv,
+ }, nil
+}
+
+// SignMessage signs the provided message. Passing the WithDigest option is not
+// supported, as Ed25519 performs two passes over the message during the
+// signing process and therefore cannot operate on a precomputed digest.
+//
+// All options are ignored.
+func (e ED25519Signer) SignMessage(message io.Reader, _ ...SignOption) ([]byte, error) {
+ messageBytes, _, err := ComputeDigestForSigning(message, crypto.Hash(0), ed25519SupportedHashFuncs)
+ if err != nil {
+ return nil, err
+ }
+
+ return ed25519.Sign(e.priv, messageBytes), nil
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (e ED25519Signer) Public() crypto.PublicKey {
+ if e.priv == nil {
+ return nil
+ }
+
+ return e.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ED25519Signer) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.Public(), nil
+}
+
+// Sign computes the signature for the specified message; the first and third arguments to this
+// function are ignored as they are not used by the ED25519 algorithm.
+func (e ED25519Signer) Sign(_ io.Reader, message []byte, _ crypto.SignerOpts) ([]byte, error) {
+ if message == nil {
+ return nil, errors.New("message must not be nil")
+ }
+ return e.SignMessage(bytes.NewReader(message))
+}
+
+// ED25519Verifier is a signature.Verifier that uses the Ed25519 public-key signature system
+type ED25519Verifier struct {
+ publicKey ed25519.PublicKey
+}
+
+// LoadED25519Verifier returns a Verifier that verifies signatures using the specified ED25519 public key.
+func LoadED25519Verifier(pub ed25519.PublicKey) (*ED25519Verifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid ED25519 public key specified")
+ }
+
+ return &ED25519Verifier{
+ publicKey: pub,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e *ED25519Verifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message.
+//
+// This function returns nil if the verification succeeded, and an error message otherwise.
+//
+// All options are ignored if specified.
+func (e *ED25519Verifier) VerifySignature(signature, message io.Reader, _ ...VerifyOption) error {
+ messageBytes, _, err := ComputeDigestForVerifying(message, crypto.Hash(0), ed25519SupportedHashFuncs)
+ if err != nil {
+ return err
+ }
+
+ if signature == nil {
+ return errors.New("nil signature passed to VerifySignature")
+ }
+
+ sigBytes, err := io.ReadAll(signature)
+ if err != nil {
+ return fmt.Errorf("reading signature: %w", err)
+ }
+
+ if !ed25519.Verify(e.publicKey, messageBytes, sigBytes) {
+ return errors.New("failed to verify signature")
+ }
+ return nil
+}
+
+// ED25519SignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system
+type ED25519SignerVerifier struct {
+ *ED25519Signer
+ *ED25519Verifier
+}
+
+// LoadED25519SignerVerifier creates a combined signer and verifier. This is
+// a convenience object that simply wraps an instance of ED25519Signer and ED25519Verifier.
+func LoadED25519SignerVerifier(priv ed25519.PrivateKey) (*ED25519SignerVerifier, error) {
+ signer, err := LoadED25519Signer(priv)
+ if err != nil {
+ return nil, fmt.Errorf("initializing signer: %w", err)
+ }
+ pub, ok := priv.Public().(ed25519.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf("given key is not ed25519.PublicKey")
+ }
+ verifier, err := LoadED25519Verifier(pub)
+ if err != nil {
+ return nil, fmt.Errorf("initializing verifier: %w", err)
+ }
+
+ return &ED25519SignerVerifier{
+ ED25519Signer: signer,
+ ED25519Verifier: verifier,
+ }, nil
+}
+
+// NewDefaultED25519SignerVerifier creates a combined signer and verifier using ED25519.
+// This creates a new ED25519 key using crypto/rand as an entropy source.
+func NewDefaultED25519SignerVerifier() (*ED25519SignerVerifier, ed25519.PrivateKey, error) {
+ return NewED25519SignerVerifier(rand.Reader)
+}
+
+// NewED25519SignerVerifier creates a combined signer and verifier using ED25519.
+// This creates a new ED25519 key using the specified entropy source.
+func NewED25519SignerVerifier(rand io.Reader) (*ED25519SignerVerifier, ed25519.PrivateKey, error) {
+ _, priv, err := ed25519.GenerateKey(rand)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sv, err := LoadED25519SignerVerifier(priv)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return sv, priv, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ED25519SignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.publicKey, nil
+}
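
Review note: a minimal usage sketch for the Ed25519 API above (not part of the vendored code). Note that, per the SignMessage comment, digest options are deliberately unsupported here:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// Generate a new Ed25519 key pair using crypto/rand.
	sv, _, err := signature.NewDefaultED25519SignerVerifier()
	if err != nil {
		panic(err)
	}

	msg := "hello, ed25519"

	// Ed25519 signs the raw message; WithDigest is intentionally unsupported.
	sig, err := sv.SignMessage(strings.NewReader(msg))
	if err != nil {
		panic(err)
	}

	if err := sv.VerifySignature(bytes.NewReader(sig), strings.NewReader(msg)); err != nil {
		panic(err)
	}
	fmt.Println("Ed25519 signature verified")
}
```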
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go
new file mode 100644
index 00000000000..6f8449eea9c
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go
@@ -0,0 +1,111 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ crand "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+)
+
+func isSupportedAlg(alg crypto.Hash, supportedAlgs []crypto.Hash) bool {
+ if supportedAlgs == nil {
+ return true
+ }
+ for _, supportedAlg := range supportedAlgs {
+ if alg == supportedAlg {
+ return true
+ }
+ }
+ return false
+}
+
+// ComputeDigestForSigning calculates the digest value for the specified message using a hash function selected by the following process:
+//
+// - if a digest value is already specified in a SignOption and the length of the digest matches that of the selected hash function, the
+// digest value will be returned without any further computation
+// - if a hash function is given using WithCryptoSignerOpts(opts) as a SignOption, it will be used (if it is in the supported list)
+// - otherwise defaultHashFunc will be used (if it is in the supported list)
+func ComputeDigestForSigning(rawMessage io.Reader, defaultHashFunc crypto.Hash, supportedHashFuncs []crypto.Hash, opts ...SignOption) (digest []byte, hashedWith crypto.Hash, err error) {
+ var cryptoSignerOpts crypto.SignerOpts = defaultHashFunc
+ for _, opt := range opts {
+ opt.ApplyDigest(&digest)
+ opt.ApplyCryptoSignerOpts(&cryptoSignerOpts)
+ }
+ hashedWith = cryptoSignerOpts.HashFunc()
+ if !isSupportedAlg(hashedWith, supportedHashFuncs) {
+ return nil, crypto.Hash(0), fmt.Errorf("unsupported hash algorithm: %q not in %v", hashedWith.String(), supportedHashFuncs)
+ }
+ if len(digest) > 0 {
+ if hashedWith != crypto.Hash(0) && len(digest) != hashedWith.Size() {
+ err = errors.New("unexpected length of digest for hash function specified")
+ }
+ return
+ }
+ digest, err = hashMessage(rawMessage, hashedWith)
+ return
+}
+
+// ComputeDigestForVerifying calculates the digest value for the specified message using a hash function selected by the following process:
+//
+// - if a digest value is already specified in a SignOption and the length of the digest matches that of the selected hash function, the
+// digest value will be returned without any further computation
+// - if a hash function is given using WithCryptoSignerOpts(opts) as a SignOption, it will be used (if it is in the supported list)
+// - otherwise defaultHashFunc will be used (if it is in the supported list)
+func ComputeDigestForVerifying(rawMessage io.Reader, defaultHashFunc crypto.Hash, supportedHashFuncs []crypto.Hash, opts ...VerifyOption) (digest []byte, hashedWith crypto.Hash, err error) {
+ var cryptoSignerOpts crypto.SignerOpts = defaultHashFunc
+ for _, opt := range opts {
+ opt.ApplyDigest(&digest)
+ opt.ApplyCryptoSignerOpts(&cryptoSignerOpts)
+ }
+ hashedWith = cryptoSignerOpts.HashFunc()
+ if !isSupportedAlg(hashedWith, supportedHashFuncs) {
+ return nil, crypto.Hash(0), fmt.Errorf("unsupported hash algorithm: %q not in %v", hashedWith.String(), supportedHashFuncs)
+ }
+ if len(digest) > 0 {
+ if hashedWith != crypto.Hash(0) && len(digest) != hashedWith.Size() {
+ err = errors.New("unexpected length of digest for hash function specified")
+ }
+ return
+ }
+ digest, err = hashMessage(rawMessage, hashedWith)
+ return
+}
+
+func hashMessage(rawMessage io.Reader, hashFunc crypto.Hash) ([]byte, error) {
+ if rawMessage == nil {
+ return nil, errors.New("message cannot be nil")
+ }
+ if hashFunc == crypto.Hash(0) {
+ return io.ReadAll(rawMessage)
+ }
+ hasher := hashFunc.New()
+ // avoids reading entire message into memory
+ if _, err := io.Copy(hasher, rawMessage); err != nil {
+ return nil, fmt.Errorf("hashing message: %w", err)
+ }
+ return hasher.Sum(nil), nil
+}
+
+func selectRandFromOpts(opts ...SignOption) io.Reader {
+ rand := crand.Reader
+ for _, opt := range opts {
+ opt.ApplyRand(&rand)
+ }
+ return rand
+}
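
Review note: the digest-selection logic above applies options in a fixed order of precedence: an explicitly supplied digest wins, then a hash function from WithCryptoSignerOpts, then the caller's default. A sketch of that precedence using only the exported API from this file:

```go
package main

import (
	"crypto"
	_ "crypto/sha256"
	_ "crypto/sha512"
	"fmt"
	"strings"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	supported := []crypto.Hash{crypto.SHA256, crypto.SHA512}

	// No options: the default hash function (SHA-256 here) is used.
	_, hf, err := signature.ComputeDigestForSigning(
		strings.NewReader("payload"), crypto.SHA256, supported)
	if err != nil {
		panic(err)
	}
	fmt.Println(hf) // SHA-256

	// WithCryptoSignerOpts overrides the default hash function.
	_, hf, err = signature.ComputeDigestForSigning(
		strings.NewReader("payload"), crypto.SHA256, supported,
		options.WithCryptoSignerOpts(crypto.SHA512))
	if err != nil {
		panic(err)
	}
	fmt.Println(hf) // SHA-512
}
```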
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go
new file mode 100644
index 00000000000..0be699f7e1d
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go
@@ -0,0 +1,57 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "context"
+ "crypto"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// RPCOption specifies options to be used when performing RPC
+type RPCOption interface {
+ ApplyContext(*context.Context)
+ ApplyRemoteVerification(*bool)
+ ApplyRPCAuthOpts(opts *options.RPCAuth)
+ ApplyKeyVersion(keyVersion *string)
+}
+
+// PublicKeyOption specifies options to be used when obtaining a public key
+type PublicKeyOption interface {
+ RPCOption
+}
+
+// MessageOption specifies options to be used when processing messages during signing or verification
+type MessageOption interface {
+ ApplyDigest(*[]byte)
+ ApplyCryptoSignerOpts(*crypto.SignerOpts)
+}
+
+// SignOption specifies options to be used when signing a message
+type SignOption interface {
+ RPCOption
+ MessageOption
+ ApplyRand(*io.Reader)
+ ApplyKeyVersionUsed(**string)
+}
+
+// VerifyOption specifies options to be used when verifying a signature
+type VerifyOption interface {
+ RPCOption
+ MessageOption
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go
new file mode 100644
index 00000000000..2282a863ef0
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go
@@ -0,0 +1,37 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package options defines options for KMS clients
+package options
+
+import (
+ "context"
+)
+
+// RequestContext implements the functional option pattern for including a context during RPC
+type RequestContext struct {
+ NoOpOptionImpl
+ ctx context.Context
+}
+
+// ApplyContext sets the specified context as the functional option
+func (r RequestContext) ApplyContext(ctx *context.Context) {
+ *ctx = r.ctx
+}
+
+// WithContext specifies that the given context should be used in RPC to external services
+func WithContext(ctx context.Context) RequestContext {
+ return RequestContext{ctx: ctx}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go
new file mode 100644
index 00000000000..21875dc8c0d
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go
@@ -0,0 +1,35 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RequestDigest implements the functional option pattern for specifying a digest value
+type RequestDigest struct {
+ NoOpOptionImpl
+ digest []byte
+}
+
+// ApplyDigest sets the specified digest value as the functional option
+func (r RequestDigest) ApplyDigest(digest *[]byte) {
+ *digest = r.digest
+}
+
+// WithDigest specifies that the given digest can be used by underlying signature implementations
+// WARNING: When verifying a digest with ECDSA, it is trivial to craft a valid signature
+// over a random message given a public key. Do not use this unless you understand the
+// implications and do not need to protect against malleability.
+func WithDigest(digest []byte) RequestDigest {
+ return RequestDigest{digest: digest}
+}
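
Review note: in practice WithDigest covers the case where the caller already holds a digest computed out of band, e.g. for a large artifact. A sketch (not part of the vendored code); given the malleability warning above, this pattern is only appropriate when the digest's provenance is trusted:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	sv, _, err := signature.NewDefaultECDSASignerVerifier()
	if err != nil {
		panic(err)
	}

	// Digest computed out of band; the message reader can then be nil,
	// since ComputeDigestForSigning returns the supplied digest directly.
	d := sha256.Sum256([]byte("large artifact contents"))

	sig, err := sv.SignMessage(nil, options.WithDigest(d[:]))
	if err != nil {
		panic(err)
	}
	fmt.Printf("signed %d-byte digest, got %d-byte signature\n", len(d), len(sig))
}
```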
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go
similarity index 80%
rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
rename to vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go
index c318385cbed..ebdeda22b86 100644
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go
@@ -1,4 +1,5 @@
-// Copyright 2013 Matt T. Proud
+//
+// Copyright 2022 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,5 +13,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package pbutil provides record length-delimited Protocol Buffer streaming.
-package pbutil
+// Package options contains functional options for the various SignerVerifiers
+package options
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go
new file mode 100644
index 00000000000..751418f9d58
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go
@@ -0,0 +1,50 @@
+//
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RequestKeyVersion implements the functional option pattern for specifying the KMS key version during signing or verification
+type RequestKeyVersion struct {
+ NoOpOptionImpl
+ keyVersion string
+}
+
+// ApplyKeyVersion sets the KMS's key version as a functional option
+func (r RequestKeyVersion) ApplyKeyVersion(keyVersion *string) {
+ *keyVersion = r.keyVersion
+}
+
+// WithKeyVersion specifies that a specific KMS key version be used during signing and verification operations;
+// a value of 0 will use the latest version of the key (default)
+func WithKeyVersion(keyVersion string) RequestKeyVersion {
+ return RequestKeyVersion{keyVersion: keyVersion}
+}
+
+// RequestKeyVersionUsed implements the functional option pattern for obtaining the KMS key version used during signing
+type RequestKeyVersionUsed struct {
+ NoOpOptionImpl
+ keyVersionUsed *string
+}
+
+// ApplyKeyVersionUsed requests to store the KMS's key version that was used as a functional option
+func (r RequestKeyVersionUsed) ApplyKeyVersionUsed(keyVersionUsed **string) {
+ *keyVersionUsed = r.keyVersionUsed
+}
+
+// ReturnKeyVersionUsed specifies that the specific KMS key version that was used during signing should be stored
+// in the pointer provided
+func ReturnKeyVersionUsed(keyVersionUsed *string) RequestKeyVersionUsed {
+ return RequestKeyVersionUsed{keyVersionUsed: keyVersionUsed}
+}
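
Review note: these key-version options only take effect with KMS-backed implementations; the in-memory signers vendored in this diff ignore them. A hypothetical sketch, where the kms argument stands in for such a KMS-backed signature.Signer (not defined in this diff):

```go
package kmsexample

import (
	"strings"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

// signPinned signs payload with a pinned KMS key version and reports which
// version actually produced the signature. kms is assumed to be a KMS-backed
// signature.Signer; the in-memory signers in this diff ignore these options.
func signPinned(kms signature.Signer, payload string) (sig []byte, used string, err error) {
	sig, err = kms.SignMessage(strings.NewReader(payload),
		options.WithKeyVersion("3"),         // request a specific key version
		options.ReturnKeyVersionUsed(&used), // capture the version that signed
	)
	return sig, used, err
}
```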
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
new file mode 100644
index 00000000000..04e4f3b5fb4
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
@@ -0,0 +1,49 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ "context"
+ "crypto"
+ "io"
+)
+
+// NoOpOptionImpl implements the RPCOption, SignOption, VerifyOption interfaces as no-ops.
+type NoOpOptionImpl struct{}
+
+// ApplyContext is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyContext(ctx *context.Context) {}
+
+// ApplyCryptoSignerOpts is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyCryptoSignerOpts(opts *crypto.SignerOpts) {}
+
+// ApplyDigest is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyDigest(digest *[]byte) {}
+
+// ApplyRand is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyRand(rand *io.Reader) {}
+
+// ApplyRemoteVerification is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyRemoteVerification(remoteVerification *bool) {}
+
+// ApplyRPCAuthOpts is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyRPCAuthOpts(opts *RPCAuth) {}
+
+// ApplyKeyVersion is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyKeyVersion(keyVersion *string) {}
+
+// ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces
+func (NoOpOptionImpl) ApplyKeyVersionUsed(keyVersion **string) {}
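
Review note: embedding NoOpOptionImpl is what lets each concrete option type implement only the Apply* method it cares about while still satisfying the larger option interfaces. A sketch of a hypothetical custom option built the same way:

```go
package optexample

import (
	"io"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

// withFixedRand is a hypothetical custom option: embedding NoOpOptionImpl
// supplies no-op implementations of every other Apply* method, so only the
// behavior being customized needs to be written out.
type withFixedRand struct {
	options.NoOpOptionImpl
	r io.Reader
}

// ApplyRand overrides the embedded no-op to inject a fixed entropy source.
func (w withFixedRand) ApplyRand(rand *io.Reader) {
	*rand = w.r
}

// Compile-time check: the embedding alone is enough to satisfy SignOption.
var _ signature.SignOption = withFixedRand{}
```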
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go
new file mode 100644
index 00000000000..fd3a17f9e02
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go
@@ -0,0 +1,41 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ crand "crypto/rand"
+ "io"
+)
+
+// RequestRand implements the functional option pattern for using a specific source of entropy
+type RequestRand struct {
+ NoOpOptionImpl
+ rand io.Reader
+}
+
+// ApplyRand sets the specified source of entropy as the functional option
+func (r RequestRand) ApplyRand(rand *io.Reader) {
+ *rand = r.rand
+}
+
+// WithRand specifies that the given source of entropy should be used in signing operations
+func WithRand(rand io.Reader) RequestRand {
+ r := rand
+ if r == nil {
+ r = crand.Reader
+ }
+ return RequestRand{rand: r}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go
new file mode 100644
index 00000000000..26144adbbf6
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go
@@ -0,0 +1,32 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RequestRemoteVerification implements the functional option pattern for remotely verifying signatures when possible
+type RequestRemoteVerification struct {
+ NoOpOptionImpl
+ remoteVerification bool
+}
+
+// ApplyRemoteVerification sets remote verification as a functional option
+func (r RequestRemoteVerification) ApplyRemoteVerification(remoteVerification *bool) {
+ *remoteVerification = r.remoteVerification
+}
+
+// WithRemoteVerification specifies that the verification operation should be performed remotely (vs in the process of the caller)
+func WithRemoteVerification(remoteVerification bool) RequestRemoteVerification {
+ return RequestRemoteVerification{remoteVerification: remoteVerification}
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go
new file mode 100644
index 00000000000..188de92dc96
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go
@@ -0,0 +1,58 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RPCAuthOpts includes authentication settings for RPC calls
+type RPCAuthOpts struct {
+ NoOpOptionImpl
+ opts RPCAuth
+}
+
+// RPCAuth provides credentials for RPC calls, empty fields are ignored
+type RPCAuth struct {
+ Address string // address is the remote server address, e.g. https://vault:8200
+	Path    string // path for the RPC, in vault this is the transit path, which defaults to "transit"
+ Token string // token used for RPC, in vault this is the VAULT_TOKEN value
+ OIDC RPCAuthOIDC
+}
+
+// RPCAuthOIDC is used to perform the RPC login using OIDC instead of a fixed token
+type RPCAuthOIDC struct {
+ Path string // path defaults to "jwt" for vault
+ Role string // role is required for jwt logins
+ Token string // token is a jwt with vault
+}
+
+// ApplyRPCAuthOpts sets the RPCAuth as a function option
+func (r RPCAuthOpts) ApplyRPCAuthOpts(opts *RPCAuth) {
+ if r.opts.Address != "" {
+ opts.Address = r.opts.Address
+ }
+ if r.opts.Path != "" {
+ opts.Path = r.opts.Path
+ }
+ if r.opts.Token != "" {
+ opts.Token = r.opts.Token
+ }
+ if r.opts.OIDC.Token != "" {
+ opts.OIDC = r.opts.OIDC
+ }
+}
+
+// WithRPCAuthOpts specifies RPCAuth settings to be used with RPC logins
+func WithRPCAuthOpts(opts RPCAuth) RPCAuthOpts {
+ return RPCAuthOpts{opts: opts}
+}
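
Review note: a sketch of how these auth settings could be passed to a Vault-backed signer (not part of the vendored code; the kms argument is a hypothetical KMS-backed signature.Signer, and the values simply mirror the field comments above):

```go
package vaultexample

import (
	"os"
	"strings"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

// signViaVault passes RPC credentials to a Vault-backed signer. Empty
// RPCAuth fields are ignored by ApplyRPCAuthOpts, so partial overrides work.
func signViaVault(kms signature.Signer, payload string) ([]byte, error) {
	auth := options.RPCAuth{
		Address: "https://vault:8200",     // remote server address
		Path:    "transit",                // transit backend path
		Token:   os.Getenv("VAULT_TOKEN"), // fixed token; OIDC is the alternative
	}
	return kms.SignMessage(strings.NewReader(payload), options.WithRPCAuthOpts(auth))
}
```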
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go
new file mode 100644
index 00000000000..1a3ac739487
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go
@@ -0,0 +1,40 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ "crypto"
+)
+
+// RequestCryptoSignerOpts implements the functional option pattern for supplying crypto.SignerOpts when signing or verifying
+type RequestCryptoSignerOpts struct {
+ NoOpOptionImpl
+ opts crypto.SignerOpts
+}
+
+// ApplyCryptoSignerOpts sets crypto.SignerOpts as a functional option
+func (r RequestCryptoSignerOpts) ApplyCryptoSignerOpts(opts *crypto.SignerOpts) {
+ *opts = r.opts
+}
+
+// WithCryptoSignerOpts specifies that provided crypto.SignerOpts be used during signing and verification operations
+func WithCryptoSignerOpts(opts crypto.SignerOpts) RequestCryptoSignerOpts {
+ var optsToUse crypto.SignerOpts = crypto.SHA256
+ if opts != nil {
+ optsToUse = opts
+ }
+ return RequestCryptoSignerOpts{opts: optsToUse}
+}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go
similarity index 71%
rename from vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
rename to vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go
index 91e272573a5..3664185a0f8 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go
@@ -1,9 +1,11 @@
-// Copyright 2020 The Prometheus Authors
+//
+// Copyright 2022 The Sigstore Authors.
+//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -11,9 +13,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux
-// +build mips mipsle mips64 mips64le
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoMips
+// Package payload contains types and utilities related to the Cosign signature format.
+package payload
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
new file mode 100644
index 00000000000..8b61aa15f63
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
@@ -0,0 +1,105 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package payload defines the structure of a container image signature payload
+package payload
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/google/go-containerregistry/pkg/name"
+)
+
+// CosignSignatureType is the value of `critical.type` in a SimpleContainerImage payload.
+const CosignSignatureType = "cosign container image signature"
+
+// SimpleContainerImage describes the structure of a basic container image signature payload, as defined at:
+// https://github.com/containers/image/blob/master/docs/containers-signature.5.md#json-data-format
+type SimpleContainerImage struct {
+ Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature
+ Optional map[string]interface{} `json:"optional"` // Optional optional metadata about the image
+}
+
+// Critical data critical to correctly evaluating the validity of a signature
+type Critical struct {
+ Identity Identity `json:"identity"` // Identity claimed identity of the image
+ Image Image `json:"image"` // Image identifies the container that the signature applies to
+	Type     string   `json:"type"`     // Type must be CosignSignatureType ('cosign container image signature')
+}
+
+// Identity is the claimed identity of the image
+type Identity struct {
+ DockerReference string `json:"docker-reference"` // DockerReference is a reference used to refer to or download the image
+}
+
+// Image identifies the container image that the signature applies to
+type Image struct {
+ DockerManifestDigest string `json:"docker-manifest-digest"` // DockerManifestDigest the manifest digest of the signed container image
+}
+
+// Cosign describes a container image signed using Cosign
+type Cosign struct {
+ Image name.Digest
+ Annotations map[string]interface{}
+}
+
+// SimpleContainerImage returns information about a container image in the github.com/containers/image/signature format
+func (p Cosign) SimpleContainerImage() SimpleContainerImage {
+ return SimpleContainerImage{
+ Critical: Critical{
+ Identity: Identity{
+ DockerReference: p.Image.Repository.Name(),
+ },
+ Image: Image{
+ DockerManifestDigest: p.Image.DigestStr(),
+ },
+ Type: CosignSignatureType,
+ },
+ Optional: p.Annotations,
+ }
+}
+
+// MarshalJSON marshals the container signature into a []byte of JSON data
+func (p Cosign) MarshalJSON() ([]byte, error) {
+ return json.Marshal(p.SimpleContainerImage())
+}
+
+var _ json.Marshaler = Cosign{}
+
+// UnmarshalJSON unmarshals []byte of JSON data into a container signature object
+func (p *Cosign) UnmarshalJSON(data []byte) error {
+ if string(data) == "null" {
+ // JSON "null" is a no-op by convention
+ return nil
+ }
+ var simple SimpleContainerImage
+ if err := json.Unmarshal(data, &simple); err != nil {
+ return err
+ }
+ if simple.Critical.Type != CosignSignatureType {
+ return fmt.Errorf("Cosign signature payload was of an unknown type: %q", simple.Critical.Type)
+ }
+ digestStr := simple.Critical.Identity.DockerReference + "@" + simple.Critical.Image.DockerManifestDigest
+ digest, err := name.NewDigest(digestStr)
+ if err != nil {
+ return fmt.Errorf("could not parse image digest string %q: %w", digestStr, err)
+ }
+ p.Image = digest
+ p.Annotations = simple.Optional
+ return nil
+}
+
+var _ json.Unmarshaler = (*Cosign)(nil)
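
Review note: the Cosign type round-trips through the simple-signing JSON format defined above. A minimal sketch of producing a payload (not part of the vendored code; the image reference is an arbitrary example value):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/sigstore/sigstore/pkg/signature/payload"
)

func main() {
	// Example reference; any repo@sha256:<64 hex chars> digest parses.
	digest, err := name.NewDigest("registry.example.com/app@sha256:" +
		"4e388ab32b10dc8dbc7e28144f552830adc74787c1e2c0824032078a79f227fb")
	if err != nil {
		panic(err)
	}

	p := payload.Cosign{
		Image:       digest,
		Annotations: map[string]interface{}{"creator": "example"},
	}

	// MarshalJSON emits the critical/optional structure with
	// critical.type set to CosignSignatureType.
	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```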
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go
similarity index 62%
rename from vendor/github.com/prometheus/procfs/cpuinfo_others.go
rename to vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go
index 95b5b4ec44a..6f6a47a9f96 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go
@@ -1,9 +1,11 @@
-// Copyright 2020 The Prometheus Authors
+//
+// Copyright 2021 The Sigstore Authors.
+//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -11,9 +13,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux
-// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+package signature
-package procfs
+import (
+ "crypto"
+)
-var parseCPUInfo = parseCPUInfoDummy
+// PublicKeyProvider returns a PublicKey associated with a digital signature
+type PublicKeyProvider interface {
+ PublicKey(opts ...PublicKeyOption) (crypto.PublicKey, error)
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go
new file mode 100644
index 00000000000..1cac68a539f
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go
@@ -0,0 +1,225 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// RSAPKCS1v15Signer is a signature.Signer that uses the RSA PKCS1v15 algorithm
+type RSAPKCS1v15Signer struct {
+ hashFunc crypto.Hash
+ priv *rsa.PrivateKey
+}
+
+// LoadRSAPKCS1v15Signer calculates signatures using the specified private key and hash algorithm.
+//
+// hf must be either SHA256, SHA384, or SHA512.
+func LoadRSAPKCS1v15Signer(priv *rsa.PrivateKey, hf crypto.Hash) (*RSAPKCS1v15Signer, error) {
+ if priv == nil {
+ return nil, errors.New("invalid RSA private key specified")
+ }
+
+ if !isSupportedAlg(hf, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPKCS1v15Signer{
+ priv: priv,
+ hashFunc: hf,
+ }, nil
+}
+
+// SignMessage signs the provided message using PKCS1v15. If the message is provided,
+// this method will compute the digest according to the hash function specified
+// when the RSAPKCS1v15Signer was created.
+//
+// SignMessage recognizes the following Options listed in order of preference:
+//
+// - WithRand()
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPKCS1v15Signer) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
+ digest, hf, err := ComputeDigestForSigning(message, r.hashFunc, rsaSupportedHashFuncs, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ rand := selectRandFromOpts(opts...)
+
+ return rsa.SignPKCS1v15(rand, r.priv, hf, digest)
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (r RSAPKCS1v15Signer) Public() crypto.PublicKey {
+ if r.priv == nil {
+ return nil
+ }
+
+ return r.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPKCS1v15Signer) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.Public(), nil
+}
+
+// Sign computes the signature for the specified digest using PKCS1v15.
+//
+// If a source of entropy is given in rand, it will be used instead of the default value (rand.Reader
+// from crypto/rand).
+//
+// If opts are specified, they should specify the hash function used to compute digest. If opts are
+// not specified, this function assumes the hash function provided when the signer was created was
+// used to create the value specified in digest.
+func (r RSAPKCS1v15Signer) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+ rsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)}
+ if opts != nil {
+ rsaOpts = append(rsaOpts, options.WithCryptoSignerOpts(opts))
+ }
+
+ return r.SignMessage(nil, rsaOpts...)
+}
+
+// RSAPKCS1v15Verifier is a signature.Verifier that uses the RSA PKCS1v15 algorithm
+type RSAPKCS1v15Verifier struct {
+ publicKey *rsa.PublicKey
+ hashFunc crypto.Hash
+}
+
+// LoadRSAPKCS1v15Verifier returns a Verifier that verifies signatures using the specified
+// RSA public key and hash algorithm.
+//
+// hf must be either SHA256, SHA384, or SHA512.
+func LoadRSAPKCS1v15Verifier(pub *rsa.PublicKey, hashFunc crypto.Hash) (*RSAPKCS1v15Verifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid RSA public key specified")
+ }
+
+ if !isSupportedAlg(hashFunc, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPKCS1v15Verifier{
+ publicKey: pub,
+ hashFunc: hashFunc,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPKCS1v15Verifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message using PKCS1v15. Unless provided
+// in an option, the digest of the message will be computed using the hash function specified
+// when the RSAPKCS1v15Verifier was created.
+//
+// This function returns nil if the verification succeeded, and an error message otherwise.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPKCS1v15Verifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
+ digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedVerifyHashFuncs, opts...)
+ if err != nil {
+ return err
+ }
+
+ if signature == nil {
+ return errors.New("nil signature passed to VerifySignature")
+ }
+
+ sigBytes, err := io.ReadAll(signature)
+ if err != nil {
+ return fmt.Errorf("reading signature: %w", err)
+ }
+
+ return rsa.VerifyPKCS1v15(r.publicKey, hf, digest, sigBytes)
+}
+
+// RSAPKCS1v15SignerVerifier is a signature.SignerVerifier that uses the RSA PKCS1v15 algorithm
+type RSAPKCS1v15SignerVerifier struct {
+ *RSAPKCS1v15Signer
+ *RSAPKCS1v15Verifier
+}
+
+// LoadRSAPKCS1v15SignerVerifier creates a combined signer and verifier. This is a convenience object
+// that simply wraps an instance of RSAPKCS1v15Signer and RSAPKCS1v15Verifier.
+func LoadRSAPKCS1v15SignerVerifier(priv *rsa.PrivateKey, hf crypto.Hash) (*RSAPKCS1v15SignerVerifier, error) {
+ signer, err := LoadRSAPKCS1v15Signer(priv, hf)
+ if err != nil {
+ return nil, fmt.Errorf("initializing signer: %w", err)
+ }
+ verifier, err := LoadRSAPKCS1v15Verifier(&priv.PublicKey, hf)
+ if err != nil {
+ return nil, fmt.Errorf("initializing verifier: %w", err)
+ }
+
+ return &RSAPKCS1v15SignerVerifier{
+ RSAPKCS1v15Signer: signer,
+ RSAPKCS1v15Verifier: verifier,
+ }, nil
+}
+
+// NewDefaultRSAPKCS1v15SignerVerifier creates a combined signer and verifier using RSA PKCS1v15.
+// This creates a new RSA key of 2048 bits and uses the SHA256 hashing algorithm.
+func NewDefaultRSAPKCS1v15SignerVerifier() (*RSAPKCS1v15SignerVerifier, *rsa.PrivateKey, error) {
+ return NewRSAPKCS1v15SignerVerifier(rand.Reader, 2048, crypto.SHA256)
+}
+
+// NewRSAPKCS1v15SignerVerifier creates a combined signer and verifier using RSA PKCS1v15.
+// This creates a new RSA key of the specified length of bits, entropy source, and hash function.
+func NewRSAPKCS1v15SignerVerifier(rand io.Reader, bits int, hashFunc crypto.Hash) (*RSAPKCS1v15SignerVerifier, *rsa.PrivateKey, error) {
+ priv, err := rsa.GenerateKey(rand, bits)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sv, err := LoadRSAPKCS1v15SignerVerifier(priv, hashFunc)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return sv, priv, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPKCS1v15SignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.publicKey, nil
+}
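
Review note: a minimal usage sketch for the PKCS#1 v1.5 API above (not part of the vendored code):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// 2048-bit RSA key, SHA-256 digests, PKCS#1 v1.5 padding.
	sv, _, err := signature.NewDefaultRSAPKCS1v15SignerVerifier()
	if err != nil {
		panic(err)
	}

	msg := "hello, rsa"
	sig, err := sv.SignMessage(strings.NewReader(msg))
	if err != nil {
		panic(err)
	}

	if err := sv.VerifySignature(bytes.NewReader(sig), strings.NewReader(msg)); err != nil {
		panic(err)
	}
	fmt.Println("PKCS#1 v1.5 signature verified")
}
```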
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go
new file mode 100644
index 00000000000..6e52bed9ba6
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go
@@ -0,0 +1,260 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// checked on LoadSigner, LoadVerifier, and SignMessage
+var rsaSupportedHashFuncs = []crypto.Hash{
+ crypto.SHA256,
+ crypto.SHA384,
+ crypto.SHA512,
+}
+
+// checked on VerifySignature. Supports SHA1 verification.
+var rsaSupportedVerifyHashFuncs = []crypto.Hash{
+ crypto.SHA1,
+ crypto.SHA256,
+ crypto.SHA384,
+ crypto.SHA512,
+}
+
+// RSAPSSSigner is a signature.Signer that uses the RSA PSS algorithm
+type RSAPSSSigner struct {
+ hashFunc crypto.Hash
+ priv *rsa.PrivateKey
+ pssOpts *rsa.PSSOptions
+}
+
+// LoadRSAPSSSigner calculates signatures using the specified private key and hash algorithm.
+//
+// If opts are specified, then they will be stored and used as a default if not overridden
+// by the value passed to Sign().
+//
+// hf must be either SHA256, SHA384, or SHA512. opts.Hash is ignored.
+func LoadRSAPSSSigner(priv *rsa.PrivateKey, hf crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSSigner, error) {
+ if priv == nil {
+ return nil, errors.New("invalid RSA private key specified")
+ }
+
+ if !isSupportedAlg(hf, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPSSSigner{
+ priv: priv,
+ pssOpts: opts,
+ hashFunc: hf,
+ }, nil
+}
+
+// SignMessage signs the provided message using PSS. If the message is provided,
+// this method will compute the digest according to the hash function specified
+// when the RSAPSSSigner was created.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithRand()
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPSSSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
+ digest, hf, err := ComputeDigestForSigning(message, r.hashFunc, rsaSupportedHashFuncs, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ rand := selectRandFromOpts(opts...)
+ pssOpts := r.pssOpts
+ if pssOpts == nil {
+ pssOpts = &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ }
+ }
+ pssOpts.Hash = hf
+
+ return rsa.SignPSS(rand, r.priv, hf, digest, pssOpts)
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (r RSAPSSSigner) Public() crypto.PublicKey {
+ if r.priv == nil {
+ return nil
+ }
+
+ return r.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPSSSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.Public(), nil
+}
+
+// Sign computes the signature for the specified digest using PSS.
+//
+// If a source of entropy is given in rand, it will be used instead of the default value (rand.Reader
+// from crypto/rand).
+//
+// If opts are specified, they must be *rsa.PSSOptions. If opts are not specified, the hash function
+// provided when the signer was created will be assumed.
+func (r RSAPSSSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+ rsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)}
+ if opts != nil {
+ rsaOpts = append(rsaOpts, options.WithCryptoSignerOpts(opts))
+ }
+
+ return r.SignMessage(nil, rsaOpts...)
+}
+
+// RSAPSSVerifier is a signature.Verifier that uses the RSA PSS algorithm
+type RSAPSSVerifier struct {
+ publicKey *rsa.PublicKey
+ hashFunc crypto.Hash
+ pssOpts *rsa.PSSOptions
+}
+
+// LoadRSAPSSVerifier verifies signatures using the specified public key and hash algorithm.
+//
+// hf must be either SHA256, SHA384, or SHA512. opts.Hash is ignored.
+func LoadRSAPSSVerifier(pub *rsa.PublicKey, hashFunc crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSVerifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid RSA public key specified")
+ }
+
+ if !isSupportedAlg(hashFunc, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPSSVerifier{
+ publicKey: pub,
+ hashFunc: hashFunc,
+ pssOpts: opts,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPSSVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message using PSS. Unless provided
+// in an option, the digest of the message will be computed using the hash function specified
+// when the RSAPSSVerifier was created.
+//
+// This function returns nil if the verification succeeded, and an error message otherwise.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPSSVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
+ digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedVerifyHashFuncs, opts...)
+ if err != nil {
+ return err
+ }
+
+ if signature == nil {
+ return errors.New("nil signature passed to VerifySignature")
+ }
+
+ sigBytes, err := io.ReadAll(signature)
+ if err != nil {
+ return fmt.Errorf("reading signature: %w", err)
+ }
+
+ // rsa.VerifyPSS ignores pssOpts.Hash, so we don't set it
+ pssOpts := r.pssOpts
+ if pssOpts == nil {
+ pssOpts = &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ }
+ }
+
+ return rsa.VerifyPSS(r.publicKey, hf, digest, sigBytes, pssOpts)
+}
+
+// RSAPSSSignerVerifier is a signature.SignerVerifier that uses the RSA PSS algorithm
+type RSAPSSSignerVerifier struct {
+ *RSAPSSSigner
+ *RSAPSSVerifier
+}
+
+// LoadRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS. This is
+// a convenience object that simply wraps an instance of RSAPSSSigner and RSAPSSVerifier.
+func LoadRSAPSSSignerVerifier(priv *rsa.PrivateKey, hf crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSSignerVerifier, error) {
+ signer, err := LoadRSAPSSSigner(priv, hf, opts)
+ if err != nil {
+ return nil, fmt.Errorf("initializing signer: %w", err)
+ }
+ verifier, err := LoadRSAPSSVerifier(&priv.PublicKey, hf, opts)
+ if err != nil {
+ return nil, fmt.Errorf("initializing verifier: %w", err)
+ }
+
+ return &RSAPSSSignerVerifier{
+ RSAPSSSigner: signer,
+ RSAPSSVerifier: verifier,
+ }, nil
+}
+
+// NewDefaultRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS.
+// This creates a new RSA key of 2048 bits and uses the SHA256 hashing algorithm.
+func NewDefaultRSAPSSSignerVerifier() (*RSAPSSSignerVerifier, *rsa.PrivateKey, error) {
+ return NewRSAPSSSignerVerifier(rand.Reader, 2048, crypto.SHA256)
+}
+
+// NewRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS.
+// This creates a new RSA key with the specified number of bits using the given entropy source, and uses the specified hash function.
+func NewRSAPSSSignerVerifier(rand io.Reader, bits int, hashFunc crypto.Hash) (*RSAPSSSignerVerifier, *rsa.PrivateKey, error) {
+ priv, err := rsa.GenerateKey(rand, bits)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sv, err := LoadRSAPSSSignerVerifier(priv, hashFunc, &rsa.PSSOptions{Hash: hashFunc})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return sv, priv, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPSSSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.publicKey, nil
+}
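
For orientation, a minimal sketch of how the RSA-PSS signer and verifier halves above fit together; the message and error handling are illustrative assumptions, not taken from the vendored code:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// Generate a fresh 2048-bit RSA key wrapped in a combined
	// signer/verifier that hashes with SHA-256.
	sv, _, err := signature.NewDefaultRSAPSSSignerVerifier()
	if err != nil {
		panic(err)
	}

	msg := []byte("hello, sigstore")

	// SignMessage hashes the message and produces a PSS signature.
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		panic(err)
	}

	// VerifySignature recomputes the digest and checks the signature.
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		panic(err)
	}
	fmt.Println("signature verified")
}
```
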
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
new file mode 100644
index 00000000000..3bd3823cb23
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
@@ -0,0 +1,89 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+
+ // these ensure we have the implementations loaded
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+
+ // these ensure we have the implementations loaded
+ _ "golang.org/x/crypto/sha3"
+)
+
+// Signer creates digital signatures over a message using a specified key pair
+type Signer interface {
+ PublicKeyProvider
+ SignMessage(message io.Reader, opts ...SignOption) ([]byte, error)
+}
+
+// SignerOpts implements crypto.SignerOpts but also allows callers to specify
+// additional options that may be utilized in signing the digest provided.
+type SignerOpts struct {
+ Hash crypto.Hash
+ Opts []SignOption
+}
+
+// HashFunc returns the hash function for this object
+func (s SignerOpts) HashFunc() crypto.Hash {
+ return s.Hash
+}
+
+// LoadSigner returns a signature.Signer based on the algorithm of the private key
+// provided.
+//
+// If privateKey is an RSA key, a RSAPKCS1v15Signer will be returned. If a
+// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() method directly.
+func LoadSigner(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Signer, error) {
+ switch pk := privateKey.(type) {
+ case *rsa.PrivateKey:
+ return LoadRSAPKCS1v15Signer(pk, hashFunc)
+ case *ecdsa.PrivateKey:
+ return LoadECDSASigner(pk, hashFunc)
+ case ed25519.PrivateKey:
+ return LoadED25519Signer(pk)
+ }
+ return nil, errors.New("unsupported private key type")
+}
+
+// LoadSignerFromPEMFile returns a signature.Signer based on the algorithm of the private key
+// in the file. The Signer will use the hash function specified when computing digests.
+//
+// If key is an RSA key, a RSAPKCS1v15Signer will be returned. If a
+// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() and
+// cryptoutils.UnmarshalPEMToPrivateKey() methods directly.
+func LoadSignerFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.PassFunc) (Signer, error) {
+ fileBytes, err := os.ReadFile(filepath.Clean(path))
+ if err != nil {
+ return nil, err
+ }
+ priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf)
+ if err != nil {
+ return nil, err
+ }
+ return LoadSigner(priv, hashFunc)
+}
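
A rough sketch of the PEM loading path, assuming an unencrypted key (the key path is a placeholder, and the inline callback is only one way to satisfy cryptoutils.PassFunc):

```go
package main

import (
	"bytes"
	"crypto"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// For an unencrypted key the passphrase callback is never invoked;
	// this literal satisfies the cryptoutils.PassFunc signature.
	noPass := func(bool) ([]byte, error) { return nil, nil }

	// "key.pem" is a placeholder path to a PEM-encoded private key.
	signer, err := signature.LoadSignerFromPEMFile("key.pem", crypto.SHA256, noPass)
	if err != nil {
		log.Fatal(err)
	}

	sig, err := signer.SignMessage(bytes.NewReader([]byte("payload")))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("signature: %x", sig)
}
```
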
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go
new file mode 100644
index 00000000000..90667f2a84f
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go
@@ -0,0 +1,69 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "errors"
+ "os"
+ "path/filepath"
+
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+// SignerVerifier creates and verifies digital signatures over a message using a specified key pair
+type SignerVerifier interface {
+ Signer
+ Verifier
+}
+
+// LoadSignerVerifier returns a signature.SignerVerifier based on the algorithm of the private key
+// provided.
+//
+// If privateKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a
+// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() method directly.
+func LoadSignerVerifier(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (SignerVerifier, error) {
+ switch pk := privateKey.(type) {
+ case *rsa.PrivateKey:
+ return LoadRSAPKCS1v15SignerVerifier(pk, hashFunc)
+ case *ecdsa.PrivateKey:
+ return LoadECDSASignerVerifier(pk, hashFunc)
+ case ed25519.PrivateKey:
+ return LoadED25519SignerVerifier(pk)
+ }
+ return nil, errors.New("unsupported private key type")
+}
+
+// LoadSignerVerifierFromPEMFile returns a signature.SignerVerifier based on the algorithm of the private key
+// in the file. The SignerVerifier will use the hash function specified when computing digests.
+//
+// If the private key is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a
+// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() and
+// cryptoutils.UnmarshalPEMToPrivateKey() methods directly.
+func LoadSignerVerifierFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.PassFunc) (SignerVerifier, error) {
+ fileBytes, err := os.ReadFile(filepath.Clean(path))
+ if err != nil {
+ return nil, err
+ }
+ priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf)
+ if err != nil {
+ return nil, err
+ }
+ return LoadSignerVerifier(priv, hashFunc)
+}
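
A brief sketch of the type-switch dispatch above, assuming a freshly generated ed25519 key (key generation and message are illustrative):

```go
package main

import (
	"bytes"
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// LoadSignerVerifier picks the ED25519 implementation based on
	// the key's concrete type.
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// The hash function is ignored for ed25519, which signs the
	// message directly.
	sv, err := signature.LoadSignerVerifier(priv, crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}

	msg := []byte("payload")
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		log.Fatal(err)
	}
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		log.Fatal(err)
	}
}
```
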
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/util.go b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go
new file mode 100644
index 00000000000..8852ecc4b59
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go
@@ -0,0 +1,55 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/google/go-containerregistry/pkg/name"
+
+ sigpayload "github.com/sigstore/sigstore/pkg/signature/payload"
+)
+
+// SignImage signs a container manifest using the specified signer object
+func SignImage(signer SignerVerifier, image name.Digest, optionalAnnotations map[string]interface{}) (payload, signature []byte, err error) {
+ imgPayload := sigpayload.Cosign{
+ Image: image,
+ Annotations: optionalAnnotations,
+ }
+ payload, err = json.Marshal(imgPayload)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to marshal payload to JSON: %w", err)
+ }
+ signature, err = signer.SignMessage(bytes.NewReader(payload))
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to sign payload: %w", err)
+ }
+ return payload, signature, nil
+}
+
+// VerifyImageSignature verifies a signature over a container manifest
+func VerifyImageSignature(signer SignerVerifier, payload, signature []byte) (image name.Digest, annotations map[string]interface{}, err error) {
+ if err := signer.VerifySignature(bytes.NewReader(signature), bytes.NewReader(payload)); err != nil {
+ return name.Digest{}, nil, fmt.Errorf("signature verification failed: %w", err)
+ }
+ var imgPayload sigpayload.Cosign
+ if err := json.Unmarshal(payload, &imgPayload); err != nil {
+ return name.Digest{}, nil, fmt.Errorf("could not deserialize image payload: %w", err)
+ }
+ return imgPayload.Image, imgPayload.Annotations, nil
+}
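
A sketch of the payload round trip these two helpers implement; the image reference, digest value, and annotations below are placeholders:

```go
package main

import (
	"log"
	"strings"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// Placeholder image reference; the all-zero digest is illustrative.
	ref := "registry.example.com/myimage@sha256:" + strings.Repeat("0", 64)
	img, err := name.NewDigest(ref)
	if err != nil {
		log.Fatal(err)
	}

	sv, _, err := signature.NewDefaultRSAPSSSignerVerifier()
	if err != nil {
		log.Fatal(err)
	}

	// SignImage wraps the reference and annotations in the Cosign
	// JSON payload and signs it.
	payload, sig, err := signature.SignImage(sv, img, map[string]interface{}{"env": "dev"})
	if err != nil {
		log.Fatal(err)
	}

	// VerifyImageSignature checks the signature and decodes the payload.
	gotImg, annotations, err := signature.VerifyImageSignature(sv, payload, sig)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("verified %s with annotations %v", gotImg, annotations)
}
```
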
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
new file mode 100644
index 00000000000..9ca60492951
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
@@ -0,0 +1,100 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+// Verifier verifies the digital signature using a specified public key
+type Verifier interface {
+ PublicKeyProvider
+ VerifySignature(signature, message io.Reader, opts ...VerifyOption) error
+}
+
+// LoadVerifier returns a signature.Verifier based on the algorithm of the public key
+// provided that will use the hash function specified when computing digests.
+//
+// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
+// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly.
+func LoadVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (Verifier, error) {
+ switch pk := publicKey.(type) {
+ case *rsa.PublicKey:
+ return LoadRSAPKCS1v15Verifier(pk, hashFunc)
+ case *ecdsa.PublicKey:
+ return LoadECDSAVerifier(pk, hashFunc)
+ case ed25519.PublicKey:
+ return LoadED25519Verifier(pk)
+ }
+ return nil, errors.New("unsupported public key type")
+}
+
+// LoadUnsafeVerifier returns a signature.Verifier based on the algorithm of the public key
+// provided that will use SHA1 when computing digests for RSA and ECDSA signatures.
+//
+// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
+// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly.
+func LoadUnsafeVerifier(publicKey crypto.PublicKey) (Verifier, error) {
+ switch pk := publicKey.(type) {
+ case *rsa.PublicKey:
+ if pk == nil {
+ return nil, errors.New("invalid RSA public key specified")
+ }
+ return &RSAPKCS1v15Verifier{
+ publicKey: pk,
+ hashFunc: crypto.SHA1,
+ }, nil
+ case *ecdsa.PublicKey:
+ if pk == nil {
+ return nil, errors.New("invalid ECDSA public key specified")
+ }
+ return &ECDSAVerifier{
+ publicKey: pk,
+ hashFunc: crypto.SHA1,
+ }, nil
+ case ed25519.PublicKey:
+ return LoadED25519Verifier(pk)
+ }
+ return nil, errors.New("unsupported public key type")
+}
+
+// LoadVerifierFromPEMFile returns a signature.Verifier based on the contents of a
+// file located at path. The Verifier will use the hash function specified when computing digests.
+//
+// If the public key is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
+// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() and cryptoutils.UnmarshalPEMToPublicKey() methods directly.
+func LoadVerifierFromPEMFile(path string, hashFunc crypto.Hash) (Verifier, error) {
+ fileBytes, err := os.ReadFile(filepath.Clean(path))
+ if err != nil {
+ return nil, err
+ }
+
+ pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return LoadVerifier(pubKey, hashFunc)
+}
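
Worth noting: LoadVerifier maps *rsa.PublicKey to PKCS#1 v1.5, so a PSS signature must be verified with LoadRSAPSSVerifier instead. A small sketch under that assumption (key generation and message are illustrative):

```go
package main

import (
	"bytes"
	"crypto"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	sv, priv, err := signature.NewDefaultRSAPSSSignerVerifier()
	if err != nil {
		log.Fatal(err)
	}

	msg := []byte("payload")
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		log.Fatal(err)
	}

	// LoadVerifier(&priv.PublicKey, ...) would return a PKCS#1 v1.5
	// verifier and reject this PSS signature; load a PSS verifier instead.
	verifier, err := signature.LoadRSAPSSVerifier(&priv.PublicKey, crypto.SHA256, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		log.Fatal(err)
	}
}
```
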
diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md
deleted file mode 100644
index 8a23b4f8513..00000000000
--- a/vendor/github.com/spf13/cobra/CHANGELOG.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Cobra Changelog
-
-## v1.1.3
-
-* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility
-
-## v1.1.2
-
-### Notable Changes
-
-* Bump license year to 2021 in golden files (#1309) @Bowbaq
-* Enhance PowerShell completion with custom comp (#1208) @Luap99
-* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670
-* Documentation readability improvements (#1228 etc.) @zaataylor etc.
-* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor
-
-## v1.1.1
-
-* **Fix:** yaml.v2 2.3.0 contained a unintended breaking change. This release reverts to yaml.v2 v2.2.8 which has recent critical CVE fixes, but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context.
-* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context.
-
-## v1.1.0
-
-### Notable Changes
-
-* Extend Go completions and revamp zsh comp (#1070)
-* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb
-* Add completion for help command (#1136)
-* Complete subcommands when TraverseChildren is set (#1171)
-* Fix stderr printing functions (#894)
-* fix: fish output redirection (#1247)
-
-## v1.0.0
-
-Announcing v1.0.0 of Cobra. 🎉
-
-### Notable Changes
-* Fish completion (including support for Go custom completion) @marckhouzam
-* API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam
-* Remove/replace SetOutput on Command - deprecated @jpmcb
-* add support for autolabel stale PR @xchapter7x
-* Add Labeler Actions @xchapter7x
-* Custom completions coded in Go (instead of Bash) @marckhouzam
-* Partial Revert of #922 @jharshman
-* Add Makefile to project @jharshman
-* Correct documentation for InOrStdin @desponda
-* Apply formatting to templates @jharshman
-* Revert change so help is printed on stdout again @marckhouzam
-* Update md2man to v2.0.0 @pdf
-* update viper to v1.4.0 @umarcor
-* Update cmd/root.go example in README.md @jharshman
diff --git a/vendor/github.com/spf13/cobra/MAINTAINERS b/vendor/github.com/spf13/cobra/MAINTAINERS
new file mode 100644
index 00000000000..4c5ac3dd997
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/MAINTAINERS
@@ -0,0 +1,13 @@
+maintainers:
+- spf13
+- johnSchnake
+- jpmcb
+- marckhouzam
+inactive:
+- anthonyfok
+- bep
+- bogem
+- broady
+- eparis
+- jharshman
+- wfernandes
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
index 472c73bf16f..443ef1a987a 100644
--- a/vendor/github.com/spf13/cobra/Makefile
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -9,11 +9,11 @@ ifeq (, $(shell which richgo))
$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo")
endif
-.PHONY: fmt lint test cobra_generator install_deps clean
+.PHONY: fmt lint test install_deps clean
default: all
-all: fmt test cobra_generator
+all: fmt test
fmt:
$(info ******************** checking formatting ********************)
@@ -23,15 +23,10 @@ lint:
$(info ******************** running lint tools ********************)
golangci-lint run -v
-test: install_deps lint
+test: install_deps
$(info ******************** running tests ********************)
richgo test -v ./...
-cobra_generator: install_deps
- $(info ******************** building generator ********************)
- mkdir -p $(BIN)
- make -C cobra all
-
install_deps:
$(info ******************** downloading dependencies ********************)
go get -v ./...
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 074e3979f89..2bf15208229 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -1,52 +1,26 @@

-Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files.
+Cobra is a library for creating powerful modern CLI applications.
-Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/),
+Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
[Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to
name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra.
[](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
-[](https://godoc.org/github.com/spf13/cobra)
+[](https://pkg.go.dev/github.com/spf13/cobra)
[](https://goreportcard.com/report/github.com/spf13/cobra)
[](https://gophers.slack.com/archives/CD3LP1199)
-# Table of Contents
-
-- [Overview](#overview)
-- [Concepts](#concepts)
- * [Commands](#commands)
- * [Flags](#flags)
-- [Installing](#installing)
-- [Usage](#usage)
- * [Using the Cobra Generator](user_guide.md#using-the-cobra-generator)
- * [Using the Cobra Library](user_guide.md#using-the-cobra-library)
- * [Working with Flags](user_guide.md#working-with-flags)
- * [Positional and Custom Arguments](user_guide.md#positional-and-custom-arguments)
- * [Example](user_guide.md#example)
- * [Help Command](user_guide.md#help-command)
- * [Usage Message](user_guide.md#usage-message)
- * [PreRun and PostRun Hooks](user_guide.md#prerun-and-postrun-hooks)
- * [Suggestions when "unknown command" happens](user_guide.md#suggestions-when-unknown-command-happens)
- * [Generating documentation for your command](user_guide.md#generating-documentation-for-your-command)
- * [Generating shell completions](user_guide.md#generating-shell-completions)
-- [Contributing](CONTRIBUTING.md)
-- [License](#license)
-
# Overview
Cobra is a library providing a simple interface to create powerful modern CLI
interfaces similar to git & go tools.
-Cobra is also an application that will generate your application scaffolding to rapidly
-develop a Cobra-based application.
-
Cobra provides:
* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
* Fully POSIX-compliant flags (including short & long versions)
* Nested subcommands
* Global, local and cascading flags
-* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
* Intelligent suggestions (`app srver`... did you mean `app server`?)
* Automatic help generation for commands and flags
* Automatic help flag recognition of `-h`, `--help`, etc.
@@ -54,7 +28,7 @@ Cobra provides:
* Automatically generated man pages for your application
* Command aliases so you can change things without breaking them
* The flexibility to define your own help, usage, etc.
-* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps
# Concepts
@@ -88,7 +62,7 @@ have children commands and optionally run an action.
In the example above, 'server' is the command.
-[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command)
## Flags
@@ -105,10 +79,11 @@ which maintains the same interface while adding POSIX compliance.
# Installing
Using Cobra is easy. First, use `go get` to install the latest version
-of the library. This command will install the `cobra` generator executable
-along with the library and its dependencies:
+of the library.
- go get -u github.com/spf13/cobra
+```
+go get -u github.com/spf13/cobra@latest
+```
Next, include Cobra in your application:
@@ -117,8 +92,19 @@ import "github.com/spf13/cobra"
```
# Usage
+`cobra-cli` is a command line program to generate cobra applications and command files.
+It will bootstrap your application scaffolding to rapidly
+develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application.
+
+It can be installed by running:
+
+```
+go install github.com/spf13/cobra-cli@latest
+```
+
+For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
-See [User Guide](user_guide.md).
+For complete details on using the Cobra library, please read [The Cobra User Guide](user_guide.md).
# License
diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go
new file mode 100644
index 00000000000..0c631913d45
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.go
@@ -0,0 +1,49 @@
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+const (
+ activeHelpMarker = "_activeHelp_ "
+ // The below values should not be changed: programs will be using them explicitly
+ // in their user documentation, and users will be using them explicitly.
+ activeHelpEnvVarSuffix = "_ACTIVE_HELP"
+ activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP"
+ activeHelpGlobalDisable = "0"
+)
+
+// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp.
+// Such strings will be processed by the completion script and will be shown as ActiveHelp
+// to the user.
+// The array parameter should be the array that will contain the completions.
+// This function can be called multiple times before and/or after completions are added to
+// the array. Each time this function is called with the same array, the new
+// ActiveHelp line will be shown below the previous ones when completion is triggered.
+func AppendActiveHelp(compArray []string, activeHelpStr string) []string {
+ return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr))
+}
+
+// GetActiveHelpConfig returns the value of the ActiveHelp environment variable
+// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper
+// case, with all - replaced by _.
+// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP
+// is set to "0".
+func GetActiveHelpConfig(cmd *Command) string {
+ activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar)
+ if activeHelpCfg != activeHelpGlobalDisable {
+ activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name()))
+ }
+ return activeHelpCfg
+}
+
+// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment
+// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the
+// root command in upper case, with all - replaced by _.
+func activeHelpEnvVar(name string) string {
+ // This format should not be changed: users will be using it explicitly.
+ activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix))
+ return strings.ReplaceAll(activeHelpEnvVar, "-", "_")
+}
diff --git a/vendor/github.com/spf13/cobra/active_help.md b/vendor/github.com/spf13/cobra/active_help.md
new file mode 100644
index 00000000000..5e7f59af380
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.md
@@ -0,0 +1,157 @@
+# Active Help
+
+Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion.
+
+For example,
+```
+bash-5.1$ helm repo add [tab]
+You must choose a name for the repo you are adding.
+
+bash-5.1$ bin/helm package [tab]
+Please specify the path to the chart to package
+
+bash-5.1$ bin/helm package [tab][tab]
+bin/ internal/ scripts/ pkg/ testdata/
+```
+
+**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program.
+## Supported shells
+
+Active Help is currently only supported for the following shells:
+- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed.
+- Zsh
+
+## Adding Active Help messages
+
+As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md).
+
+Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details.
+
+### Active Help for nouns
+
+Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. Please notice the use of `cobra.AppendActiveHelp(...)` in the following example:
+
+```go
+cmd := &cobra.Command{
+ Use: "add [NAME] [URL]",
+ Short: "add a chart repository",
+ Args: require.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return addRepo(args)
+ },
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ var comps []string
+ if len(args) == 0 {
+ comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding")
+ } else if len(args) == 1 {
+ comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding")
+ } else {
+ comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments")
+ }
+ return comps, cobra.ShellCompDirectiveNoFileComp
+ },
+}
+```
+The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. It yields the following behavior:
+```
+bash-5.1$ helm repo add [tab]
+You must choose a name for the repo you are adding
+
+bash-5.1$ helm repo add grafana [tab]
+You must specify the URL for the repo you are adding
+
+bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab]
+This command does not take any more arguments
+```
+**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions.
+
+### Active Help for flags
+
+Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. For example:
+```go
+_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 2 {
+ return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp
+ }
+ return compVersionFlag(args[1], toComplete)
+ })
+```
+The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag.
+```
+bash-5.1$ bin/helm install myrelease --version 2.0.[tab]
+You must first specify the chart to install before the --version flag can be completed
+
+bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab]
+2.0.1 2.0.2 2.0.3
+```
+
+## User control of Active Help
+
+You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any.
+Allowing users to configure Active Help is entirely optional; you can use Active Help in your program without doing anything about Active Help configuration.
+
+The way to configure Active Help is to use the program's Active Help environment
+variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where `<PROGRAM>` is the name of your
+program in uppercase with any `-` replaced by an `_`. The variable should be set by the user to whatever
+Active Help configuration values are supported by the program.
+
+For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`. Then a user
+would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell.
+
+For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the
+Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages
+should or should not be added (instead of reading the environment variable directly).
+
+For example:
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ activeHelpLevel := cobra.GetActiveHelpConfig(cmd)
+
+ var comps []string
+ if len(args) == 0 {
+ if activeHelpLevel != "off" {
+ comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding")
+ }
+ } else if len(args) == 1 {
+ if activeHelpLevel != "off" {
+ comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding")
+ }
+ } else {
+ if activeHelpLevel == "local" {
+ comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments")
+ }
+ }
+ return comps, cobra.ShellCompDirectiveNoFileComp
+},
+```
+**Note 1**: If the `<PROGRAM>_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly.
+
+**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `<PROGRAM>_ACTIVE_HELP` is set to.
+
+**Note 3**: If the user does not set `<PROGRAM>_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string.
+## Active Help with Cobra's default completion command
+
+Cobra provides a default `completion` command for programs that wish to use it.
+When using the default `completion` command, Active Help is configurable in the same
+fashion as described above using environment variables. You may wish to document this in more
+detail for your users.
+
+## Debugging Active Help
+
+Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details.
+
+When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the active help environment variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where `<PROGRAM>` is the name of your program in uppercase with any `-` replaced by an `_`. For example, we can test deactivating some Active Help as shown below:
+```
+$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h
+bitnami/haproxy
+bitnami/harbor
+_activeHelp_ WARNING: cannot re-use a name that is still in use
+:0
+Completion ended with directive: ShellCompDirectiveDefault
+
+$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h
+bitnami/haproxy
+bitnami/harbor
+:0
+Completion ended with directive: ShellCompDirectiveDefault
+```
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
index 70e9b262912..20a022b3084 100644
--- a/vendor/github.com/spf13/cobra/args.go
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -107,3 +107,15 @@ func RangeArgs(min int, max int) PositionalArgs {
return nil
}
}
+
+// MatchAll allows combining several PositionalArgs to work in concert.
+func MatchAll(pargs ...PositionalArgs) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ for _, parg := range pargs {
+ if err := parg(cmd, args); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
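
A brief sketch of how the new MatchAll helper composes argument validators; the command name and valid arguments are illustrative:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:       "get [RESOURCE]",
		ValidArgs: []string{"pods", "nodes"},
		// Require exactly one argument AND that it appear in ValidArgs.
		Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("getting", args[0])
		},
	}
	cmd.SetArgs([]string{"pods"})
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```
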
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index 733f4d1211d..cb7e1953787 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -24,7 +24,7 @@ func writePreamble(buf io.StringWriter, name string) {
WriteStringAndCheck(buf, fmt.Sprintf(`
__%[1]s_debug()
{
- if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+ if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
fi
}
@@ -73,7 +73,8 @@ __%[1]s_handle_go_custom_completion()
# Prepare the command to request completions for the program.
# Calling ${words[0]} instead of directly %[1]s allows to handle aliases
args=("${words[@]:1}")
- requestComp="${words[0]} %[2]s ${args[*]}"
+ # Disable ActiveHelp which is not supported for bash completion v1
+ requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}"
lastParam=${words[$((${#words[@]}-1))]}
lastChar=${lastParam:$((${#lastParam}-1)):1}
@@ -99,7 +100,7 @@ __%[1]s_handle_go_custom_completion()
directive=0
fi
__%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
- __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}"
if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
# Error code. No completion.
@@ -125,7 +126,7 @@ __%[1]s_handle_go_custom_completion()
local fullFilter filter filteringCmd
# Do not use quotes around the $out variable or else newline
# characters will be kept.
- for filter in ${out[*]}; do
+ for filter in ${out}; do
fullFilter+="$filter|"
done
@@ -134,9 +135,9 @@ __%[1]s_handle_go_custom_completion()
$filteringCmd
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
# File completion for directories only
- local subDir
+ local subdir
# Use printf to strip any trailing newline
- subdir=$(printf "%%s" "${out[0]}")
+ subdir=$(printf "%%s" "${out}")
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
__%[1]s_handle_subdirs_in_dir_flag "$subdir"
@@ -147,7 +148,7 @@ __%[1]s_handle_go_custom_completion()
else
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
- done < <(compgen -W "${out[*]}" -- "$cur")
+ done < <(compgen -W "${out}" -- "$cur")
fi
}
@@ -187,13 +188,19 @@ __%[1]s_handle_reply()
PREFIX=""
cur="${cur#*=}"
${flags_completion[${index}]}
- if [ -n "${ZSH_VERSION}" ]; then
+ if [ -n "${ZSH_VERSION:-}" ]; then
# zsh completion needs --flag= prefix
eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
fi
fi
fi
- return 0;
+
+ if [[ -z "${flag_parsing_disabled}" ]]; then
+ # If flag parsing is enabled, we have completed the flags and can return.
+ # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough
+ # to possibly call handle_go_custom_completion.
+ return 0;
+ fi
;;
esac
@@ -232,13 +239,13 @@ __%[1]s_handle_reply()
fi
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
- if declare -F __%[1]s_custom_func >/dev/null; then
- # try command name qualified custom func
- __%[1]s_custom_func
- else
- # otherwise fall back to unqualified for compatibility
- declare -F __custom_func >/dev/null && __custom_func
- fi
+ if declare -F __%[1]s_custom_func >/dev/null; then
+ # try command name qualified custom func
+ __%[1]s_custom_func
+ else
+ # otherwise fall back to unqualified for compatibility
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
fi
# available in bash-completion >= 2, not always present on macOS
@@ -272,7 +279,7 @@ __%[1]s_handle_flag()
# if a command required a flag, and we found it, unset must_have_one_flag()
local flagname=${words[c]}
- local flagvalue
+ local flagvalue=""
# if the word contained an =
if [[ ${words[c]} == *"="* ]]; then
flagvalue=${flagname#*=} # take in as flagvalue after the =
@@ -291,7 +298,7 @@ __%[1]s_handle_flag()
# keep flag value with flagname as flaghash
# flaghash variable is an associative array which is only supported in bash > 3.
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
if [ -n "${flagvalue}" ] ; then
flaghash[${flagname}]=${flagvalue}
elif [ -n "${words[ $((c+1)) ]}" ] ; then
@@ -303,7 +310,7 @@ __%[1]s_handle_flag()
# skip the argument to a two word flag
if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
- __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
+ __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
c=$((c+1))
# if we are looking for a flags value, don't show commands
if [[ $c -eq $cword ]]; then
@@ -363,7 +370,7 @@ __%[1]s_handle_word()
__%[1]s_handle_command
elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
# aliashash variable is an associative array which is only supported in bash > 3.
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
words[c]=${aliashash[${words[c]}]}
__%[1]s_handle_command
else
@@ -377,11 +384,11 @@ __%[1]s_handle_word()
`, name, ShellCompNoDescRequestCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
}
func writePostscript(buf io.StringWriter, name string) {
- name = strings.Replace(name, ":", "__", -1)
+ name = strings.ReplaceAll(name, ":", "__")
WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name))
WriteStringAndCheck(buf, fmt.Sprintf(`{
local cur prev words cword split
@@ -394,6 +401,7 @@ func writePostscript(buf io.StringWriter, name string) {
fi
local c=0
+ local flag_parsing_disabled=
local flags=()
local two_word_flags=()
local local_nonpersistent_flags=()
@@ -403,8 +411,8 @@ func writePostscript(buf io.StringWriter, name string) {
local command_aliases=()
local must_have_one_flag=()
local must_have_one_noun=()
- local has_completion_function
- local last_command
+ local has_completion_function=""
+ local last_command=""
local nouns=()
local noun_aliases=()
@@ -535,6 +543,11 @@ func writeFlags(buf io.StringWriter, cmd *Command) {
flags_completion=()
`)
+
+ if cmd.DisableFlagParsing {
+ WriteStringAndCheck(buf, " flag_parsing_disabled=1\n")
+ }
+
localNonPersistentFlags := cmd.LocalNonPersistentFlags()
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
@@ -609,7 +622,7 @@ func writeCmdAliases(buf io.StringWriter, cmd *Command) {
sort.Strings(cmd.Aliases)
- WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+ WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n"))
for _, value := range cmd.Aliases {
WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value))
WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
@@ -633,8 +646,8 @@ func gen(buf io.StringWriter, cmd *Command) {
gen(buf, c)
}
commandName := cmd.CommandPath()
- commandName = strings.Replace(commandName, " ", "_", -1)
- commandName = strings.Replace(commandName, ":", "__", -1)
+ commandName = strings.ReplaceAll(commandName, " ", "_")
+ commandName = strings.ReplaceAll(commandName, ":", "__")
if cmd.Root() == cmd {
WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName))
diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go
index 8859b57c42c..767bf031200 100644
--- a/vendor/github.com/spf13/cobra/bash_completionsV2.go
+++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go
@@ -78,7 +78,7 @@ __%[1]s_get_completion_results() {
directive=0
fi
__%[1]s_debug "The completion directive is: ${directive}"
- __%[1]s_debug "The completions are: ${out[*]}"
+ __%[1]s_debug "The completions are: ${out}"
}
__%[1]s_process_completion_results() {
@@ -111,13 +111,18 @@ __%[1]s_process_completion_results() {
fi
fi
+ # Separate activeHelp from normal completions
+ local completions=()
+ local activeHelp=()
+ __%[1]s_extract_activeHelp
+
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local fullFilter filter filteringCmd
- # Do not use quotes around the $out variable or else newline
+ # Do not use quotes around the $completions variable or else newline
# characters will be kept.
- for filter in ${out[*]}; do
+ for filter in ${completions[*]}; do
fullFilter+="$filter|"
done
@@ -129,7 +134,7 @@ __%[1]s_process_completion_results() {
# Use printf to strip any trailing newline
local subdir
- subdir=$(printf "%%s" "${out[0]}")
+ subdir=$(printf "%%s" "${completions[0]}")
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
@@ -138,52 +143,110 @@ __%[1]s_process_completion_results() {
_filedir -d
fi
else
- __%[1]s_handle_standard_completion_case
+ __%[1]s_handle_completion_types
fi
__%[1]s_handle_special_char "$cur" :
__%[1]s_handle_special_char "$cur" =
+
+ # Print the activeHelp statements before we finish
+ if [ ${#activeHelp} -ne 0 ]; then
+ printf "\n";
+ printf "%%s\n" "${activeHelp[@]}"
+ printf "\n"
+
+ # The prompt format is only available from bash 4.4.
+ # We test if it is available before using it.
+ if (x=${PS1@P}) 2> /dev/null; then
+ printf "%%s" "${PS1@P}${COMP_LINE[@]}"
+ else
+ # Can't print the prompt. Just print the
+ # text the user had typed, it is workable enough.
+ printf "%%s" "${COMP_LINE[@]}"
+ fi
+ fi
+}
+
+# Separate activeHelp lines from real completions.
+# Fills the $activeHelp and $completions arrays.
+__%[1]s_extract_activeHelp() {
+ local activeHelpMarker="%[8]s"
+ local endIndex=${#activeHelpMarker}
+
+ while IFS='' read -r comp; do
+ if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then
+ comp=${comp:endIndex}
+ __%[1]s_debug "ActiveHelp found: $comp"
+ if [ -n "$comp" ]; then
+ activeHelp+=("$comp")
+ fi
+ else
+ # Not an activeHelp line but a normal completion
+ completions+=("$comp")
+ fi
+ done < <(printf "%%s\n" "${out}")
+}
+
+__%[1]s_handle_completion_types() {
+ __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE"
+
+ case $COMP_TYPE in
+ 37|42)
+ # Type: menu-complete/menu-complete-backward and insert-completions
+ # If the user requested inserting one completion at a time, or all
+ # completions at once on the command-line we must remove the descriptions.
+ # https://github.com/spf13/cobra/issues/1508
+ local tab=$'\t' comp
+ while IFS='' read -r comp; do
+ [[ -z $comp ]] && continue
+ # Strip any description
+ comp=${comp%%%%$tab*}
+ # Only consider the completions that match
+ if [[ $comp == "$cur"* ]]; then
+ COMPREPLY+=("$comp")
+ fi
+ done < <(printf "%%s\n" "${completions[@]}")
+ ;;
+
+ *)
+ # Type: complete (normal completion)
+ __%[1]s_handle_standard_completion_case
+ ;;
+ esac
}
__%[1]s_handle_standard_completion_case() {
- local tab comp
- tab=$(printf '\t')
+ local tab=$'\t' comp
+
+ # Short circuit to optimize if we don't have descriptions
+ if [[ "${completions[*]}" != *$tab* ]]; then
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
+ return 0
+ fi
local longest=0
+ local compline
# Look for the longest completion so that we can format things nicely
- while IFS='' read -r comp; do
+ while IFS='' read -r compline; do
+ [[ -z $compline ]] && continue
# Strip any description before checking the length
- comp=${comp%%%%$tab*}
+ comp=${compline%%%%$tab*}
# Only consider the completions that match
- comp=$(compgen -W "$comp" -- "$cur")
+ [[ $comp == "$cur"* ]] || continue
+ COMPREPLY+=("$compline")
if ((${#comp}>longest)); then
longest=${#comp}
fi
- done < <(printf "%%s\n" "${out[@]}")
-
- local completions=()
- while IFS='' read -r comp; do
- if [ -z "$comp" ]; then
- continue
- fi
-
- __%[1]s_debug "Original comp: $comp"
- comp="$(__%[1]s_format_comp_descriptions "$comp" "$longest")"
- __%[1]s_debug "Final comp: $comp"
- completions+=("$comp")
- done < <(printf "%%s\n" "${out[@]}")
-
- while IFS='' read -r comp; do
- COMPREPLY+=("$comp")
- done < <(compgen -W "${completions[*]}" -- "$cur")
+ done < <(printf "%%s\n" "${completions[@]}")
# If there is a single completion left, remove the description text
if [ ${#COMPREPLY[*]} -eq 1 ]; then
__%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
- comp="${COMPREPLY[0]%%%% *}"
+ comp="${COMPREPLY[0]%%%%$tab*}"
__%[1]s_debug "Removed description from single completion, which is now: ${comp}"
- COMPREPLY=()
- COMPREPLY+=("$comp")
+ COMPREPLY[0]=$comp
+ else # Format the descriptions
+ __%[1]s_format_comp_descriptions $longest
fi
}
@@ -202,45 +265,48 @@ __%[1]s_handle_special_char()
__%[1]s_format_comp_descriptions()
{
- local tab
- tab=$(printf '\t')
- local comp="$1"
- local longest=$2
-
- # Properly format the description string which follows a tab character if there is one
- if [[ "$comp" == *$tab* ]]; then
- desc=${comp#*$tab}
- comp=${comp%%%%$tab*}
-
- # $COLUMNS stores the current shell width.
- # Remove an extra 4 because we add 2 spaces and 2 parentheses.
- maxdesclength=$(( COLUMNS - longest - 4 ))
-
- # Make sure we can fit a description of at least 8 characters
- # if we are to align the descriptions.
- if [[ $maxdesclength -gt 8 ]]; then
- # Add the proper number of spaces to align the descriptions
- for ((i = ${#comp} ; i < longest ; i++)); do
- comp+=" "
- done
- else
- # Don't pad the descriptions so we can fit more text after the completion
- maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
- fi
+ local tab=$'\t'
+ local comp desc maxdesclength
+ local longest=$1
+
+ local i ci
+ for ci in ${!COMPREPLY[*]}; do
+ comp=${COMPREPLY[ci]}
+ # Properly format the description string which follows a tab character if there is one
+ if [[ "$comp" == *$tab* ]]; then
+ __%[1]s_debug "Original comp: $comp"
+ desc=${comp#*$tab}
+ comp=${comp%%%%$tab*}
+
+ # $COLUMNS stores the current shell width.
+ # Remove an extra 4 because we add 2 spaces and 2 parentheses.
+ maxdesclength=$(( COLUMNS - longest - 4 ))
+
+ # Make sure we can fit a description of at least 8 characters
+ # if we are to align the descriptions.
+ if [[ $maxdesclength -gt 8 ]]; then
+ # Add the proper number of spaces to align the descriptions
+ for ((i = ${#comp} ; i < longest ; i++)); do
+ comp+=" "
+ done
+ else
+ # Don't pad the descriptions so we can fit more text after the completion
+ maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
+ fi
- # If there is enough space for any description text,
- # truncate the descriptions that are too long for the shell width
- if [ $maxdesclength -gt 0 ]; then
- if [ ${#desc} -gt $maxdesclength ]; then
- desc=${desc:0:$(( maxdesclength - 1 ))}
- desc+="…"
+ # If there is enough space for any description text,
+ # truncate the descriptions that are too long for the shell width
+ if [ $maxdesclength -gt 0 ]; then
+ if [ ${#desc} -gt $maxdesclength ]; then
+ desc=${desc:0:$(( maxdesclength - 1 ))}
+ desc+="…"
+ fi
+ comp+=" ($desc)"
fi
- comp+=" ($desc)"
+ COMPREPLY[ci]=$comp
+ __%[1]s_debug "Final comp: $comp"
fi
- fi
-
- # Must use printf to escape all special characters
- printf "%%q" "${comp}"
+ done
}
__start_%[1]s()
@@ -281,7 +347,8 @@ fi
# ex: ts=4 sw=4 et filetype=sh
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ activeHelpMarker))
}
// GenBashCompletionFileV2 generates Bash completion version 2.
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 2cc18891d77..675bb1340af 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -18,6 +18,7 @@ package cobra
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"os"
@@ -166,7 +167,7 @@ type Command struct {
// errWriter is a writer defined by the user that replaces stderr
errWriter io.Writer
- //FParseErrWhitelist flag parse errors to be ignored
+ // FParseErrWhitelist flag parse errors to be ignored
FParseErrWhitelist FParseErrWhitelist
// CompletionOptions is a set of options to control the handling of shell completion
@@ -224,12 +225,23 @@ type Command struct {
SuggestionsMinimumDistance int
}
-// Context returns underlying command context. If command wasn't
-// executed with ExecuteContext Context returns Background context.
+// Context returns underlying command context. If command was executed
+// with ExecuteContext or the context was set with SetContext, the
+// previously set context will be returned. Otherwise, nil is returned.
+//
+// Notice that a call to Execute and ExecuteC will replace a nil context of
+// a command with a context.Background, so a background context will be
+// returned by Context after one of these functions has been called.
func (c *Command) Context() context.Context {
return c.ctx
}
+// SetContext sets context for the command. It is set to context.Background by default and will be overwritten by
+// Command.ExecuteContext or Command.ExecuteContextC
+func (c *Command) SetContext(ctx context.Context) {
+ c.ctx = ctx
+}
+
// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
// particularly useful when testing.
func (c *Command) SetArgs(a []string) {
@@ -852,6 +864,10 @@ func (c *Command) execute(a []string) (err error) {
if err := c.validateRequiredFlags(); err != nil {
return err
}
+ if err := c.validateFlagGroups(); err != nil {
+ return err
+ }
+
if c.RunE != nil {
if err := c.RunE(c, argWoFlags); err != nil {
return err
@@ -975,7 +991,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
if err != nil {
// Always show help if requested, even if SilenceErrors is in
// effect
- if err == flag.ErrHelp {
+ if errors.Is(err, flag.ErrHelp) {
cmd.HelpFunc()(cmd, args)
return cmd, nil
}
@@ -997,7 +1013,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
func (c *Command) ValidateArgs(args []string) error {
if c.Args == nil {
- return nil
+ return ArbitraryArgs(c, args)
}
return c.Args(c, args)
}
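
A sketch of the new SetContext in use; per the doc comment above, Execute only replaces a nil context, so a context set this way is preserved (the timeout value is arbitrary):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{
		Use: "app",
		RunE: func(cmd *cobra.Command, args []string) error {
			// cmd.Context() returns the context set below,
			// not context.Background().
			deadline, ok := cmd.Context().Deadline()
			fmt.Println("deadline set:", ok, deadline)
			return nil
		},
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	rootCmd.SetContext(ctx)
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```
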
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
index 6159c1cc19d..bb5dad90b7f 100644
--- a/vendor/github.com/spf13/cobra/command_notwin.go
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package cobra
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
index 8768b1736dc..a84f5a82aab 100644
--- a/vendor/github.com/spf13/cobra/command_win.go
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package cobra
diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go
index b849b9c8444..2c24839988c 100644
--- a/vendor/github.com/spf13/cobra/completions.go
+++ b/vendor/github.com/spf13/cobra/completions.go
@@ -93,6 +93,8 @@ type CompletionOptions struct {
// DisableDescriptions turns off all completion descriptions for shells
// that support them
DisableDescriptions bool
+ // HiddenDefaultCmd makes the default 'completion' command hidden
+ HiddenDefaultCmd bool
}
// NoFileCompletions can be used to disable file completion for commands that should
@@ -101,6 +103,14 @@ func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string
return nil, ShellCompDirectiveNoFileComp
}
+// FixedCompletions can be used to create a completion function which always
+// returns the same results.
+func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return choices, directive
+ }
+}
+
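
A short sketch of FixedCompletions registering a static completion list for a flag; the flag name and values are illustrative:

```go
package main

import (
	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{Use: "app", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().String("output", "table", "output format")

	// Always suggest the same fixed set of values for --output.
	_ = cmd.RegisterFlagCompletionFunc("output",
		cobra.FixedCompletions([]string{"table", "json", "yaml"}, cobra.ShellCompDirectiveNoFileComp))

	_ = cmd.Execute()
}
```
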
// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
flag := c.Flag(flagName)
@@ -168,6 +178,12 @@ func (c *Command) initCompleteCmd(args []string) {
noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
for _, comp := range completions {
+ if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable {
+ // Remove all activeHelp entries in this case
+ if strings.HasPrefix(comp, activeHelpMarker) {
+ continue
+ }
+ }
if noDescriptions {
// Remove any description that may be included following a tab character.
comp = strings.Split(comp, "\t")[0]
@@ -226,7 +242,17 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
if c.Root().TraverseChildren {
finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs)
} else {
- finalCmd, finalArgs, err = c.Root().Find(trimmedArgs)
+ // For Root commands that don't specify any value for their Args fields, when we call
+ // Find(), if those Root commands don't have any sub-commands, they will accept arguments.
+ // However, because we have added the __complete sub-command in the current code path, the
+ // call to Find() -> legacyArgs() will return an error if there are any arguments.
+ // To avoid this, we first remove the __complete command to get back to having no sub-commands.
+ rootCmd := c.Root()
+ if len(rootCmd.Commands()) == 1 {
+ rootCmd.RemoveCommand(c)
+ }
+
+ finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs)
}
if err != nil {
// Unable to find the real command. E.g., someInvalidCmd
@@ -266,6 +292,12 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
}
}
+ // We only remove the flags from the arguments if DisableFlagParsing is not set.
+ // This is important for commands which have requested to do their own flag completion.
+ if !finalCmd.DisableFlagParsing {
+ finalArgs = finalCmd.Flags().Args()
+ }
+
if flag != nil && flagCompletion {
// Check if we are completing a flag value subject to annotations
if validExts, present := flag.Annotations[BashCompFilenameExt]; present {
@@ -290,12 +322,19 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
}
}
+ var completions []string
+ var directive ShellCompDirective
+
+ // Enforce flag groups before doing flag completions
+ finalCmd.enforceFlagGroupsForCompletion()
+
+ // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true;
+ // doing this allows for completion of persistent flag names even for commands that disable flag parsing.
+ //
// When doing completion of a flag name, as soon as an argument starts with
// a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
// the flag name to be complete
if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion {
- var completions []string
-
// First check for required flags
completions = completeRequireFlags(finalCmd, toComplete)
@@ -322,86 +361,86 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
})
}
- directive := ShellCompDirectiveNoFileComp
+ directive = ShellCompDirectiveNoFileComp
if len(completions) == 1 && strings.HasSuffix(completions[0], "=") {
// If there is a single completion, the shell usually adds a space
// after the completion. We don't want that if the flag ends with an =
directive = ShellCompDirectiveNoSpace
}
- return finalCmd, completions, directive, nil
- }
- // We only remove the flags from the arguments if DisableFlagParsing is not set.
- // This is important for commands which have requested to do their own flag completion.
- if !finalCmd.DisableFlagParsing {
- finalArgs = finalCmd.Flags().Args()
- }
-
- var completions []string
- directive := ShellCompDirectiveDefault
- if flag == nil {
- foundLocalNonPersistentFlag := false
- // If TraverseChildren is true on the root command we don't check for
- // local flags because we can use a local flag on a parent command
- if !finalCmd.Root().TraverseChildren {
- // Check if there are any local, non-persistent flags on the command-line
- localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
- finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
- if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
- foundLocalNonPersistentFlag = true
- }
- })
+ if !finalCmd.DisableFlagParsing {
+ // If DisableFlagParsing==false, we have completed the flags as known by Cobra;
+ // we can return what we found.
+ // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we
+ // let the logic continue to see if ValidArgsFunction needs to be called.
+ return finalCmd, completions, directive, nil
}
+ } else {
+ directive = ShellCompDirectiveDefault
+ if flag == nil {
+ foundLocalNonPersistentFlag := false
+ // If TraverseChildren is true on the root command we don't check for
+ // local flags because we can use a local flag on a parent command
+ if !finalCmd.Root().TraverseChildren {
+ // Check if there are any local, non-persistent flags on the command-line
+ localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
+ foundLocalNonPersistentFlag = true
+ }
+ })
+ }
- // Complete subcommand names, including the help command
- if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
- // We only complete sub-commands if:
- // - there are no arguments on the command-line and
- // - there are no local, non-persistent flags on the command-line or TraverseChildren is true
- for _, subCmd := range finalCmd.Commands() {
- if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
- if strings.HasPrefix(subCmd.Name(), toComplete) {
- completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ // Complete subcommand names, including the help command
+ if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
+ // We only complete sub-commands if:
+ // - there are no arguments on the command-line and
+ // - there are no local, non-persistent flags on the command-line or TraverseChildren is true
+ for _, subCmd := range finalCmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ directive = ShellCompDirectiveNoFileComp
}
- directive = ShellCompDirectiveNoFileComp
}
}
- }
- // Complete required flags even without the '-' prefix
- completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
-
- // Always complete ValidArgs, even if we are completing a subcommand name.
- // This is for commands that have both subcommands and ValidArgs.
- if len(finalCmd.ValidArgs) > 0 {
- if len(finalArgs) == 0 {
- // ValidArgs are only for the first argument
- for _, validArg := range finalCmd.ValidArgs {
- if strings.HasPrefix(validArg, toComplete) {
- completions = append(completions, validArg)
+ // Complete required flags even without the '-' prefix
+ completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
+
+ // Always complete ValidArgs, even if we are completing a subcommand name.
+ // This is for commands that have both subcommands and ValidArgs.
+ if len(finalCmd.ValidArgs) > 0 {
+ if len(finalArgs) == 0 {
+ // ValidArgs are only for the first argument
+ for _, validArg := range finalCmd.ValidArgs {
+ if strings.HasPrefix(validArg, toComplete) {
+ completions = append(completions, validArg)
+ }
}
- }
- directive = ShellCompDirectiveNoFileComp
-
- // If no completions were found within commands or ValidArgs,
- // see if there are any ArgAliases that should be completed.
- if len(completions) == 0 {
- for _, argAlias := range finalCmd.ArgAliases {
- if strings.HasPrefix(argAlias, toComplete) {
- completions = append(completions, argAlias)
+ directive = ShellCompDirectiveNoFileComp
+
+ // If no completions were found within commands or ValidArgs,
+ // see if there are any ArgAliases that should be completed.
+ if len(completions) == 0 {
+ for _, argAlias := range finalCmd.ArgAliases {
+ if strings.HasPrefix(argAlias, toComplete) {
+ completions = append(completions, argAlias)
+ }
}
}
}
+
+ // If there are ValidArgs specified (even if they don't match), we stop completion.
+ // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
+ return finalCmd, completions, directive, nil
}
- // If there are ValidArgs specified (even if they don't match), we stop completion.
- // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
- return finalCmd, completions, directive, nil
+ // Let the logic continue so as to add any ValidArgsFunction completions,
+ // even if we already found sub-commands.
+ // This is for commands that have subcommands but also specify a ValidArgsFunction.
}
-
- // Let the logic continue so as to add any ValidArgsFunction completions,
- // even if we already found sub-commands.
- // This is for commands that have subcommands but also specify a ValidArgsFunction.
}
// Find the completion function for the flag or command
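
Annotation: the net effect of the restructuring above is that flag-name completion is final when DisableFlagParsing is false, while commands that disable flag parsing still get the flag names Cobra knows (such as inherited persistent flags) and then fall through to their ValidArgsFunction. A sketch of the kind of pass-through command that benefits; all names are illustrative:

    passthrough := &cobra.Command{
        Use:                "wrapper",
        DisableFlagParsing: true, // flags are forwarded verbatim to a wrapped tool
        ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
            // With this patch, this function is still reached when toComplete starts
            // with '-', so the wrapped tool's own flags can be suggested here too.
            return []string{"--wrapped-flag"}, cobra.ShellCompDirectiveNoFileComp
        },
    }
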
@@ -589,39 +628,43 @@ func (c *Command) initDefaultCompletionCmd() {
completionCmd := &Command{
Use: compCmdName,
- Short: "generate the autocompletion script for the specified shell",
- Long: fmt.Sprintf(`
-Generate the autocompletion script for %[1]s for the specified shell.
+ Short: "Generate the autocompletion script for the specified shell",
+ Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell.
See each sub-command's help for details on how to use the generated script.
`, c.Root().Name()),
Args: NoArgs,
ValidArgsFunction: NoFileCompletions,
+ Hidden: c.CompletionOptions.HiddenDefaultCmd,
}
c.AddCommand(completionCmd)
out := c.OutOrStdout()
noDesc := c.CompletionOptions.DisableDescriptions
- shortDesc := "generate the autocompletion script for %s"
+ shortDesc := "Generate the autocompletion script for %s"
bash := &Command{
Use: "bash",
Short: fmt.Sprintf(shortDesc, "bash"),
- Long: fmt.Sprintf(`
-Generate the autocompletion script for the bash shell.
+ Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell.
This script depends on the 'bash-completion' package.
If it is not installed already, you can install it via your OS's package manager.
To load completions in your current shell session:
-$ source <(%[1]s completion bash)
+
+ source <(%[1]s completion bash)
To load completions for every new session, execute once:
-Linux:
- $ %[1]s completion bash > /etc/bash_completion.d/%[1]s
-MacOS:
- $ %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s
+
+#### Linux:
+
+ %[1]s completion bash > /etc/bash_completion.d/%[1]s
+
+#### macOS:
+
+ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
You will need to start a new shell for this setup to take effect.
- `, c.Root().Name()),
+`, c.Root().Name()),
Args: NoArgs,
DisableFlagsInUseLine: true,
ValidArgsFunction: NoFileCompletions,
@@ -636,19 +679,26 @@ You will need to start a new shell for this setup to take effect.
zsh := &Command{
Use: "zsh",
Short: fmt.Sprintf(shortDesc, "zsh"),
- Long: fmt.Sprintf(`
-Generate the autocompletion script for the zsh shell.
+ Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell.
If shell completion is not already enabled in your environment you will need
to enable it. You can execute the following once:
-$ echo "autoload -U compinit; compinit" >> ~/.zshrc
+ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+To load completions in your current shell session:
+
+ source <(%[1]s completion zsh); compdef _%[1]s %[1]s
To load completions for every new session, execute once:
-# Linux:
-$ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
-# macOS:
-$ %[1]s completion zsh > /usr/local/share/zsh/site-functions/_%[1]s
+
+#### Linux:
+
+ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
+
+#### macOS:
+
+ %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s
You will need to start a new shell for this setup to take effect.
`, c.Root().Name()),
@@ -668,14 +718,15 @@ You will need to start a new shell for this setup to take effect.
fish := &Command{
Use: "fish",
Short: fmt.Sprintf(shortDesc, "fish"),
- Long: fmt.Sprintf(`
-Generate the autocompletion script for the fish shell.
+ Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell.
To load completions in your current shell session:
-$ %[1]s completion fish | source
+
+ %[1]s completion fish | source
To load completions for every new session, execute once:
-$ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
+
+ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
You will need to start a new shell for this setup to take effect.
`, c.Root().Name()),
@@ -692,11 +743,11 @@ You will need to start a new shell for this setup to take effect.
powershell := &Command{
Use: "powershell",
Short: fmt.Sprintf(shortDesc, "powershell"),
- Long: fmt.Sprintf(`
-Generate the autocompletion script for powershell.
+ Long: fmt.Sprintf(`Generate the autocompletion script for powershell.
To load completions in your current shell session:
-PS C:\> %[1]s completion powershell | Out-String | Invoke-Expression
+
+ %[1]s completion powershell | Out-String | Invoke-Expression
To load completions for every new session, add the output of the above command
to your powershell profile.
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
index bb57fd5685f..005ee6be73e 100644
--- a/vendor/github.com/spf13/cobra/fish_completions.go
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -11,8 +11,8 @@ import (
func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
// Variables should not contain a '-' or ':' character
nameForVar := name
- nameForVar = strings.Replace(nameForVar, "-", "_", -1)
- nameForVar = strings.Replace(nameForVar, ":", "_", -1)
+ nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
+ nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
compCmd := ShellCompRequestCmd
if !includeDesc {
@@ -38,7 +38,8 @@ function __%[1]s_perform_completion
__%[1]s_debug "args: $args"
__%[1]s_debug "last arg: $lastArg"
- set -l requestComp "$args[1] %[3]s $args[2..-1] $lastArg"
+ # Disable ActiveHelp which is not supported for fish shell
+ set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
__%[1]s_debug "Calling $requestComp"
set -l results (eval $requestComp 2> /dev/null)
@@ -196,7 +197,7 @@ complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
`, nameForVar, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
}
// GenFishCompletion generates fish completion file and writes to the passed writer.
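
Annotation: the new %[9]s placeholder is filled by activeHelpEnvVar(name), so the generated fish script disables active help by prefixing the completion request with the program's activeHelp variable set to 0. A sketch of the derivation this assumes, inferred from how the placeholder is used above (e.g. a program named `my-tool` would get MY_TOOL_ACTIVE_HELP=0):

    // Assumed derivation: upper-case the program name, with '-' mapped to '_'.
    func activeHelpEnvVarName(name string) string {
        return strings.ToUpper(strings.ReplaceAll(name, "-", "_")) + "_ACTIVE_HELP"
    }
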
diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go
new file mode 100644
index 00000000000..dc78431194c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/flag_groups.go
@@ -0,0 +1,223 @@
+// Copyright © 2022 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+const (
+ requiredAsGroup = "cobra_annotation_required_if_others_set"
+ mutuallyExclusive = "cobra_annotation_mutually_exclusive"
+)
+
+// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
+// if the command is invoked with a subset (but not all) of the given flags.
+func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v))
+ }
+ if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil {
+ // Only errs if the flag isn't found.
+ panic(err)
+ }
+ }
+}
+
+// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
+// if the command is invoked with more than one flag from the given set of flags.
+func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v))
+ }
+ // Each call to this function adds a single new entry; this allows a flag to be a member of multiple groups if needed.
+ if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// validateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the
+// first error encountered.
+func (c *Command) validateFlagGroups() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+
+ // groupStatus format is the list of flags as a unique ID,
+ // then a map of each flag name and whether it is set or not.
+ groupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus)
+ })
+
+ if err := validateRequiredFlagGroups(groupStatus); err != nil {
+ return err
+ }
+ if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil {
+ return err
+ }
+ return nil
+}
+
+func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool {
+ for _, fname := range flagnames {
+ f := fs.Lookup(fname)
+ if f == nil {
+ return false
+ }
+ }
+ return true
+}
+
+func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) {
+ groupInfo, found := pflag.Annotations[annotation]
+ if found {
+ for _, group := range groupInfo {
+ if groupStatus[group] == nil {
+ flagnames := strings.Split(group, " ")
+
+ // Only consider this flag group at all if all the flags are defined.
+ if !hasAllFlags(flags, flagnames...) {
+ continue
+ }
+
+ groupStatus[group] = map[string]bool{}
+ for _, name := range flagnames {
+ groupStatus[group][name] = false
+ }
+ }
+
+ groupStatus[group][pflag.Name] = pflag.Changed
+ }
+ }
+}
+
+func validateRequiredFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+
+ unset := []string{}
+ for flagname, isSet := range flagnameAndStatus {
+ if !isSet {
+ unset = append(unset, flagname)
+ }
+ }
+ if len(unset) == len(flagnameAndStatus) || len(unset) == 0 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(unset)
+ return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset)
+ }
+
+ return nil
+}
+
+func validateExclusiveFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+ var set []string
+ for flagname, isSet := range flagnameAndStatus {
+ if isSet {
+ set = append(set, flagname)
+ }
+ }
+ if len(set) == 0 || len(set) == 1 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(set)
+ return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set)
+ }
+ return nil
+}
+
+func sortedKeys(m map[string]map[string]bool) []string {
+ keys := make([]string, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// enforceFlagGroupsForCompletion will do the following:
+// - when a flag in a group is present, other flags in the group will be marked required
+// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden
+// This allows the standard completion logic to behave appropriately for flag groups
+func (c *Command) enforceFlagGroupsForCompletion() {
+ if c.DisableFlagParsing {
+ return
+ }
+
+ flags := c.Flags()
+ groupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ c.Flags().VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus)
+ })
+
+ // If a flag that is part of a group is present, we make all the other flags
+ // of that group required so that the shell completion suggests them automatically
+ for flagList, flagnameAndStatus := range groupStatus {
+ for _, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the group is set, mark the other ones as required
+ for _, fName := range strings.Split(flagList, " ") {
+ _ = c.MarkFlagRequired(fName)
+ }
+ }
+ }
+ }
+
+ // If a flag that is mutually exclusive to others is present, we hide the other
+ // flags of that group so the shell completion does not suggest them
+ for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus {
+ for flagName, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the mutually exclusive group is set, mark the other ones as hidden
+ // Don't mark the flag that is already set as hidden because it may be an
+ // array or slice flag and therefore must continue being suggested
+ for _, fName := range strings.Split(flagList, " ") {
+ if fName != flagName {
+ flag := c.Flags().Lookup(fName)
+ flag.Hidden = true
+ }
+ }
+ }
+ }
+ }
+}
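
Annotation: taken together, the new file gives commands two group annotations. A minimal sketch of both in use; the flag names are illustrative:

    cmd.Flags().String("username", "", "account name")
    cmd.Flags().String("password", "", "account password")
    cmd.Flags().Bool("json", false, "emit JSON")
    cmd.Flags().Bool("yaml", false, "emit YAML")

    // Must be set together, or not at all:
    cmd.MarkFlagsRequiredTogether("username", "password")
    // At most one may be set:
    cmd.MarkFlagsMutuallyExclusive("json", "yaml")

Invoking the command with only --username now fails validateFlagGroups with "if any flags in the group [password username] are set they must all be set; missing [password]", and passing both --json and --yaml triggers the mutually-exclusive error defined above.
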
diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod
index 1fb9439dd86..1d45d9f9ab0 100644
--- a/vendor/github.com/spf13/cobra/go.mod
+++ b/vendor/github.com/spf13/cobra/go.mod
@@ -1,11 +1,10 @@
module github.com/spf13/cobra
-go 1.14
+go 1.15
require (
- github.com/cpuguy83/go-md2man/v2 v2.0.0
+ github.com/cpuguy83/go-md2man/v2 v2.0.2
github.com/inconshreveable/mousetrap v1.0.0
github.com/spf13/pflag v1.0.5
- github.com/spf13/viper v1.8.1
gopkg.in/yaml.v2 v2.4.0
)
diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum
index 3e22df29a3d..8ed228800c7 100644
--- a/vendor/github.com/spf13/cobra/go.sum
+++ b/vendor/github.com/spf13/cobra/go.sum
@@ -1,592 +1,12 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
index 59234c09f18..379e7c088af 100644
--- a/vendor/github.com/spf13/cobra/powershell_completions.go
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -50,7 +50,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
if ($Command.Length -gt $CursorPosition) {
$Command=$Command.Substring(0,$CursorPosition)
}
- __%[1]s_debug "Truncated command: $Command"
+ __%[1]s_debug "Truncated command: $Command"
$ShellCompDirectiveError=%[3]d
$ShellCompDirectiveNoSpace=%[4]d
@@ -58,9 +58,10 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
$ShellCompDirectiveFilterFileExt=%[6]d
$ShellCompDirectiveFilterDirs=%[7]d
- # Prepare the command to request completions for the program.
+ # Prepare the command to request completions for the program.
# Split the command at the first space to separate the program and arguments.
$Program,$Arguments = $Command.Split(" ",2)
+
$RequestComp="$Program %[2]s $Arguments"
__%[1]s_debug "RequestComp: $RequestComp"
@@ -90,11 +91,13 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
}
__%[1]s_debug "Calling $RequestComp"
+ # First disable ActiveHelp which is not supported for Powershell
+ $env:%[8]s=0
+
#call the command store the output in $out and redirect stderr and stdout to null
# $Out is an array contains each line per element
Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
-
# get directive from last line
[int]$Directive = $Out[-1].TrimStart(':')
if ($Directive -eq "") {
@@ -233,7 +236,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
Default {
# Like MenuComplete but we don't want to add a space here because
# the user need to press space anyway to get the completion.
- # Description will not be shown because thats not possible with TabCompleteNext
+ # Description will not be shown because that's not possible with TabCompleteNext
[System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
}
}
@@ -242,7 +245,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
}
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
}
func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
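For context on how the patched script generator above is consumed, here is a minimal, hypothetical sketch (not part of this change) of a program emitting the PowerShell completion script through Cobra's public API; `GenPowerShellCompletionWithDesc` wraps the `genPowerShellCompletion` helper modified in this hunk.

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "myapp"} // "myapp" is a placeholder name
	// Writes the PowerShell completion script to stdout; with this patch
	// the generated script also sets the program's ActiveHelp environment
	// variable to 0 before requesting completions, since ActiveHelp is
	// not supported in PowerShell.
	if err := rootCmd.GenPowerShellCompletionWithDesc(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```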
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md
index d98a71e36f9..ac680118eff 100644
--- a/vendor/github.com/spf13/cobra/projects_using_cobra.md
+++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md
@@ -1,9 +1,10 @@
## Projects using Cobra
- [Arduino CLI](https://github.com/arduino/arduino-cli)
-- [Bleve](http://www.blevesearch.com/)
-- [CockroachDB](http://www.cockroachlabs.com/)
+- [Bleve](https://blevesearch.com/)
+- [CockroachDB](https://www.cockroachlabs.com/)
- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk)
+- [Datree](https://github.com/datreeio/datree)
- [Delve](https://github.com/derekparker/delve)
- [Docker (distribution)](https://github.com/docker/distribution)
- [Etcd](https://etcd.io/)
@@ -13,26 +14,41 @@
- [Github CLI](https://github.com/cli/cli)
- [GitHub Labeler](https://github.com/erdaltsksn/gh-label)
- [Golangci-lint](https://golangci-lint.run)
-- [GopherJS](http://www.gopherjs.org/)
+- [GopherJS](https://github.com/gopherjs/gopherjs)
+- [GoReleaser](https://goreleaser.com)
- [Helm](https://helm.sh)
- [Hugo](https://gohugo.io)
+- [Infracost](https://github.com/infracost/infracost)
- [Istio](https://istio.io)
- [Kool](https://github.com/kool-dev/kool)
-- [Kubernetes](http://kubernetes.io/)
+- [Kubernetes](https://kubernetes.io/)
+- [Kubescape](https://github.com/armosec/kubescape)
- [Linkerd](https://linkerd.io/)
- [Mattermost-server](https://github.com/mattermost/mattermost-server)
+- [Mercure](https://mercure.rocks/)
+- [Meroxa CLI](https://github.com/meroxa/cli)
- [Metal Stack CLI](https://github.com/metal-stack/metalctl)
- [Moby (former Docker)](https://github.com/moby/moby)
+- [Moldy](https://github.com/Moldy-Community/moldy)
+- [Multi-gitter](https://github.com/lindell/multi-gitter)
- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+- [nFPM](https://nfpm.goreleaser.com)
- [OpenShift](https://www.openshift.com/)
- [Ory Hydra](https://github.com/ory/hydra)
- [Ory Kratos](https://github.com/ory/kratos)
+- [Pixie](https://github.com/pixie-io/pixie)
+- [Polygon Edge](https://github.com/0xPolygon/polygon-edge)
- [Pouch](https://github.com/alibaba/pouch)
-- [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
+- [ProjectAtomic (enterprise)](https://www.projectatomic.io/)
- [Prototool](https://github.com/uber/prototool)
+- [Pulumi](https://www.pulumi.com)
+- [QRcp](https://github.com/claudiodangelis/qrcp)
- [Random](https://github.com/erdaltsksn/random)
- [Rclone](https://rclone.org/)
+- [Scaleway CLI](https://github.com/scaleway/scaleway-cli)
- [Skaffold](https://skaffold.dev/)
- [Tendermint](https://github.com/tendermint/tendermint)
- [Twitch CLI](https://github.com/twitchdev/twitch-cli)
+- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli)
+- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework)
- [Werf](https://werf.io/)
diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md
index 4ba06a11c0d..1e2058ed625 100644
--- a/vendor/github.com/spf13/cobra/shell_completions.md
+++ b/vendor/github.com/spf13/cobra/shell_completions.md
@@ -16,10 +16,12 @@ If you do not wish to use the default `completion` command, you can choose to
provide your own, which will take precedence over the default one. (This also provides
backwards-compatibility with programs that already have their own `completion` command.)
-If you are using the generator, you can create a completion command by running
+If you are using the `cobra-cli` generator,
+which can be found at [spf13/cobra-cli](https://github.com/spf13/cobra-cli),
+you can create a completion command by running
```bash
-cobra add completion
+cobra-cli add completion
```
and then modifying the generated `cmd/completion.go` file to look something like this
(writing the shell script to stdout allows the most flexible use):
@@ -28,17 +30,17 @@ and then modifying the generated `cmd/completion.go` file to look something like
var completionCmd = &cobra.Command{
Use: "completion [bash|zsh|fish|powershell]",
Short: "Generate completion script",
- Long: `To load completions:
+ Long: fmt.Sprintf(`To load completions:
Bash:
- $ source <(yourprogram completion bash)
+ $ source <(%[1]s completion bash)
# To load completions for each session, execute once:
# Linux:
- $ yourprogram completion bash > /etc/bash_completion.d/yourprogram
+ $ %[1]s completion bash > /etc/bash_completion.d/%[1]s
# macOS:
- $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram
+ $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
Zsh:
@@ -48,25 +50,25 @@ Zsh:
$ echo "autoload -U compinit; compinit" >> ~/.zshrc
# To load completions for each session, execute once:
- $ yourprogram completion zsh > "${fpath[1]}/_yourprogram"
+ $ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
# You will need to start a new shell for this setup to take effect.
fish:
- $ yourprogram completion fish | source
+ $ %[1]s completion fish | source
# To load completions for each session, execute once:
- $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish
+ $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
PowerShell:
- PS> yourprogram completion powershell | Out-String | Invoke-Expression
+ PS> %[1]s completion powershell | Out-String | Invoke-Expression
# To load completions for every new session, run:
- PS> yourprogram completion powershell > yourprogram.ps1
+ PS> %[1]s completion powershell > %[1]s.ps1
# and source this file from your PowerShell profile.
-`,
+`, cmd.Root().Name()),
DisableFlagsInUseLine: true,
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
Args: cobra.ExactValidArgs(1),
@@ -120,7 +122,7 @@ For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns
Some simplified code from `kubectl get` looks like:
```go
-validArgs []string = { "pod", "node", "service", "replicationcontroller" }
+validArgs = []string{ "pod", "node", "service", "replicationcontroller" }
cmd := &cobra.Command{
Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
@@ -146,7 +148,7 @@ node pod replicationcontroller service
If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
```go
-argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
+argAliases = []string { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
cmd := &cobra.Command{
...
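Assembling the corrected snippets above into one runnable sketch (the `kubectl-lite` name is illustrative, not from the upstream docs):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	getCmd := &cobra.Command{
		Use:       "get RESOURCE",
		Short:     "Display one or many resources",
		ValidArgs: []string{"pod", "node", "service", "replicationcontroller"},
		// Aliases are accepted as arguments but not suggested in completions.
		ArgAliases: []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"},
		Args:       cobra.ExactValidArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("getting", args[0])
		},
	}
	rootCmd := &cobra.Command{Use: "kubectl-lite"}
	rootCmd.AddCommand(getCmd)
	_ = rootCmd.Execute()
}
```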
diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md
index 311abce284d..5a7acf88e69 100644
--- a/vendor/github.com/spf13/cobra/user_guide.md
+++ b/vendor/github.com/spf13/cobra/user_guide.md
@@ -29,10 +29,10 @@ func main() {
## Using the Cobra Generator
-Cobra provides its own program that will create your application and add any
+Cobra-CLI is its own program that will create your application and add any
commands you want. It's the easiest way to incorporate Cobra into your application.
-[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
+For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
## Using the Cobra Library
@@ -51,7 +51,7 @@ var rootCmd = &cobra.Command{
Short: "Hugo is a very fast static site generator",
Long: `A Fast and Flexible Static Site Generator built with
love by spf13 and friends in Go.
- Complete documentation is available at http://hugo.spf13.com`,
+ Complete documentation is available at https://gohugo.io/documentation/`,
Run: func(cmd *cobra.Command, args []string) {
// Do Stuff Here
},
@@ -86,7 +86,7 @@ var (
userLicense string
rootCmd = &cobra.Command{
- Use: "cobra",
+ Use: "cobra-cli",
Short: "A generator for Cobra based Applications",
Long: `Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
@@ -281,7 +281,7 @@ func init() {
In this example, the persistent flag `author` is bound with `viper`.
**Note**: the variable `author` will not be set to the value from config,
-when the `--author` flag is not provided by user.
+when the `--author` flag is provided by the user.
More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
@@ -300,10 +300,34 @@ rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (re
rootCmd.MarkPersistentFlagRequired("region")
```
+### Flag Groups
+
+If you have different flags that must be provided together (e.g. if they provide the `--username` flag they MUST provide the `--password` flag as well) then
+Cobra can enforce that requirement:
+```go
+rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)")
+rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)")
+rootCmd.MarkFlagsRequiredTogether("username", "password")
+```
+
+You can also prevent different flags from being provided together if they represent mutually
+exclusive options such as specifying an output format as either `--json` or `--yaml` but never both:
+```go
+rootCmd.Flags().BoolVar(&outputJSON, "json", false, "Output in JSON")
+rootCmd.Flags().BoolVar(&outputYAML, "yaml", false, "Output in YAML")
+rootCmd.MarkFlagsMutuallyExclusive("json", "yaml")
+```
+
+In both of these cases:
+ - both local and persistent flags can be used
+ - **NOTE:** the group is only enforced on commands where every flag is defined
+ - a flag may appear in multiple groups
+ - a group may contain any number of flags
+
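To make the group behaviour concrete, a minimal sketch (command and variable names are illustrative) showing that an incomplete group fails validation before `Run` executes:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var username, password string
	loginCmd := &cobra.Command{
		Use: "login",
		Run: func(cmd *cobra.Command, args []string) { fmt.Println("logged in as", username) },
	}
	loginCmd.Flags().StringVarP(&username, "username", "u", "", "Username (required if password is set)")
	loginCmd.Flags().StringVarP(&password, "password", "p", "", "Password (required if username is set)")
	loginCmd.MarkFlagsRequiredTogether("username", "password")

	// Passing only --username trips the group check; Execute returns an
	// error describing the incomplete flag group instead of running Run.
	loginCmd.SetArgs([]string{"--username", "alice"})
	if err := loginCmd.Execute(); err != nil {
		fmt.Println("error:", err)
	}
}
```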
## Positional and Custom Arguments
-Validation of positional arguments can be specified using the `Args` field
-of `Command`.
+Validation of positional arguments can be specified using the `Args` field of `Command`.
+If `Args` is undefined or `nil`, it defaults to `ArbitraryArgs`.
The following validators are built in:
@@ -315,6 +339,7 @@ The following validators are built in:
- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command`
- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
+- `MatchAll(pargs ...PositionalArgs)` - enables combining existing checks with arbitrary other checks (e.g. you want to check the ExactArgs length along with other qualities).
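A short sketch of the new `MatchAll` validator composing two existing checks (the command itself is hypothetical):

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use:       "get RESOURCE",
		ValidArgs: []string{"pod", "node"},
		// Require exactly one positional argument, and it must be in ValidArgs.
		Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		Run:  func(cmd *cobra.Command, args []string) {},
	}
	_ = cmd.Execute()
}
```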
An example of setting the custom validator:
@@ -404,7 +429,7 @@ a count and a string.`,
}
```
-For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/).
+For a more complete example of a larger application, please check out [Hugo](https://gohugo.io/).
## Help Command
@@ -602,7 +627,7 @@ Did you mean this?
Run 'hugo --help' for usage.
```
-Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
If you need to disable suggestions or tweak the string distance in your command, use:
@@ -635,3 +660,7 @@ Cobra can generate documentation based on subcommands, flags, etc. Read more abo
## Generating shell completions
Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md).
+
+## Providing Active Help
+
+Cobra makes use of the shell-completion system to define a framework allowing you to provide Active Help to your users. Active Help are messages (hints, warnings, etc) printed as the program is being used. Read more about it in [Active Help](active_help.md).
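The Active Help messages mentioned here are produced from a command's completion function; a minimal sketch, assuming the `cobra.AppendActiveHelp` helper added alongside this feature:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use: "deploy ENVIRONMENT",
		Run: func(cmd *cobra.Command, args []string) {},
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			var comps []string
			if len(args) == 0 {
				// The message is stored with an activeHelp marker prefix,
				// which the generated shell scripts (see the zsh diff
				// below) strip and display separately from completions.
				comps = cobra.AppendActiveHelp(comps, "You must choose an environment, e.g. staging or production")
			}
			return comps, cobra.ShellCompDirectiveNoFileComp
		},
	}
	_ = cmd.Execute()
}
```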
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
index 1afec30ea92..65cd94c6040 100644
--- a/vendor/github.com/spf13/cobra/zsh_completions.go
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -75,7 +75,7 @@ func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
- WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s
+ WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s
# zsh completion for %-36[1]s -*- shell-script -*-
@@ -163,7 +163,24 @@ _%[1]s()
return
fi
+ local activeHelpMarker="%[8]s"
+ local endIndex=${#activeHelpMarker}
+ local startIndex=$((${#activeHelpMarker}+1))
+ local hasActiveHelp=0
while IFS='\n' read -r comp; do
+ # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker)
+ if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then
+ __%[1]s_debug "ActiveHelp found: $comp"
+ comp="${comp[$startIndex,-1]}"
+ if [ -n "$comp" ]; then
+ compadd -x "${comp}"
+ __%[1]s_debug "ActiveHelp will need delimiter"
+ hasActiveHelp=1
+ fi
+
+ continue
+ fi
+
if [ -n "$comp" ]; then
# If requested, completions are returned with a description.
# The description is preceded by a TAB character.
@@ -171,7 +188,7 @@ _%[1]s()
# We first need to escape any : as part of the completion itself.
comp=${comp//:/\\:}
- local tab=$(printf '\t')
+ local tab="$(printf '\t')"
comp=${comp//$tab/:}
__%[1]s_debug "Adding completion: ${comp}"
@@ -180,6 +197,17 @@ _%[1]s()
fi
done < <(printf "%%s\n" "${out[@]}")
+ # Add a delimiter after the activeHelp statements, but only if:
+ # - there are completions following the activeHelp statements, or
+ # - file completion will be performed (so there will be choices after the activeHelp)
+ if [ $hasActiveHelp -eq 1 ]; then
+ if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then
+ __%[1]s_debug "Adding activeHelp delimiter"
+ compadd -x "--"
+ hasActiveHelp=0
+ fi
+ fi
+
if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
__%[1]s_debug "Activating nospace."
noSpace="-S ''"
@@ -202,7 +230,7 @@ _%[1]s()
_arguments '*:filename:'"$filteringCmd"
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
# File completion for directories only
- local subDir
+ local subdir
subdir="${completions[1]}"
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
@@ -250,9 +278,10 @@ _%[1]s()
# don't run the completion function when being source-ed or eval-ed
if [ "$funcstack[1]" = "_%[1]s" ]; then
- _%[1]s
+ _%[1]s
fi
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ activeHelpMarker))
}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 41649d26792..95d8e59da69 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -1,8 +1,10 @@
package assert
import (
+ "bytes"
"fmt"
"reflect"
+ "time"
)
type CompareType int
@@ -30,6 +32,9 @@ var (
float64Type = reflect.TypeOf(float64(1))
stringType = reflect.TypeOf("")
+
+ timeType = reflect.TypeOf(time.Time{})
+ bytesType = reflect.TypeOf([]byte{})
)
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
@@ -299,6 +304,47 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
return compareLess, true
}
}
+ // Check for known struct types we can check for compare results.
+ case reflect.Struct:
+ {
+ // All structs enter here. We're not interested in most types.
+ if !canConvert(obj1Value, timeType) {
+ break
+ }
+
+ // time.Time values can be compared!
+ timeObj1, ok := obj1.(time.Time)
+ if !ok {
+ timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
+ }
+
+ timeObj2, ok := obj2.(time.Time)
+ if !ok {
+ timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
+ }
+
+ return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+ }
+ case reflect.Slice:
+ {
+ // We only care about the []byte type.
+ if !canConvert(obj1Value, bytesType) {
+ break
+ }
+
+ // []byte can be compared!
+ bytesObj1, ok := obj1.([]byte)
+ if !ok {
+ bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte)
+
+ }
+ bytesObj2, ok := obj2.([]byte)
+ if !ok {
+ bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
+ }
+
+ return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
+ }
}
return compareEqual, false
@@ -310,7 +356,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
// assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -320,7 +369,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
// assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// Less asserts that the first element is less than the second
@@ -329,7 +381,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
// assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -339,7 +394,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
// assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
// Positive asserts that the specified element is positive
@@ -347,8 +405,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
// assert.Positive(t, 1)
// assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
}
// Negative asserts that the specified element is negative
@@ -356,8 +417,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
// assert.Negative(t, -1)
// assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
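With the hunks above, the ordering assertions gain `time.Time` and `[]byte` support (gated on Go >= 1.17 via `canConvert`, see the new files below) and finally forward `msgAndArgs` variadically. A small sketch of the newly comparable types (package name is illustrative):

```go
package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNewComparisons(t *testing.T) {
	now := time.Now()
	// time.Time values are compared via UnixNano.
	assert.Greater(t, now.Add(time.Second), now)
	// []byte values are compared via bytes.Compare, and the custom
	// message is now formatted correctly on failure.
	assert.Less(t, []byte("abc"), []byte("abd"), "expected %q to sort first", "abc")
}
```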
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
new file mode 100644
index 00000000000..da867903e2f
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
@@ -0,0 +1,16 @@
+//go:build go1.17
+// +build go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_legacy.go
+
+package assert
+
+import "reflect"
+
+// Wrapper around reflect.Value.CanConvert, for compatibility
+// reasons.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+ return value.CanConvert(to)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
new file mode 100644
index 00000000000..1701af2a3c8
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
@@ -0,0 +1,16 @@
+//go:build !go1.17
+// +build !go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_can_convert.go
+
+package assert
+
+import "reflect"
+
+// Older versions of Go do not have the reflect.Value.CanConvert
+// method.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+ return false
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 4dfd1229a86..7880b8f9433 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
}
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
+}
+
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
@@ -724,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...)
+}
+
// YAMLEqf asserts that two YAML strings are equivalent.
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
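`ErrorContainsf` and `WithinRangef` (with their non-formatted counterparts, forwarded below) are new in this testify release; a brief sketch of how they read in a test (package name is illustrative):

```go
package example

import (
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNewHelpers(t *testing.T) {
	_, err := os.Open("does-not-exist.txt")
	// Passes when err is non-nil and its message contains the substring.
	assert.ErrorContains(t, err, "does-not-exist.txt")

	start := time.Now()
	// Passes when the actual time falls within [start, end], inclusive.
	assert.WithinRange(t, time.Now(), start, start.Add(time.Second))
}
```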
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 25337a6f07e..339515b8bfb 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ..
return ErrorAsf(a.t, err, target, msg, args...)
}
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContains(err, expectedErrorSubString)
+func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContains(a.t, theError, contains, msgAndArgs...)
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
+func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContainsf(a.t, theError, contains, msg, args...)
+}
+
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
@@ -1437,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
return WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRange(a.t, actual, start, end, msgAndArgs...)
+}
+
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRangef(a.t, actual, start, end, msg, args...)
+}
+
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 1c3b47182a7..75944878358 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
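The change above is the same variadic fix applied to the compare helpers: previously the custom message slice was passed as a single argument and rendered as a raw Go slice; now it is expanded into the format string. A sketch (package name is illustrative):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrdering(t *testing.T) {
	// On failure, the message now renders as
	// "latencies for region eu-west must be sorted"
	// rather than an []interface{} dump.
	assert.IsIncreasing(t, []int{1, 2, 3}, "latencies for region %s must be sorted", "eu-west")
}
```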
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index bcac4401f57..fa1245b1897 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
"os"
+ "path/filepath"
"reflect"
"regexp"
"runtime"
@@ -144,7 +145,8 @@ func CallerInfo() []string {
if len(parts) > 1 {
dir := parts[len(parts)-2]
if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
- callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ path, _ := filepath.Abs(file)
+ callers = append(callers, fmt.Sprintf("%s:%d", path, line))
}
}
@@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool {
switch objValue.Kind() {
// collection types are empty when they have no element
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ case reflect.Chan, reflect.Map, reflect.Slice:
return objValue.Len() == 0
- // pointers are empty if nil or if the value they point to is empty
+ // pointers are empty if nil or if the value they point to is empty
case reflect.Ptr:
if objValue.IsNil() {
return true
}
deref := objValue.Elem().Interface()
return isEmpty(deref)
- // for all other types, compare against the zero value
+ // for all other types, compare against the zero value
+ // array types are empty when they match their zero-initialized state
default:
zero := reflect.Zero(objValue.Type())
return reflect.DeepEqual(object, zero.Interface())
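The `isEmpty` hunk above changes how arrays are handled: an array's length is fixed by its type, so a length check is almost never false, and arrays are now compared against their zero value instead. A hypothetical test illustrating the new behavior:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestArrayEmptiness(t *testing.T) {
	assert.Empty(t, [3]int{})           // passes: equals the zero value of [3]int
	assert.NotEmpty(t, [3]int{0, 0, 1}) // passes: a non-zero element makes the array non-empty
}
```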
@@ -718,10 +721,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte
// return (false, false) if impossible.
// return (true, false) if element was not found.
// return (true, true) if element was found.
-func includeElement(list interface{}, element interface{}) (ok, found bool) {
+func containsElement(list interface{}, element interface{}) (ok, found bool) {
listValue := reflect.ValueOf(list)
- listKind := reflect.TypeOf(list).Kind()
+ listType := reflect.TypeOf(list)
+ if listType == nil {
+ return false, false
+ }
+ listKind := listType.Kind()
defer func() {
if e := recover(); e != nil {
ok = false
@@ -764,7 +771,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
h.Helper()
}
- ok, found := includeElement(s, contains)
+ ok, found := containsElement(s, contains)
if !ok {
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
}
@@ -787,7 +794,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
h.Helper()
}
- ok, found := includeElement(s, contains)
+ ok, found := containsElement(s, contains)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
}
@@ -811,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true // we consider nil to be equal to the nil set
}
- subsetValue := reflect.ValueOf(subset)
defer func() {
if e := recover(); e != nil {
ok = false
@@ -821,17 +827,35 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()
- if listKind != reflect.Array && listKind != reflect.Slice {
+ if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
- if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
+ subsetValue := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map && listKind == reflect.Map {
+ listValue := reflect.ValueOf(list)
+ subsetKeys := subsetValue.MapKeys()
+
+ for i := 0; i < len(subsetKeys); i++ {
+ subsetKey := subsetKeys[i]
+ subsetElement := subsetValue.MapIndex(subsetKey).Interface()
+ listElement := listValue.MapIndex(subsetKey).Interface()
+
+ if !ObjectsAreEqual(subsetElement, listElement) {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
+ }
+ }
+
+ return true
+ }
+
for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
- ok, found := includeElement(list, element)
+ ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
}
@@ -852,10 +876,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
h.Helper()
}
if subset == nil {
- return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+ return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
}
- subsetValue := reflect.ValueOf(subset)
defer func() {
if e := recover(); e != nil {
ok = false
@@ -865,17 +888,35 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()
- if listKind != reflect.Array && listKind != reflect.Slice {
+ if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
- if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
+ subsetValue := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map && listKind == reflect.Map {
+ listValue := reflect.ValueOf(list)
+ subsetKeys := subsetValue.MapKeys()
+
+ for i := 0; i < len(subsetKeys); i++ {
+ subsetKey := subsetKeys[i]
+ subsetElement := subsetValue.MapIndex(subsetKey).Interface()
+ listElement := listValue.MapIndex(subsetKey).Interface()
+
+ if !ObjectsAreEqual(subsetElement, listElement) {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+ }
+
for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
- ok, found := includeElement(list, element)
+ ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
}
@@ -1000,27 +1041,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
type PanicTestFunc func()
// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
-func didPanic(f PanicTestFunc) (bool, interface{}, string) {
-
- didPanic := false
- var message interface{}
- var stack string
- func() {
-
- defer func() {
- if message = recover(); message != nil {
- didPanic = true
- stack = string(debug.Stack())
- }
- }()
-
- // call the target function
- f()
+func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) {
+ didPanic = true
+ defer func() {
+ message = recover()
+ if didPanic {
+ stack = string(debug.Stack())
+ }
}()
- return didPanic, message, stack
+ // call the target function
+ f()
+ didPanic = false
+ return
}
// Panics asserts that the code inside the specified PanicTestFunc panics.
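The `didPanic` rewrite inverts the bookkeeping: `didPanic` starts out `true` and is cleared only after `f()` returns normally, so a panic (which skips that final assignment) is observed by the deferred function, which also records the stack. A standalone sketch of the same pattern:

```go
package main

import "fmt"

// didPanic reports whether f panicked, using the same defer/recover trick.
func didPanic(f func()) (panicked bool, message interface{}) {
	panicked = true
	defer func() {
		message = recover() // nil unless f panicked
	}()
	f()
	panicked = false // reached only when f returns normally
	return
}

func main() {
	p, msg := didPanic(func() { panic("boom") })
	fmt.Println(p, msg) // true boom
	p, msg = didPanic(func() {})
	fmt.Println(p, msg) // false <nil>
}
```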
@@ -1111,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration,
return true
}
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if end.Before(start) {
+ return Fail(t, "Start should be before end", msgAndArgs...)
+ }
+
+ if actual.Before(start) {
+ return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...)
+ } else if actual.After(end) {
+ return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...)
+ }
+
+ return true
+}
+
func toFloat(x interface{}) (float64, bool) {
var xf float64
xok := true
@@ -1161,11 +1217,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs
bf, bok := toFloat(actual)
if !aok || !bok {
- return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+ return Fail(t, "Parameters must be numerical", msgAndArgs...)
+ }
+
+ if math.IsNaN(af) && math.IsNaN(bf) {
+ return true
}
if math.IsNaN(af) {
- return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
+ return Fail(t, "Expected must not be NaN", msgAndArgs...)
}
if math.IsNaN(bf) {
@@ -1188,7 +1248,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn
if expected == nil || actual == nil ||
reflect.TypeOf(actual).Kind() != reflect.Slice ||
reflect.TypeOf(expected).Kind() != reflect.Slice {
- return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ return Fail(t, "Parameters must be slice", msgAndArgs...)
}
actualSlice := reflect.ValueOf(actual)
@@ -1250,8 +1310,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m
func calcRelativeError(expected, actual interface{}) (float64, error) {
af, aok := toFloat(expected)
- if !aok {
- return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+ bf, bok := toFloat(actual)
+ if !aok || !bok {
+ return 0, fmt.Errorf("Parameters must be numerical")
+ }
+ if math.IsNaN(af) && math.IsNaN(bf) {
+ return 0, nil
}
if math.IsNaN(af) {
return 0, errors.New("expected value must not be NaN")
@@ -1259,10 +1323,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if af == 0 {
return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
}
- bf, bok := toFloat(actual)
- if !bok {
- return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
- }
if math.IsNaN(bf) {
return 0, errors.New("actual value must not be NaN")
}
@@ -1298,7 +1358,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
if expected == nil || actual == nil ||
reflect.TypeOf(actual).Kind() != reflect.Slice ||
reflect.TypeOf(expected).Kind() != reflect.Slice {
- return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ return Fail(t, "Parameters must be slice", msgAndArgs...)
}
actualSlice := reflect.ValueOf(actual)
@@ -1375,6 +1435,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte
return true
}
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContains(t, err, expectedErrorSubString)
+func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !Error(t, theError, msgAndArgs...) {
+ return false
+ }
+
+ actual := theError.Error()
+ if !strings.Contains(actual, contains) {
+ return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...)
+ }
+
+ return true
+}
+
// matchRegexp return true if a specified regexp matches a string.
func matchRegexp(rx interface{}, str interface{}) bool {
@@ -1588,12 +1669,17 @@ func diff(expected interface{}, actual interface{}) string {
}
var e, a string
- if et != reflect.TypeOf("") {
- e = spewConfig.Sdump(expected)
- a = spewConfig.Sdump(actual)
- } else {
+
+ switch et {
+ case reflect.TypeOf(""):
e = reflect.ValueOf(expected).String()
a = reflect.ValueOf(actual).String()
+ case reflect.TypeOf(time.Time{}):
+ e = spewConfigStringerEnabled.Sdump(expected)
+ a = spewConfigStringerEnabled.Sdump(actual)
+ default:
+ e = spewConfig.Sdump(expected)
+ a = spewConfig.Sdump(actual)
}
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
@@ -1625,6 +1711,14 @@ var spewConfig = spew.ConfigState{
MaxDepth: 10,
}
+var spewConfigStringerEnabled = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+ MaxDepth: 10,
+}
+
type tHelper interface {
Helper()
}
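Taken together, this testify bump adds `ErrorContains` and `WithinRange`, teaches `Subset`/`NotSubset` about maps, and makes `InDelta` treat NaN as equal to NaN. A hypothetical test exercising the new behavior (package and test names are illustrative):

```go
package example_test

import (
	"errors"
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNewAssertions(t *testing.T) {
	// ErrorContains: substring match on the error text.
	err := errors.New("dial tcp: connection refused")
	assert.ErrorContains(t, err, "connection refused")

	// WithinRange: inclusive time-range check.
	now := time.Now()
	assert.WithinRange(t, now, now.Add(-time.Second), now.Add(time.Second))

	// Subset now accepts maps: every key/value pair of the subset must match.
	assert.Subset(t, map[string]int{"a": 1, "b": 2}, map[string]int{"a": 1})

	// InDelta no longer fails when both values are NaN.
	assert.InDelta(t, math.NaN(), math.NaN(), 0.01)
}
```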
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index 51820df2e67..880853f5a2c 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
t.FailNow()
}
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContains(t, err, expectedErrorSubString)
+func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.ErrorContains(t, theError, contains, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.ErrorContainsf(t, theError, contains, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
@@ -1834,6 +1864,32 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
t.FailNow()
}
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.WithinRange(t, actual, start, end, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.WithinRangef(t, actual, start, end, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// YAMLEq asserts that two YAML strings are equivalent.
func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index ed54a9d83f3..960bf6f2cab 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ..
ErrorAsf(a.t, err, target, msg, args...)
}
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContains(err, expectedErrorSubString)
+func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ ErrorContains(a.t, theError, contains, msgAndArgs...)
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
+func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ ErrorContainsf(a.t, theError, contains, msg, args...)
+}
+
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) {
@@ -1438,6 +1462,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ WithinRange(a.t, actual, start, end, msgAndArgs...)
+}
+
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ WithinRangef(a.t, actual, start, end, msg, args...)
+}
+
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
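The `require` variants wrap the `assert` implementations and call `t.FailNow()` on failure, so a failed `require` stops the test immediately, while a failed `assert` records the failure and continues. A short sketch of the distinction:

```go
package example_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestRequireStopsOnFailure(t *testing.T) {
	err := errors.New("connection refused")

	// assert: records a failure and lets the test keep running.
	assert.ErrorContains(t, err, "refused")

	// require: identical check, but on a mismatch t.FailNow() runs and
	// nothing after this line would execute.
	require.ErrorContains(t, err, "refused")
}
```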
diff --git a/vendor/github.com/sylabs/sif/v2/LICENSE.md b/vendor/github.com/sylabs/sif/v2/LICENSE.md
new file mode 100644
index 00000000000..dea3e409e11
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/LICENSE.md
@@ -0,0 +1,29 @@
+# LICENSE
+
+Copyright (c) 2018-2022, Sylabs Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go
new file mode 100644
index 00000000000..d6f667378a2
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2021-2022, Sylabs Inc. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+var (
+ hdrArchUnknown archType = [...]byte{'0', '0', '\x00'}
+ hdrArch386 archType = [...]byte{'0', '1', '\x00'}
+ hdrArchAMD64 archType = [...]byte{'0', '2', '\x00'}
+ hdrArchARM archType = [...]byte{'0', '3', '\x00'}
+ hdrArchARM64 archType = [...]byte{'0', '4', '\x00'}
+ hdrArchPPC64 archType = [...]byte{'0', '5', '\x00'}
+ hdrArchPPC64le archType = [...]byte{'0', '6', '\x00'}
+ hdrArchMIPS archType = [...]byte{'0', '7', '\x00'}
+ hdrArchMIPSle archType = [...]byte{'0', '8', '\x00'}
+ hdrArchMIPS64 archType = [...]byte{'0', '9', '\x00'}
+ hdrArchMIPS64le archType = [...]byte{'1', '0', '\x00'}
+ hdrArchS390x archType = [...]byte{'1', '1', '\x00'}
+ hdrArchRISCV64 archType = [...]byte{'1', '2', '\x00'}
+)
+
+type archType [3]byte
+
+// getSIFArch returns the archType corresponding to the Go runtime arch.
+func getSIFArch(arch string) archType {
+ archMap := map[string]archType{
+ "386": hdrArch386,
+ "amd64": hdrArchAMD64,
+ "arm": hdrArchARM,
+ "arm64": hdrArchARM64,
+ "ppc64": hdrArchPPC64,
+ "ppc64le": hdrArchPPC64le,
+ "mips": hdrArchMIPS,
+ "mipsle": hdrArchMIPSle,
+ "mips64": hdrArchMIPS64,
+ "mips64le": hdrArchMIPS64le,
+ "s390x": hdrArchS390x,
+ "riscv64": hdrArchRISCV64,
+ }
+
+ t, ok := archMap[arch]
+ if !ok {
+ return hdrArchUnknown
+ }
+ return t
+}
+
+// GoArch returns the go runtime arch corresponding to t.
+func (t archType) GoArch() string {
+ archMap := map[archType]string{
+ hdrArch386: "386",
+ hdrArchAMD64: "amd64",
+ hdrArchARM: "arm",
+ hdrArchARM64: "arm64",
+ hdrArchPPC64: "ppc64",
+ hdrArchPPC64le: "ppc64le",
+ hdrArchMIPS: "mips",
+ hdrArchMIPSle: "mipsle",
+ hdrArchMIPS64: "mips64",
+ hdrArchMIPS64le: "mips64le",
+ hdrArchS390x: "s390x",
+ hdrArchRISCV64: "riscv64",
+ }
+
+ arch, ok := archMap[t]
+ if !ok {
+ arch = "unknown"
+ }
+ return arch
+}
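Since `getSIFArch` and `archType.GoArch` are inverse lookups over the same table, a package-internal test (e.g. a hypothetical `arch_test.go`) can check the round trip directly:

```go
package sif

import "testing"

func TestArchRoundTrip(t *testing.T) {
	if got := getSIFArch("amd64").GoArch(); got != "amd64" {
		t.Errorf("round trip: got %q, want %q", got, "amd64")
	}
	// Architectures outside the table collapse to hdrArchUnknown / "unknown".
	if got := getSIFArch("sparc64").GoArch(); got != "unknown" {
		t.Errorf("unmapped arch: got %q, want %q", got, "unknown")
	}
}
```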
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go
new file mode 100644
index 00000000000..1d73a4750e6
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2021-2022, Sylabs Inc. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "errors"
+ "io"
+)
+
+// A Buffer is a variable-sized buffer of bytes that implements the sif.ReadWriter interface. The
+// zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte
+ pos int64
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial contents.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{buf: buf}
+}
+
+var errNegativeOffset = errors.New("negative offset")
+
+// ReadAt implements the io.ReaderAt interface.
+func (b *Buffer) ReadAt(p []byte, off int64) (int, error) {
+ if off < 0 {
+ return 0, errNegativeOffset
+ }
+
+ if off >= int64(len(b.buf)) {
+ return 0, io.EOF
+ }
+
+ n := copy(p, b.buf[off:])
+ if n < len(p) {
+ return n, io.EOF
+ }
+ return n, nil
+}
+
+var errNegativePosition = errors.New("negative position")
+
+// Write implements the io.Writer interface.
+func (b *Buffer) Write(p []byte) (int, error) {
+ if b.pos < 0 {
+ return 0, errNegativePosition
+ }
+
+ if have, need := int64(len(b.buf))-b.pos, int64(len(p)); have < need {
+ b.buf = append(b.buf, make([]byte, need-have)...)
+ }
+
+ n := copy(b.buf[b.pos:], p)
+ b.pos += int64(n)
+ return n, nil
+}
+
+var errInvalidWhence = errors.New("invalid whence")
+
+// Seek implements the io.Seeker interface.
+func (b *Buffer) Seek(offset int64, whence int) (int64, error) {
+ var abs int64
+
+ switch whence {
+ case io.SeekStart:
+ abs = offset
+ case io.SeekCurrent:
+ abs = b.pos + offset
+ case io.SeekEnd:
+ abs = int64(len(b.buf)) + offset
+ default:
+ return 0, errInvalidWhence
+ }
+
+ if abs < 0 {
+ return 0, errNegativePosition
+ }
+
+ b.pos = abs
+ return abs, nil
+}
+
+var errTruncateRange = errors.New("truncation out of range")
+
+// Truncate discards all but the first n bytes from the buffer.
+func (b *Buffer) Truncate(n int64) error {
+ if n < 0 || n > int64(len(b.buf)) {
+ return errTruncateRange
+ }
+
+ b.buf = b.buf[:n]
+ return nil
+}
+
+// Bytes returns the contents of the buffer. The slice is valid for use only until the next buffer
+// modification (that is, only until the next call to a method like ReadAt, Write, or Truncate).
+func (b *Buffer) Bytes() []byte { return b.buf }
+
+// Len returns the number of bytes in the buffer.
+func (b *Buffer) Len() int64 { return int64(len(b.buf)) }
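A sketch of `Buffer` as an in-memory ReadWriter: `Write` grows the buffer from the current position, `ReadAt` reads without moving it, and `Truncate` drops the tail:

```go
package main

import (
	"fmt"
	"io"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	b := sif.NewBuffer(nil)
	b.Write([]byte("hello")) // position and length are now 5
	b.Seek(0, io.SeekStart)
	b.Write([]byte("H")) // overwrites in place: "Hello"

	p := make([]byte, 5)
	b.ReadAt(p, 0) // does not move the write position
	fmt.Printf("%s (len=%d)\n", p, b.Len()) // Hello (len=5)

	b.Truncate(4)
	fmt.Println(string(b.Bytes())) // Hell
}
```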
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
new file mode 100644
index 00000000000..e65bdb7476d
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
@@ -0,0 +1,680 @@
+// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
+// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
+// Copyright (c) 2017, Yannick Cote. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/google/uuid"
+)
+
+// nextAligned finds the next offset that satisfies alignment.
+func nextAligned(offset int64, alignment int) int64 {
+ align64 := uint64(alignment)
+ offset64 := uint64(offset)
+
+ if align64 != 0 && offset64%align64 != 0 {
+ offset64 = (offset64 & ^(align64 - 1)) + align64
+ }
+
+ return int64(offset64)
+}
+
+// writeDataObjectAt writes the data object described by di to ws, using time t, recording details
+// in d. The object is written at the first position that satisfies the alignment requirements
+// described by di following offsetUnaligned.
+func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll
+ offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart)
+ if err != nil {
+ return err
+ }
+
+ n, err := io.Copy(ws, di.r)
+ if err != nil {
+ return err
+ }
+
+ if err := di.fillDescriptor(t, d); err != nil {
+ return err
+ }
+ d.Used = true
+ d.Offset = offset
+ d.Size = n
+ d.SizeWithPadding = offset - offsetUnaligned + n
+
+ return nil
+}
+
+var (
+ errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image")
+ errPrimaryPartition = errors.New("image already contains a primary partition")
+)
+
+// writeDataObject writes the data object described by di to f, using time t, recording details in
+// the descriptor at index i.
+func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) error {
+ if i >= len(f.rds) {
+ return errInsufficientCapacity
+ }
+
+ // If this is a primary partition, verify there isn't another primary partition, and update the
+ // architecture in the global header.
+ if p, ok := di.opts.extra.(partition); ok && p.Parttype == PartPrimSys {
+ if ds, err := f.GetDescriptors(WithPartitionType(PartPrimSys)); err == nil && len(ds) > 0 {
+ return errPrimaryPartition
+ }
+
+ f.h.Arch = p.Arch
+ }
+
+ d := &f.rds[i]
+ d.ID = uint32(i) + 1
+
+ if err := writeDataObjectAt(f.rw, f.h.DataOffset+f.h.DataSize, di, t, d); err != nil {
+ return err
+ }
+
+ // Update minimum object ID map.
+ if minID, ok := f.minIDs[d.GroupID]; !ok || d.ID < minID {
+ f.minIDs[d.GroupID] = d.ID
+ }
+
+ f.h.DescriptorsFree--
+ f.h.DataSize += d.SizeWithPadding
+
+ return nil
+}
+
+// writeDescriptors writes the descriptors in f to backing storage.
+func (f *FileImage) writeDescriptors() error {
+ if _, err := f.rw.Seek(f.h.DescriptorsOffset, io.SeekStart); err != nil {
+ return err
+ }
+
+ return binary.Write(f.rw, binary.LittleEndian, f.rds)
+}
+
+// writeHeader writes the global header in f to backing storage.
+func (f *FileImage) writeHeader() error {
+ if _, err := f.rw.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+
+ return binary.Write(f.rw, binary.LittleEndian, f.h)
+}
+
+// createOpts accumulates container creation options.
+type createOpts struct {
+ launchScript [hdrLaunchLen]byte
+ id uuid.UUID
+ descriptorsOffset int64
+ descriptorCapacity int64
+ dis []DescriptorInput
+ t time.Time
+ closeOnUnload bool
+}
+
+// CreateOpt are used to specify container creation options.
+type CreateOpt func(*createOpts) error
+
+var errLaunchScriptLen = errors.New("launch script too large")
+
+// OptCreateWithLaunchScript specifies s as the launch script.
+func OptCreateWithLaunchScript(s string) CreateOpt {
+ return func(co *createOpts) error {
+ b := []byte(s)
+
+ if len(b) >= len(co.launchScript) {
+ return errLaunchScriptLen
+ }
+
+ copy(co.launchScript[:], b)
+
+ return nil
+ }
+}
+
+// OptCreateDeterministic sets header/descriptor fields to values that support deterministic
+// creation of images.
+func OptCreateDeterministic() CreateOpt {
+ return func(co *createOpts) error {
+ co.id = uuid.Nil
+ co.t = time.Time{}
+ return nil
+ }
+}
+
+// OptCreateWithID specifies id as the unique ID.
+func OptCreateWithID(id string) CreateOpt {
+ return func(co *createOpts) error {
+ id, err := uuid.Parse(id)
+ co.id = id
+ return err
+ }
+}
+
+// OptCreateWithDescriptorCapacity specifies that the created image should have the capacity for a
+// maximum of n descriptors.
+func OptCreateWithDescriptorCapacity(n int64) CreateOpt {
+ return func(co *createOpts) error {
+ co.descriptorCapacity = n
+ return nil
+ }
+}
+
+// OptCreateWithDescriptors appends dis to the list of descriptors.
+func OptCreateWithDescriptors(dis ...DescriptorInput) CreateOpt {
+ return func(co *createOpts) error {
+ co.dis = append(co.dis, dis...)
+ return nil
+ }
+}
+
+// OptCreateWithTime specifies t as the image creation time.
+func OptCreateWithTime(t time.Time) CreateOpt {
+ return func(co *createOpts) error {
+ co.t = t
+ return nil
+ }
+}
+
+// OptCreateWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer.
+// By default, the ReadWriter will be closed if it implements the io.Closer interface.
+func OptCreateWithCloseOnUnload(b bool) CreateOpt {
+ return func(co *createOpts) error {
+ co.closeOnUnload = b
+ return nil
+ }
+}
+
+// createContainer creates a new SIF container file in rw, according to opts.
+func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) {
+ rds := make([]rawDescriptor, co.descriptorCapacity)
+ rdsSize := int64(binary.Size(rds))
+
+ h := header{
+ LaunchScript: co.launchScript,
+ Magic: hdrMagic,
+ Version: CurrentVersion.bytes(),
+ Arch: hdrArchUnknown,
+ ID: co.id,
+ CreatedAt: co.t.Unix(),
+ ModifiedAt: co.t.Unix(),
+ DescriptorsFree: co.descriptorCapacity,
+ DescriptorsTotal: co.descriptorCapacity,
+ DescriptorsOffset: co.descriptorsOffset,
+ DescriptorsSize: rdsSize,
+ DataOffset: co.descriptorsOffset + rdsSize,
+ }
+
+ f := &FileImage{
+ rw: rw,
+ h: h,
+ rds: rds,
+ minIDs: make(map[uint32]uint32),
+ }
+
+ for i, di := range co.dis {
+ if err := f.writeDataObject(i, di, co.t); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := f.writeDescriptors(); err != nil {
+ return nil, err
+ }
+
+ if err := f.writeHeader(); err != nil {
+ return nil, err
+ }
+
+ return f, nil
+}
+
+// CreateContainer creates a new SIF container in rw, according to opts. One or more data objects
+// can optionally be specified using OptCreateWithDescriptors.
+//
+// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
+// are released. By default, UnloadContainer will close rw if it implements the io.Closer
+// interface. To change this behavior, consider using OptCreateWithCloseOnUnload.
+//
+// By default, the image ID is set to a randomly generated value. To override this, consider using
+// OptCreateDeterministic or OptCreateWithID.
+//
+// By default, the image creation time is set to time.Now(). To override this, consider using
+// OptCreateDeterministic or OptCreateWithTime.
+//
+// By default, the image will support a maximum of 48 descriptors. To change this, consider using
+// OptCreateWithDescriptorCapacity.
+//
+// A launch script can optionally be set using OptCreateWithLaunchScript.
+func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) {
+ id, err := uuid.NewRandom()
+ if err != nil {
+ return nil, err
+ }
+
+ co := createOpts{
+ id: id,
+ descriptorsOffset: 4096,
+ descriptorCapacity: 48,
+ t: time.Now(),
+ closeOnUnload: true,
+ }
+
+ for _, opt := range opts {
+ if err := opt(&co); err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+ }
+
+ f, err := createContainer(rw, co)
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ f.closeOnUnload = co.closeOnUnload
+ return f, nil
+}
+
+// CreateContainerAtPath creates a new SIF container file at path, according to opts. One or more
+// data objects can optionally be specified using OptCreateWithDescriptors.
+//
+// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
+// are released.
+//
+// By default, the image ID is set to a randomly generated value. To override this, consider using
+// OptCreateDeterministic or OptCreateWithID.
+//
+// By default, the image creation time is set to time.Now(). To override this, consider using
+// OptCreateDeterministic or OptCreateWithTime.
+//
+// By default, the image will support a maximum of 48 descriptors. To change this, consider using
+// OptCreateWithDescriptorCapacity.
+//
+// A launch script can optionally be set using OptCreateWithLaunchScript.
+func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) {
+ fp, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755)
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ f, err := CreateContainer(fp, opts...)
+ if err != nil {
+ fp.Close()
+ os.Remove(fp.Name())
+
+ return nil, err
+ }
+
+ f.closeOnUnload = true
+ return f, nil
+}
+
+func zeroData(fimg *FileImage, descr *rawDescriptor) error {
+ // first, move to data object offset
+ if _, err := fimg.rw.Seek(descr.Offset, io.SeekStart); err != nil {
+ return err
+ }
+
+ var zero [4096]byte
+ n := descr.Size
+ upbound := int64(4096)
+ for {
+ if n < 4096 {
+ upbound = n
+ }
+
+ if _, err := fimg.rw.Write(zero[:upbound]); err != nil {
+ return err
+ }
+ n -= 4096
+ if n <= 0 {
+ break
+ }
+ }
+
+ return nil
+}
+
+func resetDescriptor(fimg *FileImage, index int) error {
+ // If we remove the primary partition, set the global header Arch field to HdrArchUnknown
+ // to indicate that the SIF file doesn't include a primary partition and no dependency
+ // on any architecture exists.
+ if fimg.rds[index].isPartitionOfType(PartPrimSys) {
+ fimg.h.Arch = hdrArchUnknown
+ }
+
+ offset := fimg.h.DescriptorsOffset + int64(index)*int64(binary.Size(fimg.rds[0]))
+
+ // first, move to descriptor offset
+ if _, err := fimg.rw.Seek(offset, io.SeekStart); err != nil {
+ return err
+ }
+
+ var emptyDesc rawDescriptor
+ return binary.Write(fimg.rw, binary.LittleEndian, emptyDesc)
+}
+
+// addOpts accumulates object add options.
+type addOpts struct {
+ t time.Time
+}
+
+// AddOpt are used to specify object add options.
+type AddOpt func(*addOpts) error
+
+// OptAddDeterministic sets header/descriptor fields to values that support deterministic
+// modification of images.
+func OptAddDeterministic() AddOpt {
+ return func(ao *addOpts) error {
+ ao.t = time.Time{}
+ return nil
+ }
+}
+
+// OptAddWithTime specifies t as the image modification time.
+func OptAddWithTime(t time.Time) AddOpt {
+ return func(ao *addOpts) error {
+ ao.t = t
+ return nil
+ }
+}
+
+// AddObject adds a new data object and its descriptor into the specified SIF file.
+//
+// By default, the image modification time is set to the current time. To override this, consider
+// using OptAddDeterministic or OptAddWithTime.
+func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error {
+ ao := addOpts{
+ t: time.Now(),
+ }
+
+ for _, opt := range opts {
+ if err := opt(&ao); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+
+ // Find an unused descriptor.
+ i := 0
+ for _, rd := range f.rds {
+ if !rd.Used {
+ break
+ }
+ i++
+ }
+
+ if err := f.writeDataObject(i, di, ao.t); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ if err := f.writeDescriptors(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ f.h.ModifiedAt = ao.t.Unix()
+
+ if err := f.writeHeader(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ return nil
+}
+
+// isLast returns true if the data object associated with d is the last in f.
+func (f *FileImage) isLast(d *rawDescriptor) bool {
+ isLast := true
+
+ end := d.Offset + d.Size
+ f.WithDescriptors(func(d Descriptor) bool {
+ isLast = d.Offset()+d.Size() <= end
+ return !isLast
+ })
+
+ return isLast
+}
+
+// truncateAt truncates f at the start of the padded data object described by d.
+func (f *FileImage) truncateAt(d *rawDescriptor) error {
+ start := d.Offset + d.Size - d.SizeWithPadding
+
+ if err := f.rw.Truncate(start); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// deleteOpts accumulates object deletion options.
+type deleteOpts struct {
+ zero bool
+ compact bool
+ t time.Time
+}
+
+// DeleteOpt are used to specify object deletion options.
+type DeleteOpt func(*deleteOpts) error
+
+// OptDeleteZero specifies whether the deleted object should be zeroed.
+func OptDeleteZero(b bool) DeleteOpt {
+ return func(do *deleteOpts) error {
+ do.zero = b
+ return nil
+ }
+}
+
+// OptDeleteCompact specifies whether the image should be compacted following object deletion.
+func OptDeleteCompact(b bool) DeleteOpt {
+ return func(do *deleteOpts) error {
+ do.compact = b
+ return nil
+ }
+}
+
+// OptDeleteDeterministic sets header/descriptor fields to values that support deterministic
+// modification of images.
+func OptDeleteDeterministic() DeleteOpt {
+ return func(do *deleteOpts) error {
+ do.t = time.Time{}
+ return nil
+ }
+}
+
+// OptDeleteWithTime specifies t as the image modification time.
+func OptDeleteWithTime(t time.Time) DeleteOpt {
+ return func(do *deleteOpts) error {
+ do.t = t
+ return nil
+ }
+}
+
+var errCompactNotImplemented = errors.New("compact not implemented for non-last object")
+
+// DeleteObject deletes the data object with id, according to opts.
+//
+// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following
+// object deletion, use OptDeleteCompact.
+//
+// By default, the image modification time is set to time.Now(). To override this, consider using
+// OptDeleteDeterministic or OptDeleteWithTime.
+func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
+ do := deleteOpts{
+ t: time.Now(),
+ }
+
+ for _, opt := range opts {
+ if err := opt(&do); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+
+ d, err := f.getDescriptor(WithID(id))
+ if err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ if do.compact && !f.isLast(d) {
+ return fmt.Errorf("%w", errCompactNotImplemented)
+ }
+
+ if do.zero {
+ if err := zeroData(f, d); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+
+ if do.compact {
+ if err := f.truncateAt(d); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ f.h.DataSize -= d.SizeWithPadding
+ }
+
+ f.h.DescriptorsFree++
+ f.h.ModifiedAt = do.t.Unix()
+
+ index := 0
+ for i, od := range f.rds {
+ if od.ID == id {
+ index = i
+ break
+ }
+ }
+
+ if err := resetDescriptor(f, index); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ if err := f.writeHeader(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ return nil
+}
+
+// setOpts accumulates object set options.
+type setOpts struct {
+ t time.Time
+}
+
+// SetOpt are used to specify object set options.
+type SetOpt func(*setOpts) error
+
+// OptSetDeterministic sets header/descriptor fields to values that support deterministic
+// modification of images.
+func OptSetDeterministic() SetOpt {
+ return func(so *setOpts) error {
+ so.t = time.Time{}
+ return nil
+ }
+}
+
+// OptSetWithTime specifies t as the image/object modification time.
+func OptSetWithTime(t time.Time) SetOpt {
+ return func(so *setOpts) error {
+ so.t = t
+ return nil
+ }
+}
+
+var (
+ errNotPartition = errors.New("data object not a partition")
+ errNotSystem = errors.New("data object not a system partition")
+)
+
+// SetPrimPart sets the specified system partition to be the primary one.
+//
+// By default, the image/object modification times are set to time.Now(). To override this,
+// consider using OptSetDeterministic or OptSetWithTime.
+func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error {
+ so := setOpts{
+ t: time.Now(),
+ }
+
+ for _, opt := range opts {
+ if err := opt(&so); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+
+ descr, err := f.getDescriptor(WithID(id))
+ if err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ if descr.DataType != DataPartition {
+ return fmt.Errorf("%w", errNotPartition)
+ }
+
+ fs, pt, arch, err := descr.getPartitionMetadata()
+ if err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ // if already primary system partition, nothing to do
+ if pt == PartPrimSys {
+ return nil
+ }
+
+ if pt != PartSystem {
+ return fmt.Errorf("%w", errNotSystem)
+ }
+
+ olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys))
+ if err != nil && !errors.Is(err, ErrObjectNotFound) {
+ return fmt.Errorf("%w", err)
+ }
+
+ f.h.Arch = getSIFArch(arch)
+
+ extra := partition{
+ Fstype: fs,
+ Parttype: PartPrimSys,
+ }
+ copy(extra.Arch[:], arch)
+
+ if err := descr.setExtra(extra); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ if olddescr != nil {
+ oldfs, _, oldarch, err := olddescr.getPartitionMetadata()
+ if err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ oldextra := partition{
+ Fstype: oldfs,
+ Parttype: PartSystem,
+ Arch: getSIFArch(oldarch),
+ }
+
+ if err := olddescr.setExtra(oldextra); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+
+ if err := f.writeDescriptors(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ f.h.ModifiedAt = so.t.Unix()
+
+ if err := f.writeHeader(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ return nil
+}
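A sketch of the creation API above, building a SIF in memory; `FsSquash` is assumed from the package's filesystem-type constants, and `UnloadContainer` from elsewhere in the package:

```go
package main

import (
	"fmt"
	"log"
	"runtime"
	"strings"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	// Describe a primary system partition backed by an in-memory reader.
	di, err := sif.NewDescriptorInput(sif.DataPartition, strings.NewReader("rootfs bytes"),
		sif.OptPartitionMetadata(sif.FsSquash, sif.PartPrimSys, runtime.GOARCH),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Create the container in a sif.Buffer rather than on disk.
	buf := sif.NewBuffer(nil)
	f, err := sif.CreateContainer(buf,
		sif.OptCreateDeterministic(), // zero ID and timestamps for reproducible bytes
		sif.OptCreateWithDescriptors(di),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer f.UnloadContainer() // closes rw only if it implements io.Closer

	fmt.Println("SIF size:", buf.Len())
}
```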
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go
new file mode 100644
index 00000000000..03ed2b042bd
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go
@@ -0,0 +1,294 @@
+// Copyright (c) 2018-2022, Sylabs Inc. All rights reserved.
+// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
+// Copyright (c) 2017, Yannick Cote. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+)
+
+// rawDescriptor represents an on-disk object descriptor.
+type rawDescriptor struct {
+ DataType DataType
+ Used bool
+ ID uint32
+ GroupID uint32
+ LinkedID uint32
+ Offset int64
+ Size int64
+ SizeWithPadding int64
+
+ CreatedAt int64
+ ModifiedAt int64
+ UID int64 // Deprecated: UID exists for historical compatibility and should not be used.
+ GID int64 // Deprecated: GID exists for historical compatibility and should not be used.
+ Name [descrNameLen]byte
+ Extra [descrMaxPrivLen]byte
+}
+
+// partition represents the SIF partition data object descriptor.
+type partition struct {
+ Fstype FSType
+ Parttype PartType
+ Arch archType
+}
+
+// signature represents the SIF signature data object descriptor.
+type signature struct {
+ Hashtype hashType
+ Entity [descrEntityLen]byte
+}
+
+// cryptoMessage represents the SIF crypto message object descriptor.
+type cryptoMessage struct {
+ Formattype FormatType
+ Messagetype MessageType
+}
+
+// sbom represents the SIF SBOM data object descriptor.
+type sbom struct {
+ Format SBOMFormat
+}
+
+var errNameTooLarge = errors.New("name value too large")
+
+// setName encodes name into the name field of d.
+func (d *rawDescriptor) setName(name string) error {
+ if len(name) > len(d.Name) {
+ return errNameTooLarge
+ }
+
+ for i := copy(d.Name[:], name); i < len(d.Name); i++ {
+ d.Name[i] = 0
+ }
+
+ return nil
+}
+
+var errExtraTooLarge = errors.New("extra value too large")
+
+// setExtra encodes v into the extra field of d.
+func (d *rawDescriptor) setExtra(v interface{}) error {
+ if v == nil {
+ return nil
+ }
+
+ if binary.Size(v) > len(d.Extra) {
+ return errExtraTooLarge
+ }
+
+ b := new(bytes.Buffer)
+ if err := binary.Write(b, binary.LittleEndian, v); err != nil {
+ return err
+ }
+
+ for i := copy(d.Extra[:], b.Bytes()); i < len(d.Extra); i++ {
+ d.Extra[i] = 0
+ }
+
+ return nil
+}
+
+// getPartitionMetadata gets metadata for a partition data object.
+func (d rawDescriptor) getPartitionMetadata() (FSType, PartType, string, error) {
+ if got, want := d.DataType, DataPartition; got != want {
+ return 0, 0, "", &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ var p partition
+
+ b := bytes.NewReader(d.Extra[:])
+ if err := binary.Read(b, binary.LittleEndian, &p); err != nil {
+ return 0, 0, "", fmt.Errorf("%w", err)
+ }
+
+ return p.Fstype, p.Parttype, p.Arch.GoArch(), nil
+}
+
+// isPartitionOfType returns true if d is a partition data object of type pt.
+func (d rawDescriptor) isPartitionOfType(pt PartType) bool {
+ _, t, _, err := d.getPartitionMetadata()
+ if err != nil {
+ return false
+ }
+ return t == pt
+}
+
+// Descriptor represents the SIF descriptor type.
+type Descriptor struct {
+ r io.ReaderAt // Backing storage.
+
+ raw rawDescriptor // Raw descriptor from image.
+
+ relativeID uint32 // ID relative to minimum ID of object group.
+}
+
+// DataType returns the type of data object.
+func (d Descriptor) DataType() DataType { return d.raw.DataType }
+
+// ID returns the data object ID of d.
+func (d Descriptor) ID() uint32 { return d.raw.ID }
+
+// GroupID returns the data object group ID of d, or zero if d is not part of a data object
+// group.
+func (d Descriptor) GroupID() uint32 { return d.raw.GroupID &^ descrGroupMask }
+
+// LinkedID returns the object/group ID d is linked to, or zero if d does not contain a linked
+// ID. If isGroup is true, the returned id is an object group ID. Otherwise, the returned id is a
+// data object ID.
+//
+//nolint:nonamedreturns // Named returns effective as documentation.
+func (d Descriptor) LinkedID() (id uint32, isGroup bool) {
+ return d.raw.LinkedID &^ descrGroupMask, d.raw.LinkedID&descrGroupMask == descrGroupMask
+}
+
+// Offset returns the offset of the data object.
+func (d Descriptor) Offset() int64 { return d.raw.Offset }
+
+// Size returns the data object size.
+func (d Descriptor) Size() int64 { return d.raw.Size }
+
+// CreatedAt returns the creation time of the data object.
+func (d Descriptor) CreatedAt() time.Time { return time.Unix(d.raw.CreatedAt, 0) }
+
+// ModifiedAt returns the modification time of the data object.
+func (d Descriptor) ModifiedAt() time.Time { return time.Unix(d.raw.ModifiedAt, 0) }
+
+// Name returns the name of the data object.
+func (d Descriptor) Name() string { return strings.TrimRight(string(d.raw.Name[:]), "\000") }
+
+// PartitionMetadata gets metadata for a partition data object.
+//
+//nolint:nonamedreturns // Named returns effective as documentation.
+func (d Descriptor) PartitionMetadata() (fs FSType, pt PartType, arch string, err error) {
+ return d.raw.getPartitionMetadata()
+}
+
+var errHashUnsupported = errors.New("hash algorithm unsupported")
+
+// getHashType converts ht into a crypto.Hash.
+func getHashType(ht hashType) (crypto.Hash, error) {
+ switch ht {
+ case hashSHA256:
+ return crypto.SHA256, nil
+ case hashSHA384:
+ return crypto.SHA384, nil
+ case hashSHA512:
+ return crypto.SHA512, nil
+ case hashBLAKE2S:
+ return crypto.BLAKE2s_256, nil
+ case hashBLAKE2B:
+ return crypto.BLAKE2b_256, nil
+ }
+ return 0, errHashUnsupported
+}
+
+// SignatureMetadata gets metadata for a signature data object.
+//
+//nolint:nonamedreturns // Named returns effective as documentation.
+func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) {
+ if got, want := d.raw.DataType, DataSignature; got != want {
+ return ht, fp, &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ var s signature
+
+ b := bytes.NewReader(d.raw.Extra[:])
+ if err := binary.Read(b, binary.LittleEndian, &s); err != nil {
+ return ht, fp, fmt.Errorf("%w", err)
+ }
+
+ if ht, err = getHashType(s.Hashtype); err != nil {
+ return ht, fp, fmt.Errorf("%w", err)
+ }
+
+ fp = make([]byte, 20)
+ copy(fp, s.Entity[:])
+
+ return ht, fp, nil
+}
+
+// CryptoMessageMetadata gets metadata for a crypto message data object.
+func (d Descriptor) CryptoMessageMetadata() (FormatType, MessageType, error) {
+ if got, want := d.raw.DataType, DataCryptoMessage; got != want {
+ return 0, 0, &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ var m cryptoMessage
+
+ b := bytes.NewReader(d.raw.Extra[:])
+ if err := binary.Read(b, binary.LittleEndian, &m); err != nil {
+ return 0, 0, fmt.Errorf("%w", err)
+ }
+
+ return m.Formattype, m.Messagetype, nil
+}
+
+// SBOMMetadata gets metadata for a SBOM data object.
+func (d Descriptor) SBOMMetadata() (SBOMFormat, error) {
+ if got, want := d.raw.DataType, DataSBOM; got != want {
+ return 0, &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ var s sbom
+
+ b := bytes.NewReader(d.raw.Extra[:])
+ if err := binary.Read(b, binary.LittleEndian, &s); err != nil {
+ return 0, fmt.Errorf("%w", err)
+ }
+
+ return s.Format, nil
+}
+
+// GetData returns the data object associated with descriptor d.
+func (d Descriptor) GetData() ([]byte, error) {
+ b := make([]byte, d.raw.Size)
+ if _, err := io.ReadFull(d.GetReader(), b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// GetReader returns a io.Reader that reads the data object associated with descriptor d.
+func (d Descriptor) GetReader() io.Reader {
+ return io.NewSectionReader(d.r, d.raw.Offset, d.raw.Size)
+}
+
+// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from d.
+func (d Descriptor) GetIntegrityReader() io.Reader {
+ fields := []interface{}{
+ d.raw.DataType,
+ d.raw.Used,
+ d.relativeID,
+ d.raw.LinkedID,
+ d.raw.Size,
+ d.raw.CreatedAt,
+ d.raw.UID,
+ d.raw.GID,
+ }
+
+ // Encode endian-sensitive fields.
+ data := bytes.Buffer{}
+ for _, f := range fields {
+ if err := binary.Write(&data, binary.LittleEndian, f); err != nil {
+ panic(err) // (*bytes.Buffer).Write() is documented as always returning a nil error.
+ }
+ }
+
+ return io.MultiReader(
+ &data,
+ bytes.NewReader(d.raw.Name[:]),
+ bytes.NewReader(d.raw.Extra[:]),
+ )
+}
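A sketch of reading descriptor metadata back out of an image; `GetDescriptors` and `WithPartitionType` are referenced above, and `LoadContainerFromPath` is assumed from the load API whose file begins below:

```go
package main

import (
	"fmt"
	"log"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	f, err := sif.LoadContainerFromPath("image.sif")
	if err != nil {
		log.Fatal(err)
	}
	defer f.UnloadContainer()

	// Select the primary system partition, if any, and print its metadata.
	ds, err := f.GetDescriptors(sif.WithPartitionType(sif.PartPrimSys))
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range ds {
		_, _, arch, err := d.PartitionMetadata()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("id=%d name=%q arch=%s size=%d\n", d.ID(), d.Name(), arch, d.Size())
	}
}
```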
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go
new file mode 100644
index 00000000000..3e81c394fcf
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go
@@ -0,0 +1,321 @@
+// Copyright (c) 2021-2022, Sylabs Inc. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "crypto"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+)
+
+// descriptorOpts accumulates data object options.
+type descriptorOpts struct {
+ groupID uint32
+ linkID uint32
+ alignment int
+ name string
+ extra interface{}
+ t time.Time
+}
+
+// DescriptorInputOpt are used to specify data object options.
+type DescriptorInputOpt func(DataType, *descriptorOpts) error
+
+// OptNoGroup specifies the data object is not contained within a data object group.
+func OptNoGroup() DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ opts.groupID = 0
+ return nil
+ }
+}
+
+// OptGroupID specifies groupID as data object group ID.
+func OptGroupID(groupID uint32) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ if groupID == 0 {
+ return ErrInvalidGroupID
+ }
+ opts.groupID = groupID
+ return nil
+ }
+}
+
+// OptLinkedID specifies that the data object is linked to the data object with the specified ID.
+func OptLinkedID(id uint32) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ if id == 0 {
+ return ErrInvalidObjectID
+ }
+ opts.linkID = id
+ return nil
+ }
+}
+
+// OptLinkedGroupID specifies that the data object is linked to the data object group with the
+// specified groupID.
+func OptLinkedGroupID(groupID uint32) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ if groupID == 0 {
+ return ErrInvalidGroupID
+ }
+ opts.linkID = groupID | descrGroupMask
+ return nil
+ }
+}
+
+// OptObjectAlignment specifies n as the data alignment requirement.
+func OptObjectAlignment(n int) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ opts.alignment = n
+ return nil
+ }
+}
+
+// OptObjectName specifies name as the data object name.
+func OptObjectName(name string) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ opts.name = name
+ return nil
+ }
+}
+
+// OptObjectTime specifies t as the data object creation time.
+func OptObjectTime(t time.Time) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ opts.t = t
+ return nil
+ }
+}
+
+type unexpectedDataTypeError struct {
+ got DataType
+ want []DataType
+}
+
+func (e *unexpectedDataTypeError) Error() string {
+ return fmt.Sprintf("unexpected data type %v, expected one of: %v", e.got, e.want)
+}
+
+func (e *unexpectedDataTypeError) Is(target error) bool {
+ //nolint:errorlint // don't compare wrapped errors in Is()
+ t, ok := target.(*unexpectedDataTypeError)
+ if !ok {
+ return false
+ }
+
+ if len(t.want) > 0 {
+ // Use a map to check that the "want" errors in e and t contain the same values, ignoring
+ // any ordering differences.
+ acc := make(map[DataType]int, len(t.want))
+
+ // Increment counter for each data type in e.
+ for _, dt := range e.want {
+ if _, ok := acc[dt]; !ok {
+ acc[dt] = 0
+ }
+ acc[dt]++
+ }
+
+ // Decrement counter for each data type in t.
+ for _, dt := range t.want {
+ if _, ok := acc[dt]; !ok {
+ return false
+ }
+ acc[dt]--
+ }
+
+ // If the "want" errors in e and t are equivalent, all counters should be zero.
+ for _, n := range acc {
+ if n != 0 {
+ return false
+ }
+ }
+ }
+
+ return (e.got == t.got || t.got == 0)
+}
+
+// OptCryptoMessageMetadata sets metadata for a crypto message data object. The format type is set
+// to ft, and the message type is set to mt.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptCryptoMessageMetadata(ft FormatType, mt MessageType) DescriptorInputOpt {
+ return func(t DataType, opts *descriptorOpts) error {
+ if got, want := t, DataCryptoMessage; got != want {
+ return &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ m := cryptoMessage{
+ Formattype: ft,
+ Messagetype: mt,
+ }
+
+ opts.extra = m
+ return nil
+ }
+}
+
+var errUnknownArchitecture = errors.New("unknown architecture")
+
+// OptPartitionMetadata sets metadata for a partition data object. The filesystem type is set to
+// fs, the partition type is set to pt, and the CPU architecture is set to arch. The value of arch
+// should be the architecture as represented by the Go runtime.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptPartitionMetadata(fs FSType, pt PartType, arch string) DescriptorInputOpt {
+ return func(t DataType, opts *descriptorOpts) error {
+ if got, want := t, DataPartition; got != want {
+ return &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ sifarch := getSIFArch(arch)
+ if sifarch == hdrArchUnknown {
+ return fmt.Errorf("%w: %v", errUnknownArchitecture, arch)
+ }
+
+ p := partition{
+ Fstype: fs,
+ Parttype: pt,
+ Arch: sifarch,
+ }
+
+ opts.extra = p
+ return nil
+ }
+}
+
+// sifHashType converts h into a HashType.
+func sifHashType(h crypto.Hash) hashType {
+ switch h {
+ case crypto.SHA256:
+ return hashSHA256
+ case crypto.SHA384:
+ return hashSHA384
+ case crypto.SHA512:
+ return hashSHA512
+ case crypto.BLAKE2s_256:
+ return hashBLAKE2S
+ case crypto.BLAKE2b_256:
+ return hashBLAKE2B
+ }
+ return 0
+}
+
+// OptSignatureMetadata sets metadata for a signature data object. The hash type is set to ht, and
+// the signing entity fingerprint is set to fp.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptSignatureMetadata(ht crypto.Hash, fp []byte) DescriptorInputOpt {
+ return func(t DataType, opts *descriptorOpts) error {
+ if got, want := t, DataSignature; got != want {
+ return &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ s := signature{
+ Hashtype: sifHashType(ht),
+ }
+ copy(s.Entity[:], fp)
+
+ opts.extra = s
+ return nil
+ }
+}
+
+// OptSBOMMetadata sets metadata for a SBOM data object. The SBOM format is set to f.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptSBOMMetadata(f SBOMFormat) DescriptorInputOpt {
+ return func(t DataType, opts *descriptorOpts) error {
+ if got, want := t, DataSBOM; got != want {
+ return &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ s := sbom{
+ Format: f,
+ }
+
+ opts.extra = s
+ return nil
+ }
+}
+
+// DescriptorInput describes a new data object.
+type DescriptorInput struct {
+ dt DataType
+ r io.Reader
+ opts descriptorOpts
+}
+
+// DefaultObjectGroup is the default group that data objects are placed in.
+const DefaultObjectGroup = 1
+
+// NewDescriptorInput returns a DescriptorInput representing a data object of type t, with contents
+// read from r, configured according to opts.
+//
+// It is possible (and often necessary) to store additional metadata related to certain types of
+// data objects. Consider supplying options such as OptCryptoMessageMetadata, OptPartitionMetadata,
+// OptSignatureMetadata, and OptSBOMMetadata for this purpose.
+//
+// By default, the data object will be placed in the default data object group (1). To override
+// this behavior, use OptNoGroup or OptGroupID. To link this data object, use OptLinkedID or
+// OptLinkedGroupID.
+//
+// By default, the data object will not be aligned unless it is of type DataPartition, in which
+// case it will be aligned on a 4096 byte boundary. To override this behavior, consider using
+// OptObjectAlignment.
+//
+// By default, no name is set for the data object. To set a name, use OptObjectName.
+//
+// When creating a new image, data object creation/modification times are set to the image creation
+// time. When modifying an existing image, the data object creation/modification time is set to the
+// image modification time. To override this behavior, consider using OptObjectTime.
+func NewDescriptorInput(t DataType, r io.Reader, opts ...DescriptorInputOpt) (DescriptorInput, error) {
+ dopts := descriptorOpts{
+ groupID: DefaultObjectGroup,
+ }
+
+ if t == DataPartition {
+ dopts.alignment = 4096
+ }
+
+ for _, opt := range opts {
+ if err := opt(t, &dopts); err != nil {
+ return DescriptorInput{}, fmt.Errorf("%w", err)
+ }
+ }
+
+ di := DescriptorInput{
+ dt: t,
+ r: r,
+ opts: dopts,
+ }
+
+ return di, nil
+}
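A minimal usage sketch for NewDescriptorInput composed with the metadata options above; the file name is hypothetical, and OptObjectName is assumed from the doc comment:

```go
package main

import (
	"log"
	"os"
	"runtime"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	// Hypothetical partition image to be wrapped as a data object.
	f, err := os.Open("rootfs.squashfs")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Metadata options are validated against the data type, so passing
	// OptPartitionMetadata to a non-partition object fails here.
	di, err := sif.NewDescriptorInput(sif.DataPartition, f,
		sif.OptPartitionMetadata(sif.FsSquash, sif.PartPrimSys, runtime.GOARCH),
		sif.OptObjectName("rootfs"),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = di // typically passed on when creating or modifying an image
}
```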
+
+// fillDescriptor fills d according to di. If di does not explicitly specify a time value, use t.
+func (di DescriptorInput) fillDescriptor(t time.Time, d *rawDescriptor) error {
+ d.DataType = di.dt
+ d.GroupID = di.opts.groupID | descrGroupMask
+ d.LinkedID = di.opts.linkID
+
+ if !di.opts.t.IsZero() {
+ t = di.opts.t
+ }
+ d.CreatedAt = t.Unix()
+ d.ModifiedAt = t.Unix()
+
+ d.UID = 0
+ d.GID = 0
+
+ if err := d.setName(di.opts.name); err != nil {
+ return err
+ }
+
+ return d.setExtra(di.opts.extra)
+}
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go
new file mode 100644
index 00000000000..75266e1945c
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go
@@ -0,0 +1,174 @@
+// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
+// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
+// Copyright (c) 2017, Yannick Cote. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+)
+
+var (
+ errInvalidMagic = errors.New("invalid SIF magic")
+ errIncompatibleVersion = errors.New("incompatible SIF version")
+)
+
+// isValidSif looks at key fields from the global header to assess SIF validity.
+func isValidSif(f *FileImage) error {
+ if f.h.Magic != hdrMagic {
+ return errInvalidMagic
+ }
+
+ if f.h.Version != CurrentVersion.bytes() {
+ return errIncompatibleVersion
+ }
+
+ return nil
+}
+
+// populateMinIDs populates the minIDs field of f.
+func (f *FileImage) populateMinIDs() {
+ f.minIDs = make(map[uint32]uint32)
+ f.WithDescriptors(func(d Descriptor) bool {
+ if minID, ok := f.minIDs[d.raw.GroupID]; !ok || d.ID() < minID {
+ f.minIDs[d.raw.GroupID] = d.ID()
+ }
+ return false
+ })
+}
+
+// loadContainer loads a SIF image from rw.
+func loadContainer(rw ReadWriter) (*FileImage, error) {
+ f := FileImage{rw: rw}
+
+ // Read global header.
+ err := binary.Read(
+ io.NewSectionReader(rw, 0, int64(binary.Size(f.h))),
+ binary.LittleEndian,
+ &f.h,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("reading global header: %w", err)
+ }
+
+ if err := isValidSif(&f); err != nil {
+ return nil, err
+ }
+
+ // Read descriptors.
+ f.rds = make([]rawDescriptor, f.h.DescriptorsTotal)
+ err = binary.Read(
+ io.NewSectionReader(rw, f.h.DescriptorsOffset, f.h.DescriptorsSize),
+ binary.LittleEndian,
+ &f.rds,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("reading descriptors: %w", err)
+ }
+
+ f.populateMinIDs()
+
+ return &f, nil
+}
+
+// loadOpts accumulates container loading options.
+type loadOpts struct {
+ flag int
+ closeOnUnload bool
+}
+
+// LoadOpt are used to specify container loading options.
+type LoadOpt func(*loadOpts) error
+
+// OptLoadWithFlag specifies the flag (os.O_RDONLY etc.) to be used when opening the container file.
+func OptLoadWithFlag(flag int) LoadOpt {
+ return func(lo *loadOpts) error {
+ lo.flag = flag
+ return nil
+ }
+}
+
+// OptLoadWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer.
+// By default, the ReadWriter will be closed if it implements the io.Closer interface.
+func OptLoadWithCloseOnUnload(b bool) LoadOpt {
+ return func(lo *loadOpts) error {
+ lo.closeOnUnload = b
+ return nil
+ }
+}
+
+// LoadContainerFromPath loads a new SIF container from path, according to opts.
+//
+// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
+// are released.
+//
+// By default, the file is opened for read and write access. To change this behavior, consider
+// using OptLoadWithFlag.
+func LoadContainerFromPath(path string, opts ...LoadOpt) (*FileImage, error) {
+ lo := loadOpts{
+ flag: os.O_RDWR,
+ }
+
+ for _, opt := range opts {
+ if err := opt(&lo); err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+ }
+
+ fp, err := os.OpenFile(path, lo.flag, 0)
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ f, err := loadContainer(fp)
+ if err != nil {
+ fp.Close()
+
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ f.closeOnUnload = true
+ return f, nil
+}
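A usage sketch for the load/unload pair above, opening read-only via OptLoadWithFlag; the image path is hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	f, err := sif.LoadContainerFromPath("image.sif",
		sif.OptLoadWithFlag(os.O_RDONLY), // override the default O_RDWR
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := f.UnloadContainer(); err != nil {
			log.Println("unload:", err)
		}
	}()

	fmt.Println(f.ID(), f.Version(), f.PrimaryArch())
}
```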
+
+// LoadContainer loads a new SIF container from rw, according to opts.
+//
+// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
+// are released. By default, UnloadContainer will close rw if it implements the io.Closer
+// interface. To change this behavior, consider using OptLoadWithCloseOnUnload.
+func LoadContainer(rw ReadWriter, opts ...LoadOpt) (*FileImage, error) {
+ lo := loadOpts{
+ closeOnUnload: true,
+ }
+
+ for _, opt := range opts {
+ if err := opt(&lo); err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+ }
+
+ f, err := loadContainer(rw)
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ f.closeOnUnload = lo.closeOnUnload
+ return f, nil
+}
+
+// UnloadContainer unloads f, releasing associated resources.
+func (f *FileImage) UnloadContainer() error {
+ if c, ok := f.rw.(io.Closer); ok && f.closeOnUnload {
+ if err := c.Close(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go
new file mode 100644
index 00000000000..635d6e89cfb
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go
@@ -0,0 +1,210 @@
+// Copyright (c) 2021, Sylabs Inc. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrNoObjects is the error returned when an image contains no data objects.
+var ErrNoObjects = errors.New("no objects in image")
+
+// ErrObjectNotFound is the error returned when a data object is not found.
+var ErrObjectNotFound = errors.New("object not found")
+
+// ErrMultipleObjectsFound is the error returned when multiple data objects are found.
+var ErrMultipleObjectsFound = errors.New("multiple objects found")
+
+// ErrInvalidObjectID is the error returned when an invalid object ID is supplied.
+var ErrInvalidObjectID = errors.New("invalid object ID")
+
+// ErrInvalidGroupID is the error returned when an invalid group ID is supplied.
+var ErrInvalidGroupID = errors.New("invalid group ID")
+
+// DescriptorSelectorFunc returns true if d matches, and false otherwise.
+type DescriptorSelectorFunc func(d Descriptor) (bool, error)
+
+// WithDataType selects descriptors that have data type dt.
+func WithDataType(dt DataType) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ return d.DataType() == dt, nil
+ }
+}
+
+// WithID selects descriptors with a matching ID.
+func WithID(id uint32) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ if id == 0 {
+ return false, ErrInvalidObjectID
+ }
+ return d.ID() == id, nil
+ }
+}
+
+// WithNoGroup selects descriptors that are not contained within an object group.
+func WithNoGroup() DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ return d.GroupID() == 0, nil
+ }
+}
+
+// WithGroupID returns a selector func that selects descriptors with a matching groupID.
+func WithGroupID(groupID uint32) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ if groupID == 0 {
+ return false, ErrInvalidGroupID
+ }
+ return d.GroupID() == groupID, nil
+ }
+}
+
+// WithLinkedID selects descriptors that are linked to the data object with specified ID.
+func WithLinkedID(id uint32) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ if id == 0 {
+ return false, ErrInvalidObjectID
+ }
+ linkedID, isGroup := d.LinkedID()
+ return !isGroup && linkedID == id, nil
+ }
+}
+
+// WithLinkedGroupID selects descriptors that are linked to the data object group with specified
+// ID.
+func WithLinkedGroupID(groupID uint32) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ if groupID == 0 {
+ return false, ErrInvalidGroupID
+ }
+ linkedID, isGroup := d.LinkedID()
+ return isGroup && linkedID == groupID, nil
+ }
+}
+
+// WithPartitionType selects descriptors containing a partition of type pt.
+func WithPartitionType(pt PartType) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ return d.raw.isPartitionOfType(pt), nil
+ }
+}
+
+// descriptorFromRaw populates a Descriptor from rd.
+func (f *FileImage) descriptorFromRaw(rd *rawDescriptor) Descriptor {
+ return Descriptor{
+ raw: *rd,
+ r: f.rw,
+ relativeID: rd.ID - f.minIDs[rd.GroupID],
+ }
+}
+
+// GetDescriptors returns a slice of in-use descriptors for which all selector funcs return true.
+// If the image contains no data objects, an error wrapping ErrNoObjects is returned.
+func (f *FileImage) GetDescriptors(fns ...DescriptorSelectorFunc) ([]Descriptor, error) {
+ if f.DescriptorsFree() == f.DescriptorsTotal() {
+ return nil, fmt.Errorf("%w", ErrNoObjects)
+ }
+
+ var ds []Descriptor
+
+ err := f.withDescriptors(multiSelectorFunc(fns...), func(d *rawDescriptor) error {
+ ds = append(ds, f.descriptorFromRaw(d))
+ return nil
+ })
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ return ds, nil
+}
+
+// getDescriptor returns a pointer to the in-use descriptor selected by fns. If no descriptor is
+// selected by fns, ErrObjectNotFound is returned. If multiple descriptors are selected by fns,
+// ErrMultipleObjectsFound is returned.
+func (f *FileImage) getDescriptor(fns ...DescriptorSelectorFunc) (*rawDescriptor, error) {
+ var d *rawDescriptor
+
+ err := f.withDescriptors(multiSelectorFunc(fns...), func(found *rawDescriptor) error {
+ if d != nil {
+ return ErrMultipleObjectsFound
+ }
+ d = found
+ return nil
+ })
+
+ if err == nil && d == nil {
+ err = ErrObjectNotFound
+ }
+
+ return d, err
+}
+
+// GetDescriptor returns the in-use descriptor selected by fns. If the image contains no data
+// objects, an error wrapping ErrNoObjects is returned. If no descriptor is selected by fns, an
+// error wrapping ErrObjectNotFound is returned. If multiple descriptors are selected by fns, an
+// error wrapping ErrMultipleObjectsFound is returned.
+func (f *FileImage) GetDescriptor(fns ...DescriptorSelectorFunc) (Descriptor, error) {
+ if f.DescriptorsFree() == f.DescriptorsTotal() {
+ return Descriptor{}, fmt.Errorf("%w", ErrNoObjects)
+ }
+
+ d, err := f.getDescriptor(fns...)
+ if err != nil {
+ return Descriptor{}, fmt.Errorf("%w", err)
+ }
+
+ return f.descriptorFromRaw(d), nil
+}
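To illustrate how selector funcs compose (they are ANDed by multiSelectorFunc below), a sketch that finds the primary system partition and then lists the objects sharing its group; assumes f is a loaded *sif.FileImage and the fmt import:

```go
// listPrimaryGroup prints the ID and data type of every object in the
// same group as the primary system partition.
func listPrimaryGroup(f *sif.FileImage) error {
	d, err := f.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
	if err != nil {
		return err
	}

	ds, err := f.GetDescriptors(sif.WithGroupID(d.GroupID()))
	if err != nil {
		return err
	}
	for _, d := range ds {
		fmt.Printf("%d\t%s\n", d.ID(), d.DataType())
	}
	return nil
}
```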
+
+// multiSelectorFunc returns a DescriptorSelectorFunc that selects a descriptor iff all of fns
+// select the descriptor.
+func multiSelectorFunc(fns ...DescriptorSelectorFunc) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ for _, fn := range fns {
+ if ok, err := fn(d); !ok || err != nil {
+ return ok, err
+ }
+ }
+ return true, nil
+ }
+}
+
+// withDescriptors calls onMatchFn with each in-use descriptor in f for which selectFn returns
+// true. If selectFn or onMatchFn returns a non-nil error, the iteration halts, and the error is
+// returned to the caller.
+func (f *FileImage) withDescriptors(selectFn DescriptorSelectorFunc, onMatchFn func(*rawDescriptor) error) error {
+ for i, d := range f.rds {
+ if !d.Used {
+ continue
+ }
+
+ if ok, err := selectFn(f.descriptorFromRaw(&f.rds[i])); err != nil {
+ return err
+ } else if !ok {
+ continue
+ }
+
+ if err := onMatchFn(&f.rds[i]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+var errAbort = errors.New("abort")
+
+// abortOnMatch is an onMatchFn that always returns a non-nil error, which
+// halts iteration on the first match.
+func abortOnMatch(*rawDescriptor) error { return errAbort }
+
+// WithDescriptors calls fn with each in-use descriptor in f, until fn returns true.
+func (f *FileImage) WithDescriptors(fn func(d Descriptor) bool) {
+ selectFn := func(d Descriptor) (bool, error) {
+ return fn(d), nil
+ }
+ _ = f.withDescriptors(selectFn, abortOnMatch)
+}
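The exported WithDescriptors wraps the same iteration for callback-style use; returning true stops early, as in this sketch (f again assumed to be a loaded *sif.FileImage):

```go
var sig sif.Descriptor
f.WithDescriptors(func(d sif.Descriptor) bool {
	if d.DataType() == sif.DataSignature {
		sig = d
		return true // stop at the first signature object
	}
	return false // keep iterating
})
_ = sig
```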
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go
new file mode 100644
index 00000000000..2d1c2091dc2
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go
@@ -0,0 +1,404 @@
+// Copyright (c) 2018-2022, Sylabs Inc. All rights reserved.
+// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
+// Copyright (c) 2017, Yannick Cote. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+// Package sif implements data structures and routines to create
+// and access SIF files.
+//
+// Layout of a SIF file (example):
+//
+// .================================================.
+// | GLOBAL HEADER: Sifheader |
+// | - launch: "#!/usr/bin/env..." |
+// | - magic: "SIF_MAGIC" |
+// | - version: "1" |
+// | - arch: "4" |
+// | - uuid: b2659d4e-bd50-4ea5-bd17-eec5e54f918e |
+// | - ctime: 1504657553 |
+// | - mtime: 1504657653 |
+// | - ndescr: 3 |
+// | - descroff: 120 | --.
+// | - descrlen: 432 | |
+// | - dataoff: 4096 | |
+// | - datalen: 619362 | |
+// |------------------------------------------------| <-'
+// | DESCR[0]: Sifdeffile |
+// | - Sifcommon |
+// | - datatype: DATA_DEFFILE |
+// | - id: 1 |
+// | - groupid: 1 |
+// | - link: NONE |
+// | - fileoff: 4096 | --.
+// | - filelen: 222 | |
+// |------------------------------------------------| <-----.
+// | DESCR[1]: Sifpartition | | |
+// | - Sifcommon | | |
+// | - datatype: DATA_PARTITION | | |
+// | - id: 2 | | |
+// | - groupid: 1 | | |
+// | - link: NONE | | |
+// | - fileoff: 4318 | ----. |
+// | - filelen: 618496 | | | |
+// | - fstype: Squashfs | | | |
+// | - parttype: System | | | |
+// | - content: Linux | | | |
+// |------------------------------------------------| | | |
+// | DESCR[2]: Sifsignature | | | |
+// | - Sifcommon | | | |
+// | - datatype: DATA_SIGNATURE | | | |
+// | - id: 3 | | | |
+// | - groupid: NONE | | | |
+// | - link: 2 | ------'
+// | - fileoff: 622814 | ------.
+// | - filelen: 644 | | | |
+// | - hashtype: SHA384 | | | |
+// | - entity: @ | | | |
+// |------------------------------------------------| <-' | |
+// | Definition file data | | |
+// | . | | |
+// | . | | |
+// | . | | |
+// |------------------------------------------------| <---' |
+// | File system partition image | |
+// | . | |
+// | . | |
+// | . | |
+// |------------------------------------------------| <-----'
+// | Signed verification data |
+// | . |
+// | . |
+// | . |
+// `================================================'
+package sif
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/google/uuid"
+)
+
+// SIF header constants and quantities.
+const (
+ hdrLaunchLen = 32 // len("#!/usr/bin/env... ")
+ hdrMagicLen = 10 // len("SIF_MAGIC")
+ hdrVersionLen = 3 // len("99")
+)
+
+var hdrMagic = [...]byte{'S', 'I', 'F', '_', 'M', 'A', 'G', 'I', 'C', '\x00'}
+
+// SpecVersion specifies a SIF specification version.
+type SpecVersion uint8
+
+func (v SpecVersion) String() string { return fmt.Sprintf("%02d", v) }
+
+// bytes returns the value of v, formatted for direct inclusion in a SIF header.
+func (v SpecVersion) bytes() [hdrVersionLen]byte {
+	var b [hdrVersionLen]byte
+ copy(b[:], fmt.Sprintf("%02d", v))
+ return b
+}
+
+// SIF specification versions.
+const (
+ version01 SpecVersion = iota + 1
+)
+
+// CurrentVersion specifies the current SIF specification version.
+const CurrentVersion = version01
+
+const (
+ descrGroupMask = 0xf0000000 // groups start at that offset
+ descrEntityLen = 256 // len("Joe Bloe ...")
+ descrNameLen = 128 // descriptor name (string identifier)
+ descrMaxPrivLen = 384 // size reserved for descriptor specific data
+)
+
+// DataType represents the different SIF data object types stored in the image.
+type DataType int32
+
+// List of supported SIF data types.
+const (
+ DataDeffile DataType = iota + 0x4001 // definition file data object
+ DataEnvVar // environment variables data object
+ DataLabels // JSON labels data object
+ DataPartition // file system data object
+ DataSignature // signing/verification data object
+ DataGenericJSON // generic JSON meta-data
+ DataGeneric // generic / raw data
+ DataCryptoMessage // cryptographic message data object
+ DataSBOM // software bill of materials
+)
+
+// String returns a human-readable representation of t.
+func (t DataType) String() string {
+ switch t {
+ case DataDeffile:
+ return "Def.FILE"
+ case DataEnvVar:
+ return "Env.Vars"
+ case DataLabels:
+ return "JSON.Labels"
+ case DataPartition:
+ return "FS"
+ case DataSignature:
+ return "Signature"
+ case DataGenericJSON:
+ return "JSON.Generic"
+ case DataGeneric:
+ return "Generic/Raw"
+ case DataCryptoMessage:
+ return "Cryptographic Message"
+ case DataSBOM:
+ return "SBOM"
+ }
+ return "Unknown"
+}
+
+// FSType represents the different SIF file system types found in partition data objects.
+type FSType int32
+
+// List of supported file systems.
+const (
+ FsSquash FSType = iota + 1 // Squashfs file system, RDONLY
+ FsExt3 // EXT3 file system, RDWR (deprecated)
+ FsImmuObj // immutable data object archive
+ FsRaw // raw data
+ FsEncryptedSquashfs // Encrypted Squashfs file system, RDONLY
+)
+
+// String returns a human-readable representation of t.
+func (t FSType) String() string {
+ switch t {
+ case FsSquash:
+ return "Squashfs"
+ case FsExt3:
+ return "Ext3"
+ case FsImmuObj:
+ return "Archive"
+ case FsRaw:
+ return "Raw"
+ case FsEncryptedSquashfs:
+ return "Encrypted squashfs"
+ }
+ return "Unknown"
+}
+
+// PartType represents the different SIF container partition types (system and data).
+type PartType int32
+
+// List of supported partition types.
+const (
+ PartSystem PartType = iota + 1 // partition hosts an operating system
+ PartPrimSys // partition hosts the primary operating system
+ PartData // partition hosts data only
+ PartOverlay // partition hosts an overlay
+)
+
+// String returns a human-readable representation of t.
+func (t PartType) String() string {
+ switch t {
+ case PartSystem:
+ return "System"
+ case PartPrimSys:
+ return "*System"
+ case PartData:
+ return "Data"
+ case PartOverlay:
+ return "Overlay"
+ }
+ return "Unknown"
+}
+
+// hashType represents the different SIF hashing function types used to fingerprint data objects.
+type hashType int32
+
+// List of supported hash functions.
+const (
+ hashSHA256 hashType = iota + 1
+ hashSHA384
+ hashSHA512
+ hashBLAKE2S
+ hashBLAKE2B
+)
+
+// FormatType represents the different formats used to store cryptographic message objects.
+type FormatType int32
+
+// List of supported cryptographic message formats.
+const (
+ FormatOpenPGP FormatType = iota + 1
+ FormatPEM
+)
+
+// String returns a human-readable representation of t.
+func (t FormatType) String() string {
+ switch t {
+ case FormatOpenPGP:
+ return "OpenPGP"
+ case FormatPEM:
+ return "PEM"
+ }
+ return "Unknown"
+}
+
+// MessageType represents the different messages stored within cryptographic message objects.
+type MessageType int32
+
+// List of supported cryptographic message types.
+const (
+	// OpenPGP formatted messages.
+ MessageClearSignature MessageType = 0x100
+
+ // PEM formatted messages.
+ MessageRSAOAEP MessageType = 0x200
+)
+
+// String returns a human-readable representation of t.
+func (t MessageType) String() string {
+ switch t {
+ case MessageClearSignature:
+ return "Clear Signature"
+ case MessageRSAOAEP:
+ return "RSA-OAEP"
+ }
+ return "Unknown"
+}
+
+// SBOMFormat represents the format used to store an SBOM object.
+type SBOMFormat int32
+
+// List of supported SBOM formats.
+const (
+ SBOMFormatCycloneDXJSON SBOMFormat = iota + 1 // CycloneDX (JSON)
+ SBOMFormatCycloneDXXML // CycloneDX (XML)
+ SBOMFormatGitHubJSON // GitHub dependency snapshot (JSON)
+ SBOMFormatSPDXJSON // SPDX (JSON)
+ SBOMFormatSPDXRDF // SPDX (RDF/xml)
+ SBOMFormatSPDXTagValue // SPDX (tag/value)
+ SBOMFormatSPDXYAML // SPDX (YAML)
+ SBOMFormatSyftJSON // Syft (JSON)
+)
+
+// String returns a human-readable representation of f.
+func (f SBOMFormat) String() string {
+ switch f {
+ case SBOMFormatCycloneDXJSON:
+ return "cyclonedx-json"
+ case SBOMFormatCycloneDXXML:
+ return "cyclonedx-xml"
+ case SBOMFormatGitHubJSON:
+ return "github-json"
+ case SBOMFormatSPDXJSON:
+ return "spdx-json"
+ case SBOMFormatSPDXRDF:
+ return "spdx-rdf"
+ case SBOMFormatSPDXTagValue:
+ return "spdx-tag-value"
+ case SBOMFormatSPDXYAML:
+ return "spdx-yaml"
+ case SBOMFormatSyftJSON:
+ return "syft-json"
+ }
+ return "unknown"
+}
+
+// header describes a loaded SIF file.
+type header struct {
+ LaunchScript [hdrLaunchLen]byte
+
+ Magic [hdrMagicLen]byte
+ Version [hdrVersionLen]byte
+ Arch archType
+ ID uuid.UUID
+
+ CreatedAt int64
+ ModifiedAt int64
+
+ DescriptorsFree int64
+ DescriptorsTotal int64
+ DescriptorsOffset int64
+ DescriptorsSize int64
+ DataOffset int64
+ DataSize int64
+}
+
+// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from h.
+func (h header) GetIntegrityReader() io.Reader {
+ return io.MultiReader(
+ bytes.NewReader(h.LaunchScript[:]),
+ bytes.NewReader(h.Magic[:]),
+ bytes.NewReader(h.Version[:]),
+ bytes.NewReader(h.ID[:]),
+ )
+}
+
+// ReadWriter describes the interface required to read and write SIF images.
+type ReadWriter interface {
+ io.ReaderAt
+ io.WriteSeeker
+ Truncate(int64) error
+}
+
+// FileImage describes the representation of a SIF file in memory.
+type FileImage struct {
+ rw ReadWriter // Backing storage for image.
+
+ h header // Raw global header from image.
+ rds []rawDescriptor // Raw descriptors from image.
+
+ closeOnUnload bool // Close rw on Unload.
+ minIDs map[uint32]uint32 // Minimum object IDs for each group ID.
+}
+
+// LaunchScript returns the image launch script.
+func (f *FileImage) LaunchScript() string {
+ return string(bytes.TrimRight(f.h.LaunchScript[:], "\x00"))
+}
+
+// Version returns the SIF specification version of the image.
+func (f *FileImage) Version() string {
+ return string(bytes.TrimRight(f.h.Version[:], "\x00"))
+}
+
+// PrimaryArch returns the primary CPU architecture of the image, or "unknown" if the primary CPU
+// architecture cannot be determined.
+func (f *FileImage) PrimaryArch() string { return f.h.Arch.GoArch() }
+
+// ID returns the ID of the image.
+func (f *FileImage) ID() string { return f.h.ID.String() }
+
+// CreatedAt returns the creation time of the image.
+func (f *FileImage) CreatedAt() time.Time { return time.Unix(f.h.CreatedAt, 0) }
+
+// ModifiedAt returns the last modification time of the image.
+func (f *FileImage) ModifiedAt() time.Time { return time.Unix(f.h.ModifiedAt, 0) }
+
+// DescriptorsFree returns the number of free descriptors in the image.
+func (f *FileImage) DescriptorsFree() int64 { return f.h.DescriptorsFree }
+
+// DescriptorsTotal returns the total number of descriptors in the image.
+func (f *FileImage) DescriptorsTotal() int64 { return f.h.DescriptorsTotal }
+
+// DescriptorsOffset returns the offset (in bytes) of the descriptors section in the image.
+func (f *FileImage) DescriptorsOffset() int64 { return f.h.DescriptorsOffset }
+
+// DescriptorsSize returns the size (in bytes) of the descriptors section in the image.
+func (f *FileImage) DescriptorsSize() int64 { return f.h.DescriptorsSize }
+
+// DataOffset returns the offset (in bytes) of the data section in the image.
+func (f *FileImage) DataOffset() int64 { return f.h.DataOffset }
+
+// DataSize returns the size (in bytes) of the data section in the image.
+func (f *FileImage) DataSize() int64 { return f.h.DataSize }
+
+// GetHeaderIntegrityReader returns an io.Reader that reads the integrity-protected fields from the
+// header of the image.
+func (f *FileImage) GetHeaderIntegrityReader() io.Reader {
+ return f.h.GetIntegrityReader()
+}
diff --git a/vendor/github.com/bits-and-blooms/bitset/LICENSE b/vendor/github.com/theupdateframework/go-tuf/LICENSE
similarity index 91%
rename from vendor/github.com/bits-and-blooms/bitset/LICENSE
rename to vendor/github.com/theupdateframework/go-tuf/LICENSE
index 59cab8a939b..38163dd4bd1 100644
--- a/vendor/github.com/bits-and-blooms/bitset/LICENSE
+++ b/vendor/github.com/theupdateframework/go-tuf/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2014 Will Fitzgerald. All rights reserved.
+Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Prime Directive, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go b/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go
new file mode 100644
index 00000000000..4d174d61f93
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go
@@ -0,0 +1,226 @@
+// Package encrypted provides a simple, secure system for encrypting data
+// symmetrically with a passphrase.
+//
+// It uses scrypt to derive a key from the passphrase and the NaCl secret box
+// cipher for authenticated encryption.
+package encrypted
+
+import (
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+
+ "golang.org/x/crypto/nacl/secretbox"
+ "golang.org/x/crypto/scrypt"
+)
+
+const saltSize = 32
+
+const (
+ boxKeySize = 32
+ boxNonceSize = 24
+)
+
+const (
+	// The N parameter was chosen so that key derivation takes ~100ms of work
+	// using the default implementation on the 2.3GHz Core i7 Haswell processor
+	// in a late-2013 Apple Retina MacBook Pro (it takes ~113ms).
+ scryptN = 32768
+ scryptR = 8
+ scryptP = 1
+)
+
+const (
+ nameScrypt = "scrypt"
+ nameSecretBox = "nacl/secretbox"
+)
+
+type data struct {
+ KDF scryptKDF `json:"kdf"`
+ Cipher secretBoxCipher `json:"cipher"`
+ Ciphertext []byte `json:"ciphertext"`
+}
+
+type scryptParams struct {
+ N int `json:"N"`
+ R int `json:"r"`
+ P int `json:"p"`
+}
+
+func newScryptKDF() (scryptKDF, error) {
+ salt := make([]byte, saltSize)
+ if err := fillRandom(salt); err != nil {
+ return scryptKDF{}, err
+ }
+ return scryptKDF{
+ Name: nameScrypt,
+ Params: scryptParams{
+ N: scryptN,
+ R: scryptR,
+ P: scryptP,
+ },
+ Salt: salt,
+ }, nil
+}
+
+type scryptKDF struct {
+ Name string `json:"name"`
+ Params scryptParams `json:"params"`
+ Salt []byte `json:"salt"`
+}
+
+func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) {
+ return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize)
+}
+
+// CheckParams checks that the encoded KDF parameters are what we expect them to
+// be. If we do not do this, an attacker could cause a DoS by tampering with
+// them.
+func (s *scryptKDF) CheckParams() error {
+ if s.Params.N != scryptN || s.Params.R != scryptR || s.Params.P != scryptP {
+ return errors.New("encrypted: unexpected kdf parameters")
+ }
+ return nil
+}
+
+func newSecretBoxCipher() (secretBoxCipher, error) {
+ nonce := make([]byte, boxNonceSize)
+ if err := fillRandom(nonce); err != nil {
+ return secretBoxCipher{}, err
+ }
+ return secretBoxCipher{
+ Name: nameSecretBox,
+ Nonce: nonce,
+ }, nil
+}
+
+type secretBoxCipher struct {
+ Name string `json:"name"`
+ Nonce []byte `json:"nonce"`
+
+ encrypted bool
+}
+
+func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte {
+ var keyBytes [boxKeySize]byte
+ var nonceBytes [boxNonceSize]byte
+
+ if len(key) != len(keyBytes) {
+ panic("incorrect key size")
+ }
+ if len(s.Nonce) != len(nonceBytes) {
+ panic("incorrect nonce size")
+ }
+
+ copy(keyBytes[:], key)
+ copy(nonceBytes[:], s.Nonce)
+
+ // ensure that we don't re-use nonces
+ if s.encrypted {
+ panic("Encrypt must only be called once for each cipher instance")
+ }
+ s.encrypted = true
+
+ return secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes)
+}
+
+func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) {
+ var keyBytes [boxKeySize]byte
+ var nonceBytes [boxNonceSize]byte
+
+ if len(key) != len(keyBytes) {
+ panic("incorrect key size")
+ }
+ if len(s.Nonce) != len(nonceBytes) {
+ // return an error instead of panicking since the nonce is user input
+ return nil, errors.New("encrypted: incorrect nonce size")
+ }
+
+ copy(keyBytes[:], key)
+ copy(nonceBytes[:], s.Nonce)
+
+ res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes)
+ if !ok {
+ return nil, errors.New("encrypted: decryption failed")
+ }
+ return res, nil
+}
+
+// Encrypt takes a passphrase and plaintext, and returns a JSON object
+// containing ciphertext and the details necessary to decrypt it.
+func Encrypt(plaintext, passphrase []byte) ([]byte, error) {
+ k, err := newScryptKDF()
+ if err != nil {
+ return nil, err
+ }
+ key, err := k.Key(passphrase)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := newSecretBoxCipher()
+ if err != nil {
+ return nil, err
+ }
+
+ data := &data{
+ KDF: k,
+ Cipher: c,
+ }
+ data.Ciphertext = c.Encrypt(plaintext, key)
+
+ return json.Marshal(data)
+}
+
+// Marshal encrypts the JSON encoding of v using passphrase.
+func Marshal(v interface{}, passphrase []byte) ([]byte, error) {
+ data, err := json.MarshalIndent(v, "", "\t")
+ if err != nil {
+ return nil, err
+ }
+ return Encrypt(data, passphrase)
+}
+
+// Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and
+// tries to decrypt it using passphrase. If successful, it returns the
+// plaintext.
+func Decrypt(ciphertext, passphrase []byte) ([]byte, error) {
+ data := &data{}
+ if err := json.Unmarshal(ciphertext, data); err != nil {
+ return nil, err
+ }
+
+ if data.KDF.Name != nameScrypt {
+ return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name)
+ }
+ if data.Cipher.Name != nameSecretBox {
+ return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name)
+ }
+ if err := data.KDF.CheckParams(); err != nil {
+ return nil, err
+ }
+
+ key, err := data.KDF.Key(passphrase)
+ if err != nil {
+ return nil, err
+ }
+
+ return data.Cipher.Decrypt(data.Ciphertext, key)
+}
+
+// Unmarshal decrypts the data using passphrase and unmarshals the resulting
+// plaintext into the value pointed to by v.
+func Unmarshal(data []byte, v interface{}, passphrase []byte) error {
+ decrypted, err := Decrypt(data, passphrase)
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(decrypted, v)
+}
+
+func fillRandom(b []byte) error {
+ _, err := io.ReadFull(rand.Reader, b)
+ return err
+}
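A roundtrip sketch for the Marshal/Unmarshal pair above; the struct and passphrase are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/theupdateframework/go-tuf/encrypted"
)

type sealed struct {
	Secret string `json:"secret"`
}

func main() {
	pass := []byte("correct horse battery staple")

	// JSON-encode the value, then encrypt it under a scrypt-derived key.
	blob, err := encrypted.Marshal(sealed{Secret: "hunter2"}, pass)
	if err != nil {
		log.Fatal(err)
	}

	var out sealed
	if err := encrypted.Unmarshal(blob, &out, pass); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Secret) // hunter2
}
```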
diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/titanous/rocacheck/LICENSE
similarity index 91%
rename from vendor/github.com/mattn/go-colorable/LICENSE
rename to vendor/github.com/titanous/rocacheck/LICENSE
index 91b5cef30eb..7bdce481fa2 100644
--- a/vendor/github.com/mattn/go-colorable/LICENSE
+++ b/vendor/github.com/titanous/rocacheck/LICENSE
@@ -1,6 +1,7 @@
-The MIT License (MIT)
+MIT License
-Copyright (c) 2016 Yasuhiro Matsumoto
+Copyright (c) 2017, Jonathan Rudenberg
+Copyright (c) 2017, CRoCS, EnigmaBridge Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/titanous/rocacheck/README.md b/vendor/github.com/titanous/rocacheck/README.md
new file mode 100644
index 00000000000..b8e765ea9c4
--- /dev/null
+++ b/vendor/github.com/titanous/rocacheck/README.md
@@ -0,0 +1,7 @@
+# rocacheck [GoDoc](https://godoc.org/github.com/titanous/rocacheck)
+
+Package rocacheck is a Go implementation of the [key fingerprint
+algorithm](https://github.com/crocs-muni/roca) that checks if an RSA key was
+generated by broken Infineon code and is vulnerable to factorization via the
+[Return of Coppersmith's Attack
+(ROCA)](https://crocs.fi.muni.cz/public/papers/rsa_ccs17) / CVE-2017-15361.
diff --git a/vendor/github.com/titanous/rocacheck/rocacheck.go b/vendor/github.com/titanous/rocacheck/rocacheck.go
new file mode 100644
index 00000000000..e813579bb87
--- /dev/null
+++ b/vendor/github.com/titanous/rocacheck/rocacheck.go
@@ -0,0 +1,52 @@
+// Package rocacheck checks if a key was generated by broken Infineon code and
+// is vulnerable to factorization via the Return of Coppersmith's Attack (ROCA)
+// / CVE-2017-15361.
+package rocacheck
+
+import (
+ "crypto/rsa"
+ "math/big"
+)
+
+type test struct {
+ Prime *big.Int
+ Fingerprints map[int64]struct{}
+}
+
+var tests = make([]test, 17)
+
+func init() {
+ bigOne := big.NewInt(1)
+ n := &big.Int{}
+ // relations table from https://github.com/crocs-muni/roca/pull/40
+ for i, r := range [][2]int64{
+ {2, 11}, {6, 13}, {8, 17}, {9, 19}, {3, 37}, {26, 53}, {20, 61},
+ {35, 71}, {24, 73}, {13, 79}, {6, 97}, {51, 103}, {53, 107},
+ {54, 109}, {42, 127}, {50, 151}, {78, 157},
+ } {
+ fps := make(map[int64]struct{})
+ bp := big.NewInt(r[1])
+ br := big.NewInt(r[0])
+ for j := int64(0); j < r[1]; j++ {
+ if n.Exp(big.NewInt(j), br, bp).Cmp(bigOne) == 0 {
+ fps[j] = struct{}{}
+ }
+ }
+ tests[i] = test{
+ Prime: big.NewInt(r[1]),
+ Fingerprints: fps,
+ }
+ }
+}
+
+// IsWeak returns true if an RSA public key is vulnerable to Return of
+// Coppersmith's Attack (ROCA).
+func IsWeak(k *rsa.PublicKey) bool {
+ tmp := &big.Int{}
+ for _, t := range tests {
+ if _, ok := t.Fingerprints[tmp.Mod(k.N, t.Prime).Int64()]; !ok {
+ return false
+ }
+ }
+ return true
+}
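A quick sanity check of IsWeak; keys from Go's crypto/rsa are not generated by the broken Infineon code path, so the expected result is false:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"github.com/titanous/rocacheck"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	// All 17 fingerprint tests must match for a key to be flagged weak.
	fmt.Println(rocacheck.IsWeak(&priv.PublicKey))
}
```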
diff --git a/vendor/github.com/vbauerster/mpb/v7/.travis.yml b/vendor/github.com/vbauerster/mpb/v7/.travis.yml
deleted file mode 100644
index 9a203a67d2a..00000000000
--- a/vendor/github.com/vbauerster/mpb/v7/.travis.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-language: go
-arch:
- - amd64
- - ppc64le
-
-go:
- - 1.14.x
-
-script:
- - go test -race ./...
- - for i in _examples/*/; do go build $i/*.go || exit 1; done
diff --git a/vendor/github.com/vbauerster/mpb/v7/README.md b/vendor/github.com/vbauerster/mpb/v7/README.md
index 90d4fe639ca..413f9e1dbf0 100644
--- a/vendor/github.com/vbauerster/mpb/v7/README.md
+++ b/vendor/github.com/vbauerster/mpb/v7/README.md
@@ -1,8 +1,7 @@
# Multi Progress Bar
[](https://pkg.go.dev/github.com/vbauerster/mpb/v7)
-[](https://travis-ci.org/vbauerster/mpb)
-[](https://goreportcard.com/report/github.com/vbauerster/mpb)
+[Build status](https://github.com/vbauerster/mpb/actions/workflows/test.yml)
[](https://www.paypal.me/vbauerster)
**mpb** is a Go lib for rendering progress bars in terminal applications.
@@ -37,10 +36,10 @@ func main() {
total := 100
name := "Single Bar:"
- // adding a single bar, which will inherit container's width
- bar := p.Add(int64(total),
- // progress bar filler with customized style
- mpb.NewBarFiller(mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟")),
+ // create a single bar, which will inherit container's width
+ bar := p.New(int64(total),
+ // BarFillerBuilder with custom style
+ mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟"),
mpb.PrependDecorators(
// display our name with one space on the right
decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}),
@@ -66,7 +65,7 @@ func main() {
```go
var wg sync.WaitGroup
- // passed &wg will be accounted at p.Wait() call
+ // passed wg will be accounted at p.Wait() call
p := mpb.New(mpb.WithWaitGroup(&wg))
total, numBars := 100, 3
wg.Add(numBars)
@@ -104,7 +103,7 @@ func main() {
}
}()
}
- // Waiting for passed &wg and for all bars to complete and flush
+ // wait for passed wg and for all bars to complete and flush
p.Wait()
```
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar.go b/vendor/github.com/vbauerster/mpb/v7/bar.go
index ca191cf3910..7db860e30d6 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar.go
@@ -5,9 +5,9 @@ import (
"context"
"fmt"
"io"
- "log"
"runtime/debug"
"strings"
+ "sync"
"time"
"github.com/acarl005/stripansi"
@@ -17,32 +17,21 @@ import (
// Bar represents a progress bar.
type Bar struct {
- priority int // used by heap
- index int // used by heap
-
- toShutdown bool
- toDrop bool
- noPop bool
- hasEwmaDecorators bool
- operateState chan func(*bState)
- frameCh chan *frame
-
- // cancel is called either by user or on complete event
- cancel func()
- // done is closed after cacheState is assigned
- done chan struct{}
- // cacheState is populated, right after close(b.done)
- cacheState *bState
-
+ index int // used by heap
+ priority int // used by heap
+ hasEwma bool
+ frameCh chan *renderFrame
+ operateState chan func(*bState)
+ done chan struct{}
container *Progress
- dlogger *log.Logger
+ bs *bState
+ cancel func()
recoveredPanic interface{}
}
-type extenderFunc func(in io.Reader, reqWidth int, st decor.Statistics) (out io.Reader, lines int)
+type extenderFunc func(rows []io.Reader, width int, stat decor.Statistics) []io.Reader
-// bState is actual bar state. It gets passed to *Bar.serve(...) monitor
-// goroutine.
+// bState is the actual bar state.
type bState struct {
id int
priority int
@@ -50,11 +39,10 @@ type bState struct {
total int64
current int64
refill int64
- lastN int64
- iterated bool
+ lastIncrement int64
trimSpace bool
completed bool
- completeFlushed bool
+ aborted bool
triggerComplete bool
dropOnComplete bool
noPop bool
@@ -63,36 +51,34 @@ type bState struct {
averageDecorators []decor.AverageDecorator
ewmaDecorators []decor.EwmaDecorator
shutdownListeners []decor.ShutdownListener
- bufP, bufB, bufA *bytes.Buffer
+ buffers [3]*bytes.Buffer
filler BarFiller
middleware func(BarFiller) BarFiller
extender extenderFunc
+ debugOut io.Writer
- // runningBar is a key for *pState.parkedBars
- runningBar *Bar
-
- debugOut io.Writer
+ wait struct {
+ bar *Bar // key for (*pState).queueBars
+ sync bool
+ }
}
-type frame struct {
- reader io.Reader
- lines int
+type renderFrame struct {
+ rows []io.Reader
+ shutdown int
}
func newBar(container *Progress, bs *bState) *Bar {
- logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id)
ctx, cancel := context.WithCancel(container.ctx)
bar := &Bar{
- container: container,
priority: bs.priority,
- toDrop: bs.dropOnComplete,
- noPop: bs.noPop,
+ hasEwma: len(bs.ewmaDecorators) != 0,
+ frameCh: make(chan *renderFrame, 1),
operateState: make(chan func(*bState)),
- frameCh: make(chan *frame, 1),
done: make(chan struct{}),
+ container: container,
cancel: cancel,
- dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile),
}
go bar.serve(ctx, bs)
@@ -100,12 +86,20 @@ func newBar(container *Progress, bs *bState) *Bar {
}
// ProxyReader wraps r with metrics required for progress tracking.
-// Panics if r is nil.
+// If r is an 'unknown total/size' reader, it is mandatory to call the
+// (*Bar).SetTotal(-1, true) method after (Reader).Read returns io.EOF.
+// Panics if r is nil. If the bar is already completed or aborted,
+// returns nil.
func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
if r == nil {
panic("expected non nil io.Reader")
}
- return newProxyReader(r, b)
+ select {
+ case <-b.done:
+ return nil
+ default:
+ return b.newProxyReader(r)
+ }
}
// ID returns the id of the bar.
@@ -115,18 +109,18 @@ func (b *Bar) ID() int {
case b.operateState <- func(s *bState) { result <- s.id }:
return <-result
case <-b.done:
- return b.cacheState.id
+ return b.bs.id
}
}
-// Current returns bar's current number, in other words sum of all increments.
+// Current returns bar's current value, i.e. the sum of all increments.
func (b *Bar) Current() int64 {
result := make(chan int64)
select {
case b.operateState <- func(s *bState) { result <- s.current }:
return <-result
case <-b.done:
- return b.cacheState.current
+ return b.bs.current
}
}
@@ -145,7 +139,7 @@ func (b *Bar) SetRefill(amount int64) {
// TraverseDecorators traverses all available decorators and calls cb func on each.
func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
- done := make(chan struct{})
+ sync := make(chan struct{})
select {
case b.operateState <- func(s *bState) {
for _, decorators := range [...][]decor.Decorator{
@@ -156,28 +150,56 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
cb(extractBaseDecorator(d))
}
}
- close(done)
+ close(sync)
}:
- <-done
+ <-sync
case <-b.done:
}
}
-// SetTotal sets total dynamically.
-// If total is less than or equal to zero it takes progress' current value.
-func (b *Bar) SetTotal(total int64, triggerComplete bool) {
+// EnableTriggerComplete enables triggering of the complete event. It is
+// effective only for a bar which was constructed with `total <= 0`, and
+// after the total has been set with (*Bar).SetTotal(int64, false). If the
+// bar has already been incremented to the total, the complete event is
+// triggered right away.
+func (b *Bar) EnableTriggerComplete() {
select {
case b.operateState <- func(s *bState) {
- s.triggerComplete = triggerComplete
- if total <= 0 {
+ if s.triggerComplete || s.total <= 0 {
+ return
+ }
+ if s.current >= s.total {
+ s.current = s.total
+ s.completed = true
+ go b.forceRefresh()
+ } else {
+ s.triggerComplete = true
+ }
+ }:
+ case <-b.done:
+ }
+}
+
+// SetTotal sets total to an arbitrary value. It is effective only for a
+// bar which was constructed with `total <= 0`. Setting total to a
+// negative value is equivalent to (*Bar).SetTotal((*Bar).Current(), bool).
+// If triggerCompleteNow is true, the total is set to the current value
+// and the complete event is triggered right away.
+func (b *Bar) SetTotal(total int64, triggerCompleteNow bool) {
+ select {
+ case b.operateState <- func(s *bState) {
+ if s.triggerComplete {
+ return
+ }
+ if total < 0 {
s.total = s.current
} else {
s.total = total
}
- if s.triggerComplete && !s.completed {
+ if triggerCompleteNow {
s.current = s.total
s.completed = true
- go b.forceRefreshIfLastUncompleted()
+ go b.forceRefresh()
}
}:
case <-b.done:
@@ -189,13 +211,12 @@ func (b *Bar) SetTotal(total int64, triggerComplete bool) {
func (b *Bar) SetCurrent(current int64) {
select {
case b.operateState <- func(s *bState) {
- s.iterated = true
- s.lastN = current - s.current
+ s.lastIncrement = current - s.current
s.current = current
if s.triggerComplete && s.current >= s.total {
s.current = s.total
s.completed = true
- go b.forceRefreshIfLastUncompleted()
+ go b.forceRefresh()
}
}:
case <-b.done:
@@ -214,15 +235,17 @@ func (b *Bar) IncrBy(n int) {
// IncrInt64 increments progress by amount of n.
func (b *Bar) IncrInt64(n int64) {
+ if n <= 0 {
+ return
+ }
select {
case b.operateState <- func(s *bState) {
- s.iterated = true
- s.lastN = n
+ s.lastIncrement = n
s.current += n
if s.triggerComplete && s.current >= s.total {
s.current = s.total
s.completed = true
- go b.forceRefreshIfLastUncompleted()
+ go b.forceRefresh()
}
}:
case <-b.done:
@@ -236,10 +259,18 @@ func (b *Bar) IncrInt64(n int64) {
func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) {
select {
case b.operateState <- func(s *bState) {
- ewmaIterationUpdate(false, s, dur)
+ if s.lastIncrement > 0 {
+ s.decoratorEwmaUpdate(dur)
+ s.lastIncrement = 0
+ } else {
+ panic("increment required before ewma iteration update")
+ }
}:
case <-b.done:
- ewmaIterationUpdate(true, b.cacheState, dur)
+ if b.bs.lastIncrement > 0 {
+ b.bs.decoratorEwmaUpdate(dur)
+ b.bs.lastIncrement = 0
+ }
}
}
@@ -249,9 +280,7 @@ func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) {
func (b *Bar) DecoratorAverageAdjust(start time.Time) {
select {
case b.operateState <- func(s *bState) {
- for _, d := range s.averageDecorators {
- d.AverageAdjust(start)
- }
+ s.decoratorAverageAdjust(start)
}:
case <-b.done:
}
@@ -266,38 +295,33 @@ func (b *Bar) SetPriority(priority int) {
// Abort interrupts bar's running goroutine. Abort won't be engaged
// if bar is already in complete state. If drop is true bar will be
-// removed as well.
+// removed as well. To make sure that the bar has been removed, call the
+// (*Bar).Wait method.
func (b *Bar) Abort(drop bool) {
select {
case b.operateState <- func(s *bState) {
- if s.completed == true {
+ if s.completed || s.aborted {
return
}
- if drop {
- b.container.dropBar(b)
- b.cancel()
- return
- }
- go func() {
- var uncompleted int
- b.container.traverseBars(func(bar *Bar) bool {
- if b != bar && !bar.Completed() {
- uncompleted++
- return false
- }
- return true
- })
- if uncompleted == 0 {
- b.container.refreshCh <- time.Now()
- }
- b.cancel()
- }()
+ s.aborted = true
+ s.dropOnComplete = drop
+ go b.forceRefresh()
}:
- <-b.done
case <-b.done:
}
}
+// Aborted reports whether the bar is in aborted state.
+func (b *Bar) Aborted() bool {
+ result := make(chan bool)
+ select {
+ case b.operateState <- func(s *bState) { result <- s.aborted }:
+ return <-result
+ case <-b.done:
+ return b.bs.aborted
+ }
+}
+
// Completed reports whether the bar is in completed state.
func (b *Bar) Completed() bool {
result := make(chan bool)
@@ -305,22 +329,28 @@ func (b *Bar) Completed() bool {
case b.operateState <- func(s *bState) { result <- s.completed }:
return <-result
case <-b.done:
- return true
+ return b.bs.completed
}
}
-func (b *Bar) serve(ctx context.Context, s *bState) {
+// Wait blocks until the bar is completed or aborted.
+func (b *Bar) Wait() {
+ <-b.done
+}
+
+func (b *Bar) serve(ctx context.Context, bs *bState) {
defer b.container.bwg.Done()
+ if bs.wait.bar != nil && bs.wait.sync {
+ bs.wait.bar.Wait()
+ }
for {
select {
case op := <-b.operateState:
- op(s)
+ op(bs)
case <-ctx.Done():
- // Notifying decorators about shutdown event
- for _, sl := range s.shutdownListeners {
- sl.Shutdown()
- }
- b.cacheState = s
+ bs.aborted = !bs.completed
+ bs.decoratorShutdownNotify()
+ b.bs = bs
close(b.done)
return
}
@@ -330,76 +360,72 @@ func (b *Bar) serve(ctx context.Context, s *bState) {
func (b *Bar) render(tw int) {
select {
case b.operateState <- func(s *bState) {
+ var rows []io.Reader
stat := newStatistics(tw, s)
defer func() {
// recovering if user defined decorator panics for example
if p := recover(); p != nil {
- if b.recoveredPanic == nil {
- s.extender = makePanicExtender(p)
- b.toShutdown = !b.toShutdown
- b.recoveredPanic = p
+ if s.debugOut != nil {
+ for _, fn := range []func() (int, error){
+ func() (int, error) {
+ return fmt.Fprintln(s.debugOut, p)
+ },
+ func() (int, error) {
+ return s.debugOut.Write(debug.Stack())
+ },
+ } {
+ if _, err := fn(); err != nil {
+ panic(err)
+ }
+ }
}
- reader, lines := s.extender(nil, s.reqWidth, stat)
- b.frameCh <- &frame{reader, lines + 1}
- b.dlogger.Println(p)
+ s.aborted = !s.completed
+ s.extender = makePanicExtender(p)
+ b.recoveredPanic = p
+ }
+ if fn := s.extender; fn != nil {
+ rows = fn(rows, s.reqWidth, stat)
+ }
+ frame := &renderFrame{
+ rows: rows,
}
- s.completeFlushed = s.completed
+ if s.completed || s.aborted {
+ b.cancel()
+ frame.shutdown++
+ }
+ b.frameCh <- frame
}()
- reader, lines := s.extender(s.draw(stat), s.reqWidth, stat)
- b.toShutdown = s.completed && !s.completeFlushed
- b.frameCh <- &frame{reader, lines + 1}
+ if b.recoveredPanic == nil {
+ rows = append(rows, s.draw(stat))
+ }
}:
case <-b.done:
- s := b.cacheState
- stat := newStatistics(tw, s)
- var r io.Reader
+ var rows []io.Reader
+ s, stat := b.bs, newStatistics(tw, b.bs)
if b.recoveredPanic == nil {
- r = s.draw(stat)
+ rows = append(rows, s.draw(stat))
}
- reader, lines := s.extender(r, s.reqWidth, stat)
- b.frameCh <- &frame{reader, lines + 1}
- }
-}
-
-func (b *Bar) subscribeDecorators() {
- var averageDecorators []decor.AverageDecorator
- var ewmaDecorators []decor.EwmaDecorator
- var shutdownListeners []decor.ShutdownListener
- b.TraverseDecorators(func(d decor.Decorator) {
- if d, ok := d.(decor.AverageDecorator); ok {
- averageDecorators = append(averageDecorators, d)
+ if fn := s.extender; fn != nil {
+ rows = fn(rows, s.reqWidth, stat)
}
- if d, ok := d.(decor.EwmaDecorator); ok {
- ewmaDecorators = append(ewmaDecorators, d)
+ frame := &renderFrame{
+ rows: rows,
}
- if d, ok := d.(decor.ShutdownListener); ok {
- shutdownListeners = append(shutdownListeners, d)
- }
- })
- b.hasEwmaDecorators = len(ewmaDecorators) != 0
- select {
- case b.operateState <- func(s *bState) {
- s.averageDecorators = averageDecorators
- s.ewmaDecorators = ewmaDecorators
- s.shutdownListeners = shutdownListeners
- }:
- case <-b.done:
+ b.frameCh <- frame
}
}
-func (b *Bar) forceRefreshIfLastUncompleted() {
- var uncompleted int
+func (b *Bar) forceRefresh() {
+ var anyOtherRunning bool
b.container.traverseBars(func(bar *Bar) bool {
- if b != bar && !bar.Completed() {
- uncompleted++
- return false
- }
- return true
+ anyOtherRunning = b != bar && bar.isRunning()
+ return !anyOtherRunning
})
- if uncompleted == 0 {
+ if !anyOtherRunning {
for {
select {
case b.container.refreshCh <- time.Now():
+ time.Sleep(prr)
case <-b.done:
return
}
@@ -407,51 +433,64 @@ func (b *Bar) forceRefreshIfLastUncompleted() {
}
}
+func (b *Bar) isRunning() bool {
+ result := make(chan bool)
+ select {
+ case b.operateState <- func(s *bState) {
+ result <- !s.completed && !s.aborted
+ }:
+ return <-result
+ case <-b.done:
+ return false
+ }
+}
+
func (b *Bar) wSyncTable() [][]chan int {
result := make(chan [][]chan int)
select {
case b.operateState <- func(s *bState) { result <- s.wSyncTable() }:
return <-result
case <-b.done:
- return b.cacheState.wSyncTable()
+ return b.bs.wSyncTable()
}
}
func (s *bState) draw(stat decor.Statistics) io.Reader {
- nlr := strings.NewReader("\n")
+ bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2]
+ nlr := bytes.NewReader([]byte("\n"))
tw := stat.AvailableWidth
for _, d := range s.pDecorators {
str := d.Decor(stat)
stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
- s.bufP.WriteString(str)
+ bufP.WriteString(str)
}
if stat.AvailableWidth < 1 {
- trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufP.String()), tw, "…"))
- s.bufP.Reset()
+ trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufP.String()), tw, "…"))
+ bufP.Reset()
return io.MultiReader(trunc, nlr)
}
if !s.trimSpace && stat.AvailableWidth > 1 {
stat.AvailableWidth -= 2
- s.bufB.WriteByte(' ')
- defer s.bufB.WriteByte(' ')
+ bufB.WriteByte(' ')
+ defer bufB.WriteByte(' ')
}
tw = stat.AvailableWidth
for _, d := range s.aDecorators {
str := d.Decor(stat)
stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
- s.bufA.WriteString(str)
+ bufA.WriteString(str)
}
if stat.AvailableWidth < 1 {
- trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufA.String()), tw, "…"))
- s.bufA.Reset()
- return io.MultiReader(s.bufP, s.bufB, trunc, nlr)
+ trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufA.String()), tw, "…"))
+ bufA.Reset()
+ return io.MultiReader(bufP, bufB, trunc, nlr)
}
- s.filler.Fill(s.bufB, s.reqWidth, stat)
+ s.filler.Fill(bufB, s.reqWidth, stat)
- return io.MultiReader(s.bufP, s.bufB, s.bufA, nlr)
+ return io.MultiReader(bufP, bufB, bufA, nlr)
}
func (s *bState) wSyncTable() [][]chan int {
@@ -476,14 +515,86 @@ func (s *bState) wSyncTable() [][]chan int {
return table
}
+func (s *bState) subscribeDecorators() {
+ for _, decorators := range [...][]decor.Decorator{
+ s.pDecorators,
+ s.aDecorators,
+ } {
+ for _, d := range decorators {
+ d = extractBaseDecorator(d)
+ if d, ok := d.(decor.AverageDecorator); ok {
+ s.averageDecorators = append(s.averageDecorators, d)
+ }
+ if d, ok := d.(decor.EwmaDecorator); ok {
+ s.ewmaDecorators = append(s.ewmaDecorators, d)
+ }
+ if d, ok := d.(decor.ShutdownListener); ok {
+ s.shutdownListeners = append(s.shutdownListeners, d)
+ }
+ }
+ }
+}
+
+func (s bState) decoratorEwmaUpdate(dur time.Duration) {
+ wg := new(sync.WaitGroup)
+ for i := 0; i < len(s.ewmaDecorators); i++ {
+ switch d := s.ewmaDecorators[i]; i {
+ case len(s.ewmaDecorators) - 1:
+ d.EwmaUpdate(s.lastIncrement, dur)
+ default:
+ wg.Add(1)
+ go func() {
+ d.EwmaUpdate(s.lastIncrement, dur)
+ wg.Done()
+ }()
+ }
+ }
+ wg.Wait()
+}
+
+func (s bState) decoratorAverageAdjust(start time.Time) {
+ wg := new(sync.WaitGroup)
+ for i := 0; i < len(s.averageDecorators); i++ {
+ switch d := s.averageDecorators[i]; i {
+ case len(s.averageDecorators) - 1:
+ d.AverageAdjust(start)
+ default:
+ wg.Add(1)
+ go func() {
+ d.AverageAdjust(start)
+ wg.Done()
+ }()
+ }
+ }
+ wg.Wait()
+}
+
+func (s bState) decoratorShutdownNotify() {
+ wg := new(sync.WaitGroup)
+ for i := 0; i < len(s.shutdownListeners); i++ {
+ switch d := s.shutdownListeners[i]; i {
+ case len(s.shutdownListeners) - 1:
+ d.Shutdown()
+ default:
+ wg.Add(1)
+ go func() {
+ d.Shutdown()
+ wg.Done()
+ }()
+ }
+ }
+ wg.Wait()
+}
+
func newStatistics(tw int, s *bState) decor.Statistics {
return decor.Statistics{
- ID: s.id,
AvailableWidth: tw,
+ ID: s.id,
Total: s.total,
Current: s.current,
Refill: s.refill,
- Completed: s.completeFlushed,
+ Completed: s.completed,
+ Aborted: s.aborted,
}
}
@@ -494,27 +605,13 @@ func extractBaseDecorator(d decor.Decorator) decor.Decorator {
return d
}
-func ewmaIterationUpdate(done bool, s *bState, dur time.Duration) {
- if !done && !s.iterated {
- panic("increment required before ewma iteration update")
- } else {
- s.iterated = false
- }
- for _, d := range s.ewmaDecorators {
- d.EwmaUpdate(s.lastN, dur)
- }
-}
-
func makePanicExtender(p interface{}) extenderFunc {
pstr := fmt.Sprint(p)
- stack := debug.Stack()
- stackLines := bytes.Count(stack, []byte("\n"))
- return func(_ io.Reader, _ int, st decor.Statistics) (io.Reader, int) {
- mr := io.MultiReader(
- strings.NewReader(runewidth.Truncate(pstr, st.AvailableWidth, "…")),
- strings.NewReader(fmt.Sprintf("\n%#v\n", st)),
- bytes.NewReader(stack),
+ return func(rows []io.Reader, _ int, stat decor.Statistics) []io.Reader {
+ r := io.MultiReader(
+ strings.NewReader(runewidth.Truncate(pstr, stat.AvailableWidth, "…")),
+ bytes.NewReader([]byte("\n")),
)
- return mr, stackLines + 1
+ return append(rows, r)
}
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler.go
index a69087c474e..81177fc2eea 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler.go
@@ -9,31 +9,42 @@ import (
// BarFiller interface.
// Bar (without decorators) renders itself by calling BarFiller's Fill method.
//
-// reqWidth is requested width, set by `func WithWidth(int) ContainerOption`.
+// reqWidth is requested width set by `func WithWidth(int) ContainerOption`.
// If not set, it defaults to terminal width.
//
-// Default implementations can be obtained via:
-//
-// func NewBarFiller(BarStyle()) BarFiller
-// func NewBarFiller(SpinnerStyle()) BarFiller
-//
type BarFiller interface {
Fill(w io.Writer, reqWidth int, stat decor.Statistics)
}
// BarFillerBuilder interface.
+// Default implementations are:
+//
+// BarStyle()
+// SpinnerStyle()
+// NopStyle()
+//
type BarFillerBuilder interface {
Build() BarFiller
}
-// BarFillerFunc is function type adapter to convert function into BarFiller.
+// BarFillerFunc is a function type adapter to convert a compatible
+// function into the BarFiller interface.
type BarFillerFunc func(w io.Writer, reqWidth int, stat decor.Statistics)
func (f BarFillerFunc) Fill(w io.Writer, reqWidth int, stat decor.Statistics) {
f(w, reqWidth, stat)
}
+// BarFillerBuilderFunc is a function type adapter to convert a
+// compatible function into the BarFillerBuilder interface.
+type BarFillerBuilderFunc func() BarFiller
+
+func (f BarFillerBuilderFunc) Build() BarFiller {
+ return f()
+}
+
// NewBarFiller constructs a BarFiller from provided BarFillerBuilder.
+// Deprecated. Prefer using `*Progress.New(...)` directly.
func NewBarFiller(b BarFillerBuilder) BarFiller {
return b.Build()
}
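With the BarFillerBuilderFunc adapter above, a plain function can serve as a BarFillerBuilder, for example with the (*Progress).New method introduced later in this patch. A minimal sketch, assuming the mpb/v7 API as patched:

package main

import (
	"fmt"
	"io"

	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	p := mpb.New(mpb.WithWidth(40))
	// A custom "filler" that just prints current/total counts.
	builder := mpb.BarFillerBuilderFunc(func() mpb.BarFiller {
		return mpb.BarFillerFunc(func(w io.Writer, _ int, st decor.Statistics) {
			fmt.Fprintf(w, "%d/%d", st.Current, st.Total)
		})
	})
	bar := p.New(100, builder)
	for i := 0; i < 100; i++ {
		bar.Increment()
	}
	p.Wait()
}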
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
index 80b2104555d..d8bf90a4a94 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
@@ -32,13 +32,13 @@ type BarStyleComposer interface {
}
type bFiller struct {
+ rev bool
components [components]*component
tip struct {
count uint
onComplete *component
frames []*component
}
- flush func(dst io.Writer, filling, padding [][]byte)
}
type component struct {
@@ -113,14 +113,7 @@ func (s *barStyle) Reverse() BarStyleComposer {
}
func (s *barStyle) Build() BarFiller {
- bf := new(bFiller)
- if s.rev {
- bf.flush = func(dst io.Writer, filling, padding [][]byte) {
- flush(dst, padding, filling)
- }
- } else {
- bf.flush = flush
- }
+ bf := &bFiller{rev: s.rev}
bf.components[iLbound] = &component{
width: runewidth.StringWidth(stripansi.Strip(s.lbound)),
bytes: []byte(s.lbound),
@@ -164,8 +157,8 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
return
}
- w.Write(s.components[iLbound].bytes)
- defer w.Write(s.components[iRbound].bytes)
+ mustWrite(w, s.components[iLbound].bytes)
+ defer mustWrite(w, s.components[iRbound].bytes)
if width == 0 {
return
@@ -236,14 +229,25 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
}
}
- s.flush(w, filling, padding)
+ if s.rev {
+ flush(w, padding, filling)
+ } else {
+ flush(w, filling, padding)
+ }
}
-func flush(dst io.Writer, filling, padding [][]byte) {
+func flush(w io.Writer, filling, padding [][]byte) {
for i := len(filling) - 1; i >= 0; i-- {
- dst.Write(filling[i])
+ mustWrite(w, filling[i])
}
for i := 0; i < len(padding); i++ {
- dst.Write(padding[i])
+ mustWrite(w, padding[i])
+ }
+}
+
+func mustWrite(w io.Writer, p []byte) {
+ _, err := w.Write(p)
+ if err != nil {
+ panic(err)
}
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go
new file mode 100644
index 00000000000..1a7086fecb6
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go
@@ -0,0 +1,14 @@
+package mpb
+
+import (
+ "io"
+
+ "github.com/vbauerster/mpb/v7/decor"
+)
+
+// NopStyle provides a BarFillerBuilder which builds a NOP BarFiller.
+func NopStyle() BarFillerBuilder {
+ return BarFillerBuilderFunc(func() BarFiller {
+ return BarFillerFunc(func(io.Writer, int, decor.Statistics) {})
+ })
+}
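NopStyle is useful when only decorators should be rendered, with no bar graphics at all. A short sketch under the same API assumptions:

package main

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	p := mpb.New()
	// NopStyle builds a filler that writes nothing; the decorators
	// provide the whole line.
	bar := p.New(3, mpb.NopStyle(),
		mpb.PrependDecorators(
			decor.Name("steps: "),
			decor.CountersNoUnit("%d / %d"),
		),
	)
	for i := 0; i < 3; i++ {
		bar.Increment()
	}
	p.Wait()
}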
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
index 58ae1c53283..d38525efc94 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
@@ -73,15 +73,19 @@ func (s *sFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
return
}
+ var err error
rest := width - frameWidth
switch s.position {
case positionLeft:
- io.WriteString(w, frame+strings.Repeat(" ", rest))
+ _, err = io.WriteString(w, frame+strings.Repeat(" ", rest))
case positionRight:
- io.WriteString(w, strings.Repeat(" ", rest)+frame)
+ _, err = io.WriteString(w, strings.Repeat(" ", rest)+frame)
default:
str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2)
- io.WriteString(w, str)
+ _, err = io.WriteString(w, str)
+ }
+ if err != nil {
+ panic(err)
}
s.count++
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_option.go b/vendor/github.com/vbauerster/mpb/v7/bar_option.go
index 46b7de0bf56..3506ed2f11f 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_option.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_option.go
@@ -5,12 +5,20 @@ import (
"io"
"github.com/vbauerster/mpb/v7/decor"
- "github.com/vbauerster/mpb/v7/internal"
)
// BarOption is a func option to alter default behavior of a bar.
type BarOption func(*bState)
+func skipNil(decorators []decor.Decorator) (filtered []decor.Decorator) {
+ for _, d := range decorators {
+ if d != nil {
+ filtered = append(filtered, d)
+ }
+ }
+ return
+}
+
func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) {
type mergeWrapper interface {
MergeUnwrap() []decor.Decorator
@@ -26,14 +34,14 @@ func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Deco
// AppendDecorators let you inject decorators to the bar's right side.
func AppendDecorators(decorators ...decor.Decorator) BarOption {
return func(s *bState) {
- s.addDecorators(&s.aDecorators, decorators...)
+ s.addDecorators(&s.aDecorators, skipNil(decorators)...)
}
}
// PrependDecorators let you inject decorators to the bar's left side.
func PrependDecorators(decorators ...decor.Decorator) BarOption {
return func(s *bState) {
- s.addDecorators(&s.pDecorators, decorators...)
+ s.addDecorators(&s.pDecorators, skipNil(decorators)...)
}
}
@@ -51,14 +59,18 @@ func BarWidth(width int) BarOption {
}
}
-// BarQueueAfter queues this (being constructed) bar to relplace
-// runningBar after it has been completed.
-func BarQueueAfter(runningBar *Bar) BarOption {
- if runningBar == nil {
+// BarQueueAfter puts this (being constructed) bar into the queue.
+// BarPriority will be inherited from the argument bar.
+// When the argument bar completes or aborts, the queued bar takes its place.
+// If sync is true, the queued bar is suspended until the argument bar
+// completes or aborts.
+func BarQueueAfter(bar *Bar, sync bool) BarOption {
+ if bar == nil {
return nil
}
return func(s *bState) {
- s.runningBar = runningBar
+ s.wait.bar = bar
+ s.wait.sync = sync
}
}
@@ -81,7 +93,10 @@ func BarFillerOnComplete(message string) BarOption {
return BarFillerMiddleware(func(base BarFiller) BarFiller {
return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) {
if st.Completed {
- io.WriteString(w, message)
+ _, err := io.WriteString(w, message)
+ if err != nil {
+ panic(err)
+ }
} else {
base.Fill(w, reqWidth, st)
}
@@ -97,29 +112,61 @@ func BarFillerMiddleware(middle func(BarFiller) BarFiller) BarOption {
}
// BarPriority sets bar's priority. Zero is highest priority, i.e. bar
-// will be on top. If `BarReplaceOnComplete` option is supplied, this
-// option is ignored.
+// will be on top. This option is not effective with the `BarQueueAfter` option.
func BarPriority(priority int) BarOption {
return func(s *bState) {
s.priority = priority
}
}
-// BarExtender provides a way to extend bar to the next new line.
+// BarExtender extends the bar with arbitrary lines. The provided BarFiller
+// will be called at each render/flush cycle. Any lines written to the
+// underlying io.Writer are printed after the bar itself.
func BarExtender(filler BarFiller) BarOption {
+ return barExtender(filler, false)
+}
+
+// BarExtenderRev extends the bar with arbitrary lines in reverse order. The
+// provided BarFiller will be called at each render/flush cycle. Any lines
+// written to the underlying io.Writer are printed before the bar itself.
+func BarExtenderRev(filler BarFiller) BarOption {
+ return barExtender(filler, true)
+}
+
+func barExtender(filler BarFiller, rev bool) BarOption {
if filler == nil {
return nil
}
return func(s *bState) {
- s.extender = makeExtenderFunc(filler)
+ s.extender = makeExtenderFunc(filler, rev)
}
}
-func makeExtenderFunc(filler BarFiller) extenderFunc {
+func makeExtenderFunc(filler BarFiller, rev bool) extenderFunc {
buf := new(bytes.Buffer)
- return func(r io.Reader, reqWidth int, st decor.Statistics) (io.Reader, int) {
- filler.Fill(buf, reqWidth, st)
- return io.MultiReader(r, buf), bytes.Count(buf.Bytes(), []byte("\n"))
+ base := func(rows []io.Reader, width int, stat decor.Statistics) []io.Reader {
+ buf.Reset()
+ filler.Fill(buf, width, stat)
+ for {
+ b, err := buf.ReadBytes('\n')
+ if err != nil {
+ break
+ }
+ rows = append(rows, bytes.NewReader(b))
+ }
+ return rows
+ }
+
+ if !rev {
+ return base
+ } else {
+ return func(rows []io.Reader, width int, stat decor.Statistics) []io.Reader {
+ rows = base(rows, width, stat)
+ for left, right := 0, len(rows)-1; left < right; left, right = left+1, right-1 {
+ rows[left], rows[right] = rows[right], rows[left]
+ }
+ return rows
+ }
}
}
@@ -138,9 +185,12 @@ func BarNoPop() BarOption {
}
}
-// BarOptional will invoke provided option only when pick is true.
-func BarOptional(option BarOption, pick bool) BarOption {
- return BarOptOn(option, internal.Predicate(pick))
+// BarOptional will invoke provided option only when cond is true.
+func BarOptional(option BarOption, cond bool) BarOption {
+ if cond {
+ return option
+ }
+ return nil
}
// BarOptOn will invoke provided option only when higher order predicate
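The reworked BarQueueAfter and the simplified BarOptional compose naturally. A sketch assuming the patched signatures (the verbose flag is illustrative):

package main

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	p := mpb.New()
	verbose := true
	download := p.New(10, mpb.BarStyle(),
		// Applied only because verbose is true; with false it is a no-op.
		mpb.BarOptional(mpb.PrependDecorators(decor.Name("download ")), verbose),
	)
	// extract inherits download's priority and takes its place
	// once download completes or aborts.
	extract := p.New(10, mpb.BarStyle(),
		mpb.BarQueueAfter(download, false),
		mpb.PrependDecorators(decor.Name("extract ")),
	)
	for i := 0; i < 10; i++ {
		download.Increment()
	}
	for i := 0; i < 10; i++ {
		extract.Increment()
	}
	p.Wait()
}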
diff --git a/vendor/github.com/vbauerster/mpb/v7/container_option.go b/vendor/github.com/vbauerster/mpb/v7/container_option.go
index a858c3c51dd..38239d4fa9f 100644
--- a/vendor/github.com/vbauerster/mpb/v7/container_option.go
+++ b/vendor/github.com/vbauerster/mpb/v7/container_option.go
@@ -2,11 +2,8 @@ package mpb
import (
"io"
- "io/ioutil"
"sync"
"time"
-
- "github.com/vbauerster/mpb/v7/internal"
)
// ContainerOption is a func option to alter default behavior of a bar
@@ -33,7 +30,7 @@ func WithWidth(width int) ContainerOption {
}
}
-// WithRefreshRate overrides default 120ms refresh rate.
+// WithRefreshRate overrides default 150ms refresh rate.
func WithRefreshRate(d time.Duration) ContainerOption {
return func(s *pState) {
s.rr = d
@@ -76,7 +73,7 @@ func WithShutdownNotifier(ch chan struct{}) ContainerOption {
func WithOutput(w io.Writer) ContainerOption {
return func(s *pState) {
if w == nil {
- s.output = ioutil.Discard
+ s.output = io.Discard
s.outputDiscarded = true
return
}
@@ -101,9 +98,12 @@ func PopCompletedMode() ContainerOption {
}
}
-// ContainerOptional will invoke provided option only when pick is true.
-func ContainerOptional(option ContainerOption, pick bool) ContainerOption {
- return ContainerOptOn(option, internal.Predicate(pick))
+// ContainerOptional will invoke provided option only when cond is true.
+func ContainerOptional(option ContainerOption, cond bool) ContainerOption {
+ if cond {
+ return option
+ }
+ return nil
}
// ContainerOptOn will invoke provided option only when higher order
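ContainerOptional follows the same pattern at container level; combined with the patched WithOutput(nil), which discards all rendering, it makes a cheap quiet mode. A sketch, with quiet an illustrative flag:

package main

import "github.com/vbauerster/mpb/v7"

func main() {
	quiet := true
	p := mpb.New(
		// When quiet is true, all output is discarded.
		mpb.ContainerOptional(mpb.WithOutput(nil), quiet),
	)
	bar := p.AddBar(2)
	bar.IncrBy(2)
	p.Wait()
}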
diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
index 925c8b1dcf1..19fd90e9423 100644
--- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
+++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
@@ -11,7 +11,7 @@ import (
// ErrNotTTY not a TeleTYpewriter error.
var ErrNotTTY = errors.New("not a terminal")
-// http://ascii-table.com/ansi-escape-sequences.php
+// https://github.com/dylanaraps/pure-sh-bible#cursor-movement
const (
escOpen = "\x1b["
cuuAndEd = "A\x1b[J"
@@ -20,19 +20,30 @@ const (
// Writer is a buffered the writer that updates the terminal. The
// contents of writer will be flushed when Flush is called.
type Writer struct {
- out io.Writer
- buf bytes.Buffer
- lines int
- fd int
- isTerminal bool
+ out io.Writer
+ buf bytes.Buffer
+ lines int // how many lines to clear before flushing new ones
+ fd int
+ terminal bool
+ termSize func(int) (int, int, error)
}
// New returns a new Writer with defaults.
func New(out io.Writer) *Writer {
- w := &Writer{out: out}
+ w := &Writer{
+ out: out,
+ termSize: func(_ int) (int, int, error) {
+ return -1, -1, ErrNotTTY
+ },
+ }
if f, ok := out.(*os.File); ok {
w.fd = int(f.Fd())
- w.isTerminal = IsTerminal(w.fd)
+ if IsTerminal(w.fd) {
+ w.terminal = true
+ w.termSize = func(fd int) (int, int, error) {
+ return GetSize(fd)
+ }
+ }
}
return w
}
@@ -67,18 +78,14 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
return w.buf.ReadFrom(r)
}
-// GetWidth returns width of underlying terminal.
-func (w *Writer) GetWidth() (int, error) {
- if !w.isTerminal {
- return -1, ErrNotTTY
- }
- tw, _, err := GetSize(w.fd)
- return tw, err
+// GetTermSize returns the width and height of the underlying terminal.
+func (w *Writer) GetTermSize() (width, height int, err error) {
+ return w.termSize(w.fd)
}
-func (w *Writer) ansiCuuAndEd() (err error) {
+func (w *Writer) ansiCuuAndEd() error {
buf := make([]byte, 8)
buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lines), 10)
- _, err = w.out.Write(append(buf, cuuAndEd...))
- return
+ _, err := w.out.Write(append(buf, cuuAndEd...))
+ return err
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go
index 8f99dbe324e..2c4c3707be6 100644
--- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go
+++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go
@@ -16,7 +16,7 @@ var (
)
func (w *Writer) clearLines() error {
- if !w.isTerminal {
+ if !w.terminal {
// hope it's cygwin or similar
return w.ansiCuuAndEd()
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go
index e81fae367ee..aad7709c062 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go
@@ -47,12 +47,13 @@ const (
// Statistics consists of progress related statistics, that Decorator
// may need.
type Statistics struct {
- ID int
AvailableWidth int
+ ID int
Total int64
Current int64
Refill int64
Completed bool
+ Aborted bool
}
// Decorator interface.
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go b/vendor/github.com/vbauerster/mpb/v7/decor/merge.go
index e41406a64da..84767115515 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/merge.go
@@ -17,6 +17,9 @@ import (
// +----+--------+---------+--------+
//
func Merge(decorator Decorator, placeholders ...WC) Decorator {
+ if decorator == nil {
+ return nil
+ }
if _, ok := decorator.Sync(); !ok || len(placeholders) == 0 {
return decorator
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go
new file mode 100644
index 00000000000..10ff67009cc
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go
@@ -0,0 +1,41 @@
+package decor
+
+// OnAbort returns a decorator which wraps the provided decorator with
+// the sole purpose of displaying the provided message on an abort event.
+// It has no effect if bar.Abort(drop bool) is called with a true argument.
+//
+// `decorator` Decorator to wrap
+//
+// `message` message to display on abort event
+//
+func OnAbort(decorator Decorator, message string) Decorator {
+ if decorator == nil {
+ return nil
+ }
+ d := &onAbortWrapper{
+ Decorator: decorator,
+ msg: message,
+ }
+ if md, ok := decorator.(*mergeDecorator); ok {
+ d.Decorator, md.Decorator = md.Decorator, d
+ return md
+ }
+ return d
+}
+
+type onAbortWrapper struct {
+ Decorator
+ msg string
+}
+
+func (d *onAbortWrapper) Decor(s Statistics) string {
+ if s.Aborted {
+ wc := d.GetConf()
+ return wc.FormatMsg(d.msg)
+ }
+ return d.Decorator.Decor(s)
+}
+
+func (d *onAbortWrapper) Base() Decorator {
+ return d.Decorator
+}
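Together with the new Statistics.Aborted field, OnAbort lets a decorator swap to a fixed message once the bar is aborted. A sketch under the patched API:

package main

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	p := mpb.New()
	bar := p.New(100, mpb.BarStyle(),
		mpb.AppendDecorators(
			decor.OnAbort(decor.Percentage(), "canceled"),
		),
	)
	bar.IncrBy(42)
	// Abort without dropping: the bar stays visible and
	// Statistics.Aborted becomes true, so "canceled" is shown.
	bar.Abort(false)
	p.Wait()
}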
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go
index f46b19abaa9..2ada2b31b16 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go
@@ -1,6 +1,6 @@
package decor
-// OnComplete returns decorator, which wraps provided decorator, with
+// OnComplete returns decorator, which wraps provided decorator with
// sole purpose to display provided message on complete event.
//
// `decorator` Decorator to wrap
@@ -8,6 +8,9 @@ package decor
// `message` message to display on complete event
//
func OnComplete(decorator Decorator, message string) Decorator {
+ if decorator == nil {
+ return nil
+ }
d := &onCompleteWrapper{
Decorator: decorator,
msg: message,
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go
new file mode 100644
index 00000000000..74a3d96676e
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go
@@ -0,0 +1,55 @@
+package decor
+
+// OnCondition applies decorator only if a condition is true.
+//
+// `decorator` Decorator
+//
+// `cond` bool
+//
+func OnCondition(decorator Decorator, cond bool) Decorator {
+ return Conditional(cond, decorator, nil)
+}
+
+// OnPredicate applies decorator only if a predicate evaluates to true.
+//
+// `decorator` Decorator
+//
+// `predicate` func() bool
+//
+func OnPredicate(decorator Decorator, predicate func() bool) Decorator {
+ return Predicative(predicate, decorator, nil)
+}
+
+// Conditional returns decorator `a` if condition is true, otherwise
+// decorator `b`.
+//
+// `cond` bool
+//
+// `a` Decorator
+//
+// `b` Decorator
+//
+func Conditional(cond bool, a, b Decorator) Decorator {
+ if cond {
+ return a
+ } else {
+ return b
+ }
+}
+
+// Predicative returns decorator `a` if predicate evaluates to true,
+// otherwise decorator `b`.
+//
+// `predicate` func() bool
+//
+// `a` Decorator
+//
+// `b` Decorator
+//
+func Predicative(predicate func() bool, a, b Decorator) Decorator {
+ if predicate() {
+ return a
+ } else {
+ return b
+ }
+}
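These helpers pick a decorator at construction time, not per frame. A sketch (isTTY is an illustrative flag supplied by the caller):

package main

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	isTTY := true
	p := mpb.New()
	bar := p.New(100, mpb.BarStyle(),
		mpb.AppendDecorators(
			// Percentage on a terminal, nothing otherwise.
			decor.Conditional(isTTY, decor.Percentage(), decor.Name("")),
		),
	)
	for i := 0; i < 100; i++ {
		bar.Increment()
	}
	p.Wait()
}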
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go
new file mode 100644
index 00000000000..c6a34384e63
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go
@@ -0,0 +1,10 @@
+package decor
+
+import "io"
+
+func mustWriteString(w io.Writer, s string) {
+ _, err := io.WriteString(w, s)
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
index 2b0a7a9562c..e72668993ee 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
@@ -2,7 +2,6 @@ package decor
import (
"fmt"
- "io"
"strconv"
"github.com/vbauerster/mpb/v7/internal"
@@ -24,12 +23,11 @@ func (s percentageType) Format(st fmt.State, verb rune) {
}
}
- io.WriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64))
-
+ mustWriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64))
if st.Flag(' ') {
- io.WriteString(st, " ")
+ mustWriteString(st, " ")
}
- io.WriteString(st, "%")
+ mustWriteString(st, "%")
}
// Percentage returns percentage decorator. It's a wrapper of NewPercentage.
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
index e4b9740584b..09ecc23f807 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
@@ -2,8 +2,6 @@ package decor
import (
"fmt"
- "io"
- "math"
"strconv"
)
@@ -47,16 +45,15 @@ func (self SizeB1024) Format(st fmt.State, verb rune) {
unit = _iMiB
case self < _iTiB:
unit = _iGiB
- case self <= math.MaxInt64:
+ default:
unit = _iTiB
}
- io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
-
+ mustWriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
if st.Flag(' ') {
- io.WriteString(st, " ")
+ mustWriteString(st, " ")
}
- io.WriteString(st, unit.String())
+ mustWriteString(st, unit.String())
}
const (
@@ -96,14 +93,13 @@ func (self SizeB1000) Format(st fmt.State, verb rune) {
unit = _MB
case self < _TB:
unit = _GB
- case self <= math.MaxInt64:
+ default:
unit = _TB
}
- io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
-
+ mustWriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
if st.Flag(' ') {
- io.WriteString(st, " ")
+ mustWriteString(st, " ")
}
- io.WriteString(st, unit.String())
+ mustWriteString(st, unit.String())
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go
index 634edabfdc0..f052352fc40 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go
@@ -2,7 +2,6 @@ package decor
import (
"fmt"
- "io"
"math"
"time"
@@ -24,7 +23,7 @@ type speedFormatter struct {
func (self *speedFormatter) Format(st fmt.State, verb rune) {
self.Formatter.Format(st, verb)
- io.WriteString(st, "/s")
+ mustWriteString(st, "/s")
}
// EwmaSpeed exponential-weighted-moving-average based speed decorator.
diff --git a/vendor/github.com/vbauerster/mpb/v7/go.mod b/vendor/github.com/vbauerster/mpb/v7/go.mod
index 7b177d0dbf6..ac3543ebda6 100644
--- a/vendor/github.com/vbauerster/mpb/v7/go.mod
+++ b/vendor/github.com/vbauerster/mpb/v7/go.mod
@@ -4,7 +4,7 @@ require (
github.com/VividCortex/ewma v1.2.0
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
github.com/mattn/go-runewidth v0.0.13
- golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e
+ golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2
)
go 1.14
diff --git a/vendor/github.com/vbauerster/mpb/v7/go.sum b/vendor/github.com/vbauerster/mpb/v7/go.sum
index 45584e0bf34..3d50254068c 100644
--- a/vendor/github.com/vbauerster/mpb/v7/go.sum
+++ b/vendor/github.com/vbauerster/mpb/v7/go.sum
@@ -6,5 +6,5 @@ github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA=
-golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2 h1:wM1k/lXfpc5HdkJJyW9GELpd8ERGdnh8sMGL6Gzq3Ho=
+golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go b/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go
deleted file mode 100644
index 1e4dd24d9da..00000000000
--- a/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package internal
-
-// Predicate helper for internal use.
-func Predicate(pick bool) func() bool {
- return func() bool { return pick }
-}
diff --git a/vendor/github.com/vbauerster/mpb/v7/priority_queue.go b/vendor/github.com/vbauerster/mpb/v7/priority_queue.go
index 29d9bd5a826..152482e9ac9 100644
--- a/vendor/github.com/vbauerster/mpb/v7/priority_queue.go
+++ b/vendor/github.com/vbauerster/mpb/v7/priority_queue.go
@@ -6,7 +6,8 @@ type priorityQueue []*Bar
func (pq priorityQueue) Len() int { return len(pq) }
func (pq priorityQueue) Less(i, j int) bool {
- return pq[i].priority < pq[j].priority
+ // greater priority pops first
+ return pq[i].priority > pq[j].priority
}
func (pq priorityQueue) Swap(i, j int) {
diff --git a/vendor/github.com/vbauerster/mpb/v7/progress.go b/vendor/github.com/vbauerster/mpb/v7/progress.go
index c60c6569406..1dda06e5f0f 100644
--- a/vendor/github.com/vbauerster/mpb/v7/progress.go
+++ b/vendor/github.com/vbauerster/mpb/v7/progress.go
@@ -6,24 +6,19 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
- "log"
"math"
"os"
"sync"
"time"
"github.com/vbauerster/mpb/v7/cwriter"
- "github.com/vbauerster/mpb/v7/decor"
)
const (
- // default RefreshRate
- prr = 150 * time.Millisecond
+ prr = 150 * time.Millisecond // default RefreshRate
)
-// Progress represents a container that renders one or more progress
-// bars.
+// Progress represents a container that renders one or more progress bars.
type Progress struct {
ctx context.Context
uwg *sync.WaitGroup
@@ -33,21 +28,19 @@ type Progress struct {
done chan struct{}
refreshCh chan time.Time
once sync.Once
- dlogger *log.Logger
}
-// pState holds bars in its priorityQueue. It gets passed to
-// *Progress.serve(...) monitor goroutine.
+// pState holds bars in its priorityQueue. It gets passed to the (*Progress).serve monitor goroutine.
type pState struct {
- bHeap priorityQueue
- heapUpdated bool
- pMatrix map[int][]chan int
- aMatrix map[int][]chan int
- barShutdownQueue []*Bar
+ bHeap priorityQueue
+ heapUpdated bool
+ pMatrix map[int][]chan int
+ aMatrix map[int][]chan int
// following are provided/overrided by user
idCount int
reqWidth int
+ popPriority int
popCompleted bool
outputDiscarded bool
rr time.Duration
@@ -55,27 +48,27 @@ type pState struct {
externalRefresh <-chan interface{}
renderDelay <-chan struct{}
shutdownNotifier chan struct{}
- parkedBars map[*Bar]*Bar
+ queueBars map[*Bar]*Bar
output io.Writer
debugOut io.Writer
}
// New creates new Progress container instance. It's not possible to
-// reuse instance after *Progress.Wait() method has been called.
+// reuse the instance after the (*Progress).Wait method has been called.
func New(options ...ContainerOption) *Progress {
return NewWithContext(context.Background(), options...)
}
// NewWithContext creates new Progress container instance with provided
-// context. It's not possible to reuse instance after *Progress.Wait()
+// context. It's not possible to reuse the instance after the (*Progress).Wait
// method has been called.
func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
s := &pState{
- bHeap: priorityQueue{},
- rr: prr,
- parkedBars: make(map[*Bar]*Bar),
- output: os.Stdout,
- debugOut: ioutil.Discard,
+ bHeap: priorityQueue{},
+ rr: prr,
+ queueBars: make(map[*Bar]*Bar),
+ output: os.Stdout,
+ popPriority: math.MinInt32,
}
for _, opt := range options {
@@ -91,7 +84,6 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
bwg: new(sync.WaitGroup),
operateState: make(chan func(*pState)),
done: make(chan struct{}),
- dlogger: log.New(s.debugOut, "[mpb] ", log.Lshortfile),
}
p.cwg.Add(1)
@@ -99,25 +91,27 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
return p
}
-// AddBar creates a bar with default bar filler. Different filler can
-// be chosen and applied via `*Progress.Add(...) *Bar` method.
+// AddBar creates a bar with the default bar filler.
func (p *Progress) AddBar(total int64, options ...BarOption) *Bar {
- return p.Add(total, NewBarFiller(BarStyle()), options...)
+ return p.New(total, BarStyle(), options...)
}
-// AddSpinner creates a bar with default spinner filler. Different
-// filler can be chosen and applied via `*Progress.Add(...) *Bar`
-// method.
+// AddSpinner creates a bar with the default spinner filler.
func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar {
- return p.Add(total, NewBarFiller(SpinnerStyle()), options...)
+ return p.New(total, SpinnerStyle(), options...)
+}
+
+// New creates a bar with the provided BarFillerBuilder.
+func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar {
+ return p.Add(total, builder.Build(), options...)
}
// Add creates a bar which renders itself by provided filler.
-// If `total <= 0` trigger complete event is disabled until reset with *bar.SetTotal(int64, bool).
-// Panics if *Progress instance is done, i.e. called after *Progress.Wait().
+// If `total <= 0`, triggering the complete event via increment methods is disabled.
+// Panics if *Progress instance is done, i.e. called after (*Progress).Wait().
func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar {
if filler == nil {
- filler = BarFillerFunc(func(io.Writer, int, decor.Statistics) {})
+ filler = NopStyle().Build()
}
p.bwg.Add(1)
result := make(chan *Bar)
@@ -125,9 +119,8 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar
case p.operateState <- func(ps *pState) {
bs := ps.makeBarState(total, filler, options...)
bar := newBar(p, bs)
- if bs.runningBar != nil {
- bs.runningBar.noPop = true
- ps.parkedBars[bs.runningBar] = bar
+ if bs.wait.bar != nil {
+ ps.queueBars[bs.wait.bar] = bar
} else {
heap.Push(&ps.bHeap, bar)
ps.heapUpdated = true
@@ -136,7 +129,6 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar
result <- bar
}:
bar := <-result
- bar.subscribeDecorators()
return bar
case <-p.done:
p.bwg.Done()
@@ -144,21 +136,8 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar
}
}
-func (p *Progress) dropBar(b *Bar) {
- select {
- case p.operateState <- func(s *pState) {
- if b.index < 0 {
- return
- }
- heap.Remove(&s.bHeap, b.index)
- s.heapUpdated = true
- }:
- case <-p.done:
- }
-}
-
func (p *Progress) traverseBars(cb func(b *Bar) bool) {
- done := make(chan struct{})
+ sync := make(chan struct{})
select {
case p.operateState <- func(s *pState) {
for i := 0; i < s.bHeap.Len(); i++ {
@@ -167,9 +146,9 @@ func (p *Progress) traverseBars(cb func(b *Bar) bool) {
break
}
}
- close(done)
+ close(sync)
}:
- <-done
+ <-sync
case <-p.done:
}
}
@@ -203,8 +182,8 @@ func (p *Progress) BarCount() int {
// After this method has been called, there is no way to reuse *Progress
// instance.
func (p *Progress) Wait() {
+ // wait for user wg, if any
if p.uwg != nil {
- // wait for user wg
p.uwg.Wait()
}
@@ -226,25 +205,116 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
p.refreshCh = s.newTicker(p.done)
+ render := func(debugOut io.Writer) {
+ err := s.render(cw)
+ for err != nil {
+ if debugOut != nil {
+ _, err = fmt.Fprintln(debugOut, err)
+ } else {
+ panic(err)
+ }
+ debugOut = nil
+ }
+ }
+
for {
select {
case op := <-p.operateState:
op(s)
case <-p.refreshCh:
- if err := s.render(cw); err != nil {
- p.dlogger.Println(err)
- }
+ render(s.debugOut)
case <-s.shutdownNotifier:
for s.heapUpdated {
- if err := s.render(cw); err != nil {
- p.dlogger.Println(err)
- }
+ render(s.debugOut)
}
return
}
}
}
+func (s *pState) render(cw *cwriter.Writer) error {
+ if s.heapUpdated {
+ s.updateSyncMatrix()
+ s.heapUpdated = false
+ }
+ syncWidth(s.pMatrix)
+ syncWidth(s.aMatrix)
+
+ width, height, err := cw.GetTermSize()
+ if err != nil {
+ width = s.reqWidth
+ height = s.bHeap.Len()
+ }
+ for i := 0; i < s.bHeap.Len(); i++ {
+ bar := s.bHeap[i]
+ go bar.render(width)
+ }
+
+ return s.flush(cw, height)
+}
+
+func (s *pState) flush(cw *cwriter.Writer, height int) error {
+ var wg sync.WaitGroup
+ var popCount int
+ rows := make([]io.Reader, 0, height)
+ pool := make([]*Bar, 0, s.bHeap.Len())
+ for s.bHeap.Len() > 0 {
+ var usedRows int
+ b := heap.Pop(&s.bHeap).(*Bar)
+ frame := <-b.frameCh
+ for i := len(frame.rows) - 1; i >= 0; i-- {
+ if row := frame.rows[i]; len(rows) < height {
+ rows = append(rows, row)
+ usedRows++
+ } else {
+ wg.Add(1)
+ go func() {
+ _, _ = io.Copy(io.Discard, row)
+ wg.Done()
+ }()
+ }
+ }
+ if frame.shutdown != 0 {
+ b.Wait() // waiting for b.done, so it's safe to read b.bs
+ drop := b.bs.dropOnComplete
+ if qb, ok := s.queueBars[b]; ok {
+ delete(s.queueBars, b)
+ qb.priority = b.priority
+ pool = append(pool, qb)
+ drop = true
+ } else if s.popCompleted && !b.bs.noPop {
+ if frame.shutdown > 1 {
+ popCount += usedRows
+ drop = true
+ } else {
+ s.popPriority++
+ b.priority = s.popPriority
+ }
+ }
+ if drop {
+ s.heapUpdated = true
+ continue
+ }
+ }
+ pool = append(pool, b)
+ }
+
+ for _, b := range pool {
+ heap.Push(&s.bHeap, b)
+ }
+
+ for i := len(rows) - 1; i >= 0; i-- {
+ _, err := cw.ReadFrom(rows[i])
+ if err != nil {
+ wg.Wait()
+ return err
+ }
+ }
+
+ wg.Wait()
+ return cw.Flush(len(rows) - popCount)
+}
+
func (s *pState) newTicker(done <-chan struct{}) chan time.Time {
ch := make(chan time.Time)
if s.shutdownNotifier == nil {
@@ -283,75 +353,6 @@ func (s *pState) newTicker(done <-chan struct{}) chan time.Time {
return ch
}
-func (s *pState) render(cw *cwriter.Writer) error {
- if s.heapUpdated {
- s.updateSyncMatrix()
- s.heapUpdated = false
- }
- syncWidth(s.pMatrix)
- syncWidth(s.aMatrix)
-
- tw, err := cw.GetWidth()
- if err != nil {
- tw = s.reqWidth
- }
- for i := 0; i < s.bHeap.Len(); i++ {
- bar := s.bHeap[i]
- go bar.render(tw)
- }
-
- return s.flush(cw)
-}
-
-func (s *pState) flush(cw *cwriter.Writer) error {
- var totalLines int
- bm := make(map[*Bar]int, s.bHeap.Len())
- for s.bHeap.Len() > 0 {
- b := heap.Pop(&s.bHeap).(*Bar)
- frame := <-b.frameCh
- cw.ReadFrom(frame.reader)
- if b.toShutdown {
- if b.recoveredPanic != nil {
- s.barShutdownQueue = append(s.barShutdownQueue, b)
- b.toShutdown = false
- } else {
- // shutdown at next flush
- // this ensures no bar ends up with less than 100% rendered
- defer func() {
- s.barShutdownQueue = append(s.barShutdownQueue, b)
- }()
- }
- }
- bm[b] = frame.lines
- totalLines += frame.lines
- }
-
- for _, b := range s.barShutdownQueue {
- if parkedBar := s.parkedBars[b]; parkedBar != nil {
- parkedBar.priority = b.priority
- heap.Push(&s.bHeap, parkedBar)
- delete(s.parkedBars, b)
- b.toDrop = true
- }
- if s.popCompleted && !b.noPop {
- totalLines -= bm[b]
- b.toDrop = true
- }
- if b.toDrop {
- delete(bm, b)
- s.heapUpdated = true
- }
- b.cancel()
- }
- s.barShutdownQueue = s.barShutdownQueue[0:0]
-
- for b := range bm {
- heap.Push(&s.bHeap, b)
- }
-
- return cw.Flush(totalLines)
-}
-
func (s *pState) updateSyncMatrix() {
s.pMatrix = make(map[int][]chan int)
s.aMatrix = make(map[int][]chan int)
@@ -377,7 +378,6 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio
reqWidth: s.reqWidth,
total: total,
filler: filler,
- extender: func(r io.Reader, _ int, _ decor.Statistics) (io.Reader, int) { return r, 0 },
debugOut: s.debugOut,
}
@@ -396,13 +396,11 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio
bs.middleware = nil
}
- if s.popCompleted && !bs.noPop {
- bs.priority = -(math.MaxInt32 - s.idCount)
+ for i := 0; i < len(bs.buffers); i++ {
+ bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512))
}
- bs.bufP = bytes.NewBuffer(make([]byte, 0, 128))
- bs.bufB = bytes.NewBuffer(make([]byte, 0, 256))
- bs.bufA = bytes.NewBuffer(make([]byte, 0, 128))
+ bs.subscribeDecorators()
return bs
}
@@ -413,7 +411,7 @@ func syncWidth(matrix map[int][]chan int) {
}
}
-var maxWidthDistributor = func(column []chan int) {
+func maxWidthDistributor(column []chan int) {
var maxWidth int
for _, ch := range column {
if w := <-ch; w > maxWidth {
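The reworked flush above drives PopCompletedMode: a completed bar is printed one last time and popped out of the live region instead of being redrawn. A sketch assuming the patched API:

package main

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	p := mpb.New(mpb.PopCompletedMode())
	for i := 0; i < 3; i++ {
		bar := p.AddBar(10,
			mpb.PrependDecorators(decor.Name("task "), decor.Percentage()),
		)
		for j := 0; j < 10; j++ {
			bar.Increment()
		}
	}
	p.Wait()
}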
diff --git a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go
index 316f438d76b..51b63ea7a84 100644
--- a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go
+++ b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go
@@ -2,7 +2,6 @@ package mpb
import (
"io"
- "io/ioutil"
"time"
)
@@ -11,73 +10,63 @@ type proxyReader struct {
bar *Bar
}
-func (x *proxyReader) Read(p []byte) (int, error) {
+func (x proxyReader) Read(p []byte) (int, error) {
n, err := x.ReadCloser.Read(p)
x.bar.IncrBy(n)
- if err == io.EOF {
- go x.bar.SetTotal(0, true)
- }
return n, err
}
type proxyWriterTo struct {
- io.ReadCloser // *proxyReader
- wt io.WriterTo
- bar *Bar
+ proxyReader
+ wt io.WriterTo
}
-func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
+func (x proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
n, err := x.wt.WriteTo(w)
x.bar.IncrInt64(n)
- if err == io.EOF {
- go x.bar.SetTotal(0, true)
- }
return n, err
}
type ewmaProxyReader struct {
- io.ReadCloser // *proxyReader
- bar *Bar
- iT time.Time
+ proxyReader
}
-func (x *ewmaProxyReader) Read(p []byte) (int, error) {
- n, err := x.ReadCloser.Read(p)
+func (x ewmaProxyReader) Read(p []byte) (int, error) {
+ start := time.Now()
+ n, err := x.proxyReader.Read(p)
if n > 0 {
- x.bar.DecoratorEwmaUpdate(time.Since(x.iT))
- x.iT = time.Now()
+ x.bar.DecoratorEwmaUpdate(time.Since(start))
}
return n, err
}
type ewmaProxyWriterTo struct {
- io.ReadCloser // *ewmaProxyReader
- wt io.WriterTo // *proxyWriterTo
- bar *Bar
- iT time.Time
+ ewmaProxyReader
+ wt proxyWriterTo
}
-func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
+func (x ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
+ start := time.Now()
n, err := x.wt.WriteTo(w)
if n > 0 {
- x.bar.DecoratorEwmaUpdate(time.Since(x.iT))
- x.iT = time.Now()
+ x.bar.DecoratorEwmaUpdate(time.Since(start))
}
return n, err
}
-func newProxyReader(r io.Reader, bar *Bar) io.ReadCloser {
- rc := toReadCloser(r)
- rc = &proxyReader{rc, bar}
-
- if wt, isWriterTo := r.(io.WriterTo); bar.hasEwmaDecorators {
- now := time.Now()
- rc = &ewmaProxyReader{rc, bar, now}
- if isWriterTo {
- rc = &ewmaProxyWriterTo{rc, wt, bar, now}
+func (b *Bar) newProxyReader(r io.Reader) (rc io.ReadCloser) {
+ pr := proxyReader{toReadCloser(r), b}
+ if wt, ok := r.(io.WriterTo); ok {
+ pw := proxyWriterTo{pr, wt}
+ if b.hasEwma {
+ rc = ewmaProxyWriterTo{ewmaProxyReader{pr}, pw}
+ } else {
+ rc = pw
}
- } else if isWriterTo {
- rc = &proxyWriterTo{rc, wt, bar}
+ } else if b.hasEwma {
+ rc = ewmaProxyReader{pr}
+ } else {
+ rc = pr
}
return rc
}
@@ -86,5 +75,5 @@ func toReadCloser(r io.Reader) io.ReadCloser {
if rc, ok := r.(io.ReadCloser); ok {
return rc
}
- return ioutil.NopCloser(r)
+ return io.NopCloser(r)
}
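(*Bar).ProxyReader composes the proxy types above around any io.Reader; when the bar has EWMA decorators, the ewma variants are selected. A sketch under the patched API:

package main

import (
	"io"
	"strings"

	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 1<<20))
	p := mpb.New()
	bar := p.New(int64(src.Len()), mpb.BarStyle(),
		mpb.AppendDecorators(decor.CountersKibiByte("% .2f / % .2f")),
	)
	r := bar.ProxyReader(src)
	defer r.Close()
	// Each Read advances the bar; the bar completes when the total
	// is reached (EOF no longer forces completion in this patch).
	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
	p.Wait()
}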
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md
index 00059242cab..a4f5f1458ff 100644
--- a/vendor/github.com/xeipuuv/gojsonpointer/README.md
+++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md
@@ -35,7 +35,7 @@ An implementation of JSON Pointer - Go language
## References
-http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+https://tools.ietf.org/html/rfc6901
### Note
The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented.
diff --git a/vendor/go.mozilla.org/pkcs7/.travis.yml b/vendor/go.mozilla.org/pkcs7/.travis.yml
deleted file mode 100644
index eac4c176201..00000000000
--- a/vendor/go.mozilla.org/pkcs7/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-go:
- - "1.11"
- - "1.12"
- - "1.13"
- - tip
-before_install:
- - make gettools
-script:
- - make
diff --git a/vendor/go.mozilla.org/pkcs7/README.md b/vendor/go.mozilla.org/pkcs7/README.md
index bf37059c5ef..a55d117c682 100644
--- a/vendor/go.mozilla.org/pkcs7/README.md
+++ b/vendor/go.mozilla.org/pkcs7/README.md
@@ -1,7 +1,7 @@
# pkcs7
[](https://godoc.org/go.mozilla.org/pkcs7)
-[](https://travis-ci.org/mozilla-services/pkcs7)
+[](https://github.com/mozilla-services/pkcs7/actions/workflows/ci.yml?query=branch%3Amaster+event%3Apush)
pkcs7 implements parsing and creating signed and enveloped messages.
diff --git a/vendor/go.mozilla.org/pkcs7/ber.go b/vendor/go.mozilla.org/pkcs7/ber.go
index 585256739c0..73da024a0d5 100644
--- a/vendor/go.mozilla.org/pkcs7/ber.go
+++ b/vendor/go.mozilla.org/pkcs7/ber.go
@@ -175,7 +175,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
if offset > berLen {
return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
}
- hack := 0
+ indefinite := false
if l > 0x80 {
numberOfBytes := (int)(l & 0x7F)
if numberOfBytes > 4 { // int is only guaranteed to be 32bit
@@ -197,14 +197,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
}
}
} else if l == 0x80 {
- // find length by searching content
- markerIndex := bytes.LastIndex(ber[offset:], []byte{0x0, 0x0})
- if markerIndex == -1 {
- return nil, 0, errors.New("ber2der: Invalid BER format")
- }
- length = markerIndex
- hack = 2
- debugprint("--> (compute length) marker found at offset: %d\n", markerIndex+offset)
+ indefinite = true
} else {
length = (int)(l)
}
@@ -220,6 +213,9 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
debugprint("--> content end : %d\n", contentEnd)
debugprint("--> content : % X\n", ber[offset:contentEnd])
var obj asn1Object
+ if indefinite && kind == 0 {
+ return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding")
+ }
if kind == 0 {
obj = asn1Primitive{
tagBytes: ber[tagStart:tagEnd],
@@ -228,14 +224,25 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
}
} else {
var subObjects []asn1Object
- for offset < contentEnd {
+ for (offset < contentEnd) || indefinite {
var subObj asn1Object
var err error
- subObj, offset, err = readObject(ber[:contentEnd], offset)
+ subObj, offset, err = readObject(ber, offset)
if err != nil {
return nil, 0, err
}
subObjects = append(subObjects, subObj)
+
+ if indefinite {
+ terminated, err := isIndefiniteTermination(ber, offset)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if terminated {
+ break
+ }
+ }
}
obj = asn1Structured{
tagBytes: ber[tagStart:tagEnd],
@@ -243,7 +250,20 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
}
}
- return obj, contentEnd + hack, nil
+ // Apply indefinite form length with 0x0000 terminator.
+ if indefinite {
+ contentEnd = offset + 2
+ }
+
+ return obj, contentEnd, nil
+}
+
+func isIndefiniteTermination(ber []byte, offset int) (bool, error) {
+ if len(ber)-offset < 2 {
+ return false, errors.New("ber2der: Invalid BER format")
+ }
+
+ return bytes.Index(ber[offset:], []byte{0x0, 0x0}) == 0, nil
}
func debugprint(format string, a ...interface{}) {
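For reference, the byte shape the reworked readObject now accepts: a constructed tag with length octet 0x80 (indefinite form), definite-length children, and a 0x00 0x00 end-of-contents terminator. Illustration only:

package main

import "fmt"

func main() {
	ber := []byte{
		0x30, 0x80, // SEQUENCE, constructed, indefinite length
		0x04, 0x03, 'f', 'o', 'o', // OCTET STRING "foo", definite length
		0x00, 0x00, // end-of-contents terminator
	}
	fmt.Printf("% X\n", ber)
}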
diff --git a/vendor/go.mozilla.org/pkcs7/encrypt.go b/vendor/go.mozilla.org/pkcs7/encrypt.go
index da57ae643c2..6b2655708c6 100644
--- a/vendor/go.mozilla.org/pkcs7/encrypt.go
+++ b/vendor/go.mozilla.org/pkcs7/encrypt.go
@@ -35,7 +35,7 @@ type recipientInfo struct {
type encryptedContentInfo struct {
ContentType asn1.ObjectIdentifier
ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
- EncryptedContent asn1.RawValue `asn1:"tag:0,optional,explicit"`
+ EncryptedContent asn1.RawValue `asn1:"tag:0,optional"`
}
const (
diff --git a/vendor/go.mozilla.org/pkcs7/sign.go b/vendor/go.mozilla.org/pkcs7/sign.go
index addd7638359..31c3654c517 100644
--- a/vendor/go.mozilla.org/pkcs7/sign.go
+++ b/vendor/go.mozilla.org/pkcs7/sign.go
@@ -124,10 +124,10 @@ func (sd *SignedData) AddSigner(ee *x509.Certificate, pkey crypto.PrivateKey, co
// The signature algorithm used to hash the data is the one of the end-entity
// certificate.
func (sd *SignedData) AddSignerChain(ee *x509.Certificate, pkey crypto.PrivateKey, parents []*x509.Certificate, config SignerInfoConfig) error {
-// Following RFC 2315, 9.2 SignerInfo type, the distinguished name of
-// the issuer of the end-entity signer is stored in the issuerAndSerialNumber
-// section of the SignedData.SignerInfo, alongside the serial number of
-// the end-entity.
+ // Following RFC 2315, 9.2 SignerInfo type, the distinguished name of
+ // the issuer of the end-entity signer is stored in the issuerAndSerialNumber
+ // section of the SignedData.SignerInfo, alongside the serial number of
+ // the end-entity.
var ias issuerAndSerial
ias.SerialNumber = ee.SerialNumber
if len(parents) == 0 {
diff --git a/vendor/go.mozilla.org/pkcs7/verify.go b/vendor/go.mozilla.org/pkcs7/verify.go
index c8ead2362e0..f09e27245c9 100644
--- a/vendor/go.mozilla.org/pkcs7/verify.go
+++ b/vendor/go.mozilla.org/pkcs7/verify.go
@@ -18,8 +18,12 @@ func (p7 *PKCS7) Verify() (err error) {
}
// VerifyWithChain checks the signatures of a PKCS7 object.
-// If truststore is not nil, it also verifies the chain of trust of the end-entity
-// signer cert to one of the root in the truststore.
+//
+// If truststore is not nil, it also verifies the chain of trust of
+// the end-entity signer cert to one of the roots in the
+// truststore. When the PKCS7 object includes the signing time
+// authenticated attribute, the chain is verified at that time;
+// otherwise it is verified at UTC now.
func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) {
if len(p7.Signers) == 0 {
return errors.New("pkcs7: Message has no signers")
@@ -32,6 +36,81 @@ func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) {
return nil
}
+// VerifyWithChainAtTime checks the signatures of a PKCS7 object.
+//
+// If truststore is not nil, it also verifies the chain of trust of
+// the end-entity signer cert to a root in the truststore at
+// currentTime. It does not use the signing time authenticated
+// attribute.
+func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime time.Time) (err error) {
+ if len(p7.Signers) == 0 {
+ return errors.New("pkcs7: Message has no signers")
+ }
+ for _, signer := range p7.Signers {
+ if err := verifySignatureAtTime(p7, signer, truststore, currentTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool, currentTime time.Time) (err error) {
+ signedData := p7.Content
+ ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+ if ee == nil {
+ return errors.New("pkcs7: No certificate for signer")
+ }
+ if len(signer.AuthenticatedAttributes) > 0 {
+ // TODO(fullsailor): First check the content type match
+ var (
+ digest []byte
+ signingTime time.Time
+ )
+ err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest)
+ if err != nil {
+ return err
+ }
+ hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
+ h.Write(p7.Content)
+ computed := h.Sum(nil)
+ if subtle.ConstantTimeCompare(digest, computed) != 1 {
+ return &MessageDigestMismatchError{
+ ExpectedDigest: digest,
+ ActualDigest: computed,
+ }
+ }
+ signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
+ if err != nil {
+ return err
+ }
+ err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime)
+ if err == nil {
+ // signing time found, performing validity check
+ if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) {
+ return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q",
+ signingTime.Format(time.RFC3339),
+ ee.NotBefore.Format(time.RFC3339),
+ ee.NotAfter.Format(time.RFC3339))
+ }
+ }
+ }
+ if truststore != nil {
+ _, err = verifyCertChain(ee, p7.Certificates, truststore, currentTime)
+ if err != nil {
+ return err
+ }
+ }
+ sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm)
+ if err != nil {
+ return err
+ }
+ return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest)
+}
+
func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (err error) {
signedData := p7.Content
ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
@@ -70,7 +149,7 @@ func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (e
return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q",
signingTime.Format(time.RFC3339),
ee.NotBefore.Format(time.RFC3339),
- ee.NotBefore.Format(time.RFC3339))
+ ee.NotAfter.Format(time.RFC3339))
}
}
}
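A sketch of the new VerifyWithChainAtTime entry point, which pins chain verification to an explicit time instead of the signing-time attribute ("signed.p7s" and the truststore construction are illustrative):

package main

import (
	"crypto/x509"
	"os"
	"time"

	"go.mozilla.org/pkcs7"
)

func main() {
	der, err := os.ReadFile("signed.p7s") // hypothetical DER input
	if err != nil {
		panic(err)
	}
	p7, err := pkcs7.Parse(der)
	if err != nil {
		panic(err)
	}
	roots := x509.NewCertPool()
	for _, cert := range p7.Certificates {
		roots.AddCert(cert) // demo only; real code uses a real truststore
	}
	at := time.Date(2021, time.June, 1, 0, 0, 0, 0, time.UTC)
	if err := p7.VerifyWithChainAtTime(roots, at); err != nil {
		panic(err)
	}
}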
diff --git a/vendor/go.mozilla.org/pkcs7/verify_test_dsa.go b/vendor/go.mozilla.org/pkcs7/verify_test_dsa.go
new file mode 100644
index 00000000000..1eb05bc3eae
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/verify_test_dsa.go
@@ -0,0 +1,182 @@
+// +build go1.11 go1.12 go1.13 go1.14 go1.15
+
+package pkcs7
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "testing"
+)
+
+func TestVerifyEC2(t *testing.T) {
+ fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture)
+ p7, err := Parse(fixture.Input)
+ if err != nil {
+ t.Errorf("Parse encountered unexpected error: %v", err)
+ }
+ p7.Certificates = []*x509.Certificate{fixture.Certificate}
+ if err := p7.Verify(); err != nil {
+ t.Errorf("Verify failed with error: %v", err)
+ }
+}
+
+var EC2IdentityDocumentFixture = `
+-----BEGIN PKCS7-----
+MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA
+JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh
+eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1
+cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh
+bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs
+bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg
+OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK
+ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj
+aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy
+YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA
+AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n
+dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi
+IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B
+CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG
+CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w
+LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA
+AAAAAA==
+-----END PKCS7-----
+-----BEGIN CERTIFICATE-----
+MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
+FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
+VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z
+ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
+IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
+cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e
+ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3
+VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P
+hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j
+k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U
+hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF
+lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf
+MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW
+MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw
+vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw
+7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K
+-----END CERTIFICATE-----`
+
+func TestDSASignWithOpenSSLAndVerify(t *testing.T) {
+ content := []byte(`
+A ship in port is safe,
+but that's not what ships are built for.
+-- Grace Hopper`)
+ // write the content to a temp file
+ tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content")
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(tmpContentFile.Name(), content, 0755)
+
+ // write the signer cert to a temp file
+ tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer")
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0755)
+
+ // write the signer key to a temp file
+ tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key")
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0755)
+
+ tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature")
+ if err != nil {
+ t.Fatal(err)
+ }
+ // call openssl to sign the content
+ opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1",
+ "-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(),
+ "-signer", tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(),
+ "-certfile", tmpSignerCertFile.Name(), "-outform", "PEM")
+ out, err := opensslCMD.CombinedOutput()
+ if err != nil {
+ t.Fatalf("openssl command failed with %s: %s", err, out)
+ }
+
+ // verify the signed content
+ pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name())
+ if err != nil {
+ t.Fatal(err)
+ }
+ fmt.Printf("%s\n", pemSignature)
+ derBlock, _ := pem.Decode(pemSignature)
+ if derBlock == nil {
+ t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name())
+ }
+ p7, err := Parse(derBlock.Bytes)
+ if err != nil {
+ t.Fatalf("Parse encountered unexpected error: %v", err)
+ }
+ if err := p7.Verify(); err != nil {
+ t.Fatalf("Verify failed with error: %v", err)
+ }
+ os.Remove(tmpSignerCertFile.Name()) // clean up
+ os.Remove(tmpSignerKeyFile.Name()) // clean up
+ os.Remove(tmpContentFile.Name()) // clean up
+}
+
+var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY-----
+MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS
+PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl
+pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith
+1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L
+vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3
+zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo
+g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE=
+-----END PRIVATE KEY-----`)
+
+var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE-----
+MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV
+bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
+VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du
+MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r
+bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE
+ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC
+AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD
+Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE
+exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii
+Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4
+V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI
+puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl
+nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp
+rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt
+1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT
+ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G
+CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688
+qzy/7yePTlhlpj+ahMM=
+-----END CERTIFICATE-----`)
+
+type DSATestFixture struct {
+ Input []byte
+ Certificate *x509.Certificate
+}
+
+func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture {
+ var result DSATestFixture
+ var derBlock *pem.Block
+ var pemBlock = []byte(testPEMBlock)
+ for {
+ derBlock, pemBlock = pem.Decode(pemBlock)
+ if derBlock == nil {
+ break
+ }
+ switch derBlock.Type {
+ case "PKCS7":
+ result.Input = derBlock.Bytes
+ case "CERTIFICATE":
+ result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes)
+ }
+ }
+
+ return result
+}
diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS
deleted file mode 100644
index 2b00ddba0df..00000000000
--- a/vendor/golang.org/x/crypto/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS
deleted file mode 100644
index 1fbd3e976fa..00000000000
--- a/vendor/golang.org/x/crypto/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
index a2ecf5c325b..93eb5ae6de6 100644
--- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
+++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
@@ -12,7 +12,7 @@ import (
"errors"
"math/bits"
- "golang.org/x/crypto/internal/subtle"
+ "golang.org/x/crypto/internal/alias"
)
const (
@@ -189,7 +189,7 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
panic("chacha20: output smaller than input")
}
dst = dst[:len(src)]
- if subtle.InexactOverlap(dst, src) {
+ if alias.InexactOverlap(dst, src) {
panic("chacha20: invalid buffer overlap")
}
diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
index c5898db4658..4652247b8a6 100644
--- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
+++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
@@ -15,6 +15,7 @@ const bufSize = 256
// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
// be called when the vector facility is available. Implementation in asm_s390x.s.
+//
//go:noescape
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
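Reviewer note: the lone "//" added above is not noise. Starting with Go 1.19, gofmt separates machine directives such as //go:noescape from the preceding doc comment with a blank comment line, so the directive is not rendered as documentation. The same churn recurs throughout this vendor bump; the general shape (frob is a made-up name):

    // frob does something; this text is what go doc shows.
    //
    //go:noescape
    func frob(p *byte)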
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
index cda3fdd3540..bc62161d6e4 100644
--- a/vendor/golang.org/x/crypto/curve25519/curve25519.go
+++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
@@ -9,7 +9,8 @@ package curve25519 // import "golang.org/x/crypto/curve25519"
import (
"crypto/subtle"
- "fmt"
+ "errors"
+ "strconv"
"golang.org/x/crypto/curve25519/internal/field"
)
@@ -124,10 +125,10 @@ func X25519(scalar, point []byte) ([]byte, error) {
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
var in [32]byte
if l := len(scalar); l != 32 {
- return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32)
+ return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32")
}
if l := len(point); l != 32 {
- return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32)
+ return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32")
}
copy(in[:], scalar)
if &point[0] == &Basepoint[0] {
@@ -138,7 +139,7 @@ func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
copy(base[:], point)
ScalarMult(dst, &in, &base)
if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
- return nil, fmt.Errorf("bad input point: low order point")
+ return nil, errors.New("bad input point: low order point")
}
}
return dst[:], nil
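Reviewer note: swapping fmt.Errorf for errors.New plus strconv keeps curve25519's error messages byte-for-byte identical while dropping the package's dependency on fmt (and, transitively, reflect), which matters for binary size in low-level packages. A quick equivalence check, runnable on its own:

    package main

    import (
        "errors"
        "fmt"
        "strconv"
    )

    func main() {
        l := 16
        a := fmt.Errorf("bad scalar length: %d, expected %d", l, 32)
        b := errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32")
        fmt.Println(a.Error() == b.Error()) // true
    }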
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
index 44dc8e8caf9..edcf163c4ed 100644
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
+++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
@@ -1,13 +1,16 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+//go:build amd64 && gc && !purego
// +build amd64,gc,!purego
package field
// feMul sets out = a * b. It works like feMulGeneric.
+//
//go:noescape
func feMul(out *Element, a *Element, b *Element)
// feSquare sets out = a * a. It works like feSquareGeneric.
+//
//go:noescape
func feSquare(out *Element, a *Element)
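Reviewer note: the generated stubs gain the //go:build expression form introduced in Go 1.17 alongside the legacy // +build line; the two must agree while older toolchains are supported. In the expression form && is AND, while in the legacy form a comma is AND and a space is OR, so the pair below is one constraint written twice:

    //go:build amd64 && gc && !purego
    // +build amd64,gc,!purego

    package field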
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
index 71ad917dadd..a7828345fcc 100644
--- a/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go
@@ -1,13 +1,7 @@
-// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// In Go 1.13, the ed25519 package was promoted to the standard library as
-// crypto/ed25519, and this package became a wrapper for the standard library one.
-//
-//go:build !go1.13
-// +build !go1.13
-
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
@@ -16,21 +10,15 @@
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
+//
+// Beginning with Go 1.13, the functionality of this package was moved to the
+// standard library as crypto/ed25519. This package only acts as a compatibility
+// wrapper.
package ed25519
-// This code is a port of the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
import (
- "bytes"
- "crypto"
- cryptorand "crypto/rand"
- "crypto/sha512"
- "errors"
+ "crypto/ed25519"
"io"
- "strconv"
-
- "golang.org/x/crypto/ed25519/internal/edwards25519"
)
const (
@@ -45,57 +33,21 @@ const (
)
// PublicKey is the type of Ed25519 public keys.
-type PublicKey []byte
+//
+// This type is an alias for crypto/ed25519's PublicKey type.
+// See the crypto/ed25519 package for the methods on this type.
+type PublicKey = ed25519.PublicKey
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, priv[32:])
- return PublicKey(publicKey)
-}
-
-// Seed returns the private key seed corresponding to priv. It is provided for
-// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
-// in this package.
-func (priv PrivateKey) Seed() []byte {
- seed := make([]byte, SeedSize)
- copy(seed, priv[:32])
- return seed
-}
-
-// Sign signs the given message with priv.
-// Ed25519 performs two passes over messages to be signed and therefore cannot
-// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
-// indicate the message hasn't been hashed. This can be achieved by passing
-// crypto.Hash(0) as the value for opts.
-func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
- if opts.HashFunc() != crypto.Hash(0) {
- return nil, errors.New("ed25519: cannot sign hashed message")
- }
-
- return Sign(priv, message), nil
-}
+//
+// This type is an alias for crypto/ed25519's PrivateKey type.
+// See the crypto/ed25519 package for the methods on this type.
+type PrivateKey = ed25519.PrivateKey
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- if rand == nil {
- rand = cryptorand.Reader
- }
-
- seed := make([]byte, SeedSize)
- if _, err := io.ReadFull(rand, seed); err != nil {
- return nil, nil, err
- }
-
- privateKey := NewKeyFromSeed(seed)
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, privateKey[32:])
-
- return publicKey, privateKey, nil
+ return ed25519.GenerateKey(rand)
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
@@ -103,121 +55,17 @@ func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
- if l := len(seed); l != SeedSize {
- panic("ed25519: bad seed length: " + strconv.Itoa(l))
- }
-
- digest := sha512.Sum512(seed)
- digest[0] &= 248
- digest[31] &= 127
- digest[31] |= 64
-
- var A edwards25519.ExtendedGroupElement
- var hBytes [32]byte
- copy(hBytes[:], digest[:])
- edwards25519.GeScalarMultBase(&A, &hBytes)
- var publicKeyBytes [32]byte
- A.ToBytes(&publicKeyBytes)
-
- privateKey := make([]byte, PrivateKeySize)
- copy(privateKey, seed)
- copy(privateKey[32:], publicKeyBytes[:])
-
- return privateKey
+ return ed25519.NewKeyFromSeed(seed)
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
- if l := len(privateKey); l != PrivateKeySize {
- panic("ed25519: bad private key length: " + strconv.Itoa(l))
- }
-
- h := sha512.New()
- h.Write(privateKey[:32])
-
- var digest1, messageDigest, hramDigest [64]byte
- var expandedSecretKey [32]byte
- h.Sum(digest1[:0])
- copy(expandedSecretKey[:], digest1[:])
- expandedSecretKey[0] &= 248
- expandedSecretKey[31] &= 63
- expandedSecretKey[31] |= 64
-
- h.Reset()
- h.Write(digest1[32:])
- h.Write(message)
- h.Sum(messageDigest[:0])
-
- var messageDigestReduced [32]byte
- edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
- var R edwards25519.ExtendedGroupElement
- edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
-
- var encodedR [32]byte
- R.ToBytes(&encodedR)
-
- h.Reset()
- h.Write(encodedR[:])
- h.Write(privateKey[32:])
- h.Write(message)
- h.Sum(hramDigest[:0])
- var hramDigestReduced [32]byte
- edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
-
- var s [32]byte
- edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
-
- signature := make([]byte, SignatureSize)
- copy(signature[:], encodedR[:])
- copy(signature[32:], s[:])
-
- return signature
+ return ed25519.Sign(privateKey, message)
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
- if l := len(publicKey); l != PublicKeySize {
- panic("ed25519: bad public key length: " + strconv.Itoa(l))
- }
-
- if len(sig) != SignatureSize || sig[63]&224 != 0 {
- return false
- }
-
- var A edwards25519.ExtendedGroupElement
- var publicKeyBytes [32]byte
- copy(publicKeyBytes[:], publicKey)
- if !A.FromBytes(&publicKeyBytes) {
- return false
- }
- edwards25519.FeNeg(&A.X, &A.X)
- edwards25519.FeNeg(&A.T, &A.T)
-
- h := sha512.New()
- h.Write(sig[:32])
- h.Write(publicKey[:])
- h.Write(message)
- var digest [64]byte
- h.Sum(digest[:0])
-
- var hReduced [32]byte
- edwards25519.ScReduce(&hReduced, &digest)
-
- var R edwards25519.ProjectiveGroupElement
- var s [32]byte
- copy(s[:], sig[32:])
-
- // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
- // the range [0, order) in order to prevent signature malleability.
- if !edwards25519.ScMinimal(&s) {
- return false
- }
-
- edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
-
- var checkR [32]byte
- R.ToBytes(&checkR)
- return bytes.Equal(sig[:32], checkR[:])
+ return ed25519.Verify(publicKey, message, sig)
}
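Reviewer note: with the pre-Go-1.13 port deleted, golang.org/x/crypto/ed25519 is now a pure alias layer over crypto/ed25519, so keys, signatures, and size constants are interchangeable with the standard library. A self-contained sketch:

    package main

    import (
        "crypto/ed25519"
        "fmt"

        xed25519 "golang.org/x/crypto/ed25519"
    )

    func main() {
        pub, priv, err := xed25519.GenerateKey(nil) // nil falls back to crypto/rand.Reader
        if err != nil {
            panic(err)
        }
        sig := xed25519.Sign(priv, []byte("hello"))
        // PublicKey/PrivateKey are type aliases, so the standard library
        // verifies the x/crypto key and signature directly.
        fmt.Println(ed25519.Verify(pub, []byte("hello"), sig)) // true
    }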
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
deleted file mode 100644
index b5974dc8b27..00000000000
--- a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.13
-// +build go1.13
-
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// https://ed25519.cr.yp.to/.
-//
-// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
-// representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the RFC
-// 8032 private key as the “seed”.
-//
-// Beginning with Go 1.13, the functionality of this package was moved to the
-// standard library as crypto/ed25519. This package only acts as a compatibility
-// wrapper.
-package ed25519
-
-import (
- "crypto/ed25519"
- "io"
-)
-
-const (
- // PublicKeySize is the size, in bytes, of public keys as used in this package.
- PublicKeySize = 32
- // PrivateKeySize is the size, in bytes, of private keys as used in this package.
- PrivateKeySize = 64
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = 64
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 32
-)
-
-// PublicKey is the type of Ed25519 public keys.
-//
-// This type is an alias for crypto/ed25519's PublicKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PublicKey = ed25519.PublicKey
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-//
-// This type is an alias for crypto/ed25519's PrivateKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PrivateKey = ed25519.PrivateKey
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- return ed25519.GenerateKey(rand)
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- return ed25519.NewKeyFromSeed(seed)
-}
-
-// Sign signs the message with privateKey and returns a signature. It will
-// panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
- return ed25519.Sign(privateKey, message)
-}
-
-// Verify reports whether sig is a valid signature of message by publicKey. It
-// will panic if len(publicKey) is not PublicKeySize.
-func Verify(publicKey PublicKey, message, sig []byte) bool {
- return ed25519.Verify(publicKey, message, sig)
-}
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
deleted file mode 100644
index e39f086c1d8..00000000000
--- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
+++ /dev/null
@@ -1,1422 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-// These values are from the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
-// d is a constant in the Edwards curve equation.
-var d = FieldElement{
- -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116,
-}
-
-// d2 is 2*d.
-var d2 = FieldElement{
- -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199,
-}
-
-// SqrtM1 is the square-root of -1 in the field.
-var SqrtM1 = FieldElement{
- -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482,
-}
-
-// A is a constant in the Montgomery-form of curve25519.
-var A = FieldElement{
- 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-}
-
-// bi contains precomputed multiples of the base-point. See the Ed25519 paper
-// for a discussion about how these values are used.
-var bi = [8]PreComputedGroupElement{
- {
- FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
- FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
- FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
- },
- {
- FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
- FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
- FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
- },
- {
- FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
- FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
- FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
- },
- {
- FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
- FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
- FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
- },
- {
- FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877},
- FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951},
- FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784},
- },
- {
- FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436},
- FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918},
- FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877},
- },
- {
- FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800},
- FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305},
- FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300},
- },
- {
- FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876},
- FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619},
- FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683},
- },
-}
-
-// base contains precomputed multiples of the base-point. See the Ed25519 paper
-// for a discussion about how these values are used.
-var base = [32][8]PreComputedGroupElement{
- {
- {
- FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
- FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
- FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
- },
- {
- FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303},
- FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081},
- FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697},
- },
- {
- FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
- FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
- FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
- },
- {
- FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540},
- FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397},
- FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325},
- },
- {
- FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
- FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
- FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
- },
- {
- FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777},
- FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737},
- FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652},
- },
- {
- FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
- FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
- FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
- },
- {
- FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726},
- FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955},
- FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425},
- },
- },
- {
- {
- FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171},
- FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510},
- FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660},
- },
- {
- FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639},
- FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963},
- FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950},
- },
- {
- FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568},
- FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335},
- FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628},
- },
- {
- FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007},
- FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772},
- FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653},
- },
- {
- FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567},
- FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686},
- FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372},
- },
- {
- FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887},
- FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954},
- FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953},
- },
- {
- FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833},
- FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532},
- FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876},
- },
- {
- FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268},
- FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214},
- FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038},
- },
- },
- {
- {
- FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800},
- FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645},
- FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664},
- },
- {
- FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933},
- FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182},
- FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222},
- },
- {
- FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991},
- FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880},
- FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092},
- },
- {
- FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295},
- FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788},
- FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553},
- },
- {
- FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026},
- FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347},
- FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033},
- },
- {
- FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395},
- FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278},
- FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890},
- },
- {
- FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995},
- FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596},
- FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891},
- },
- {
- FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060},
- FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608},
- FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606},
- },
- },
- {
- {
- FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389},
- FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016},
- FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341},
- },
- {
- FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505},
- FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553},
- FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655},
- },
- {
- FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220},
- FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631},
- FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099},
- },
- {
- FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556},
- FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749},
- FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930},
- },
- {
- FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391},
- FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253},
- FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066},
- },
- {
- FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958},
- FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082},
- FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383},
- },
- {
- FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521},
- FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807},
- FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948},
- },
- {
- FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134},
- FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455},
- FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629},
- },
- },
- {
- {
- FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069},
- FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746},
- FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919},
- },
- {
- FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837},
- FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906},
- FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771},
- },
- {
- FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817},
- FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098},
- FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409},
- },
- {
- FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504},
- FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727},
- FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420},
- },
- {
- FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003},
- FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605},
- FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384},
- },
- {
- FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701},
- FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683},
- FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708},
- },
- {
- FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563},
- FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260},
- FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387},
- },
- {
- FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672},
- FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686},
- FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665},
- },
- },
- {
- {
- FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182},
- FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277},
- FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628},
- },
- {
- FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474},
- FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539},
- FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822},
- },
- {
- FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970},
- FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756},
- FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508},
- },
- {
- FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683},
- FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655},
- FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158},
- },
- {
- FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125},
- FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839},
- FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664},
- },
- {
- FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294},
- FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899},
- FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070},
- },
- {
- FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294},
- FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949},
- FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083},
- },
- {
- FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420},
- FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940},
- FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396},
- },
- },
- {
- {
- FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567},
- FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127},
- FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294},
- },
- {
- FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887},
- FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964},
- FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195},
- },
- {
- FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244},
- FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999},
- FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762},
- },
- {
- FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274},
- FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236},
- FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605},
- },
- {
- FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761},
- FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884},
- FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482},
- },
- {
- FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638},
- FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490},
- FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170},
- },
- {
- FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736},
- FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124},
- FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392},
- },
- {
- FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029},
- FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048},
- FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958},
- },
- },
- {
- {
- FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593},
- FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071},
- FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692},
- },
- {
- FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687},
- FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441},
- FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001},
- },
- {
- FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460},
- FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007},
- FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762},
- },
- {
- FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005},
- FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674},
- FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035},
- },
- {
- FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590},
- FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957},
- FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812},
- },
- {
- FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740},
- FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122},
- FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158},
- },
- {
- FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885},
- FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140},
- FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857},
- },
- {
- FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155},
- FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260},
- FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483},
- },
- },
- {
- {
- FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677},
- FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815},
- FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751},
- },
- {
- FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203},
- FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208},
- FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230},
- },
- {
- FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850},
- FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389},
- FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968},
- },
- {
- FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689},
- FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880},
- FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304},
- },
- {
- FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632},
- FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412},
- FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566},
- },
- {
- FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038},
- FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232},
- FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943},
- },
- {
- FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856},
- FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738},
- FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971},
- },
- {
- FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718},
- FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697},
- FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883},
- },
- },
- {
- {
- FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912},
- FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358},
- FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849},
- },
- {
- FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307},
- FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977},
- FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335},
- },
- {
- FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644},
- FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616},
- FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735},
- },
- {
- FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099},
- FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341},
- FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336},
- },
- {
- FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646},
- FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425},
- FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388},
- },
- {
- FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743},
- FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822},
- FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462},
- },
- {
- FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985},
- FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702},
- FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797},
- },
- {
- FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293},
- FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100},
- FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688},
- },
- },
- {
- {
- FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186},
- FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610},
- FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707},
- },
- {
- FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220},
- FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025},
- FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044},
- },
- {
- FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992},
- FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027},
- FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197},
- },
- {
- FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901},
- FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952},
- FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878},
- },
- {
- FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390},
- FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730},
- FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730},
- },
- {
- FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180},
- FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272},
- FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715},
- },
- {
- FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970},
- FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772},
- FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865},
- },
- {
- FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750},
- FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373},
- FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348},
- },
- },
- {
- {
- FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144},
- FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195},
- FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086},
- },
- {
- FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684},
- FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518},
- FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233},
- },
- {
- FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793},
- FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794},
- FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435},
- },
- {
- FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921},
- FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518},
- FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563},
- },
- {
- FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278},
- FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024},
- FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030},
- },
- {
- FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783},
- FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717},
- FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844},
- },
- {
- FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333},
- FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048},
- FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760},
- },
- {
- FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760},
- FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757},
- FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112},
- },
- },
- {
- {
- FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468},
- FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184},
- FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289},
- },
- {
- FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066},
- FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882},
- FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226},
- },
- {
- FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101},
- FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279},
- FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811},
- },
- {
- FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709},
- FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714},
- FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121},
- },
- {
- FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464},
- FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847},
- FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400},
- },
- {
- FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414},
- FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158},
- FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045},
- },
- {
- FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415},
- FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459},
- FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079},
- },
- {
- FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412},
- FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743},
- FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836},
- },
- },
- {
- {
- FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022},
- FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429},
- FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065},
- },
- {
- FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861},
- FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000},
- FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101},
- },
- {
- FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815},
- FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642},
- FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966},
- },
- {
- FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574},
- FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742},
- FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689},
- },
- {
- FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020},
- FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772},
- FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982},
- },
- {
- FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953},
- FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218},
- FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265},
- },
- {
- FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073},
- FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325},
- FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798},
- },
- {
- FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870},
- FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863},
- FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927},
- },
- },
- {
- {
- FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267},
- FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663},
- FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862},
- },
- {
- FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673},
- FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943},
- FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020},
- },
- {
- FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238},
- FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064},
- FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795},
- },
- {
- FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052},
- FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904},
- FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531},
- },
- {
- FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979},
- FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841},
- FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431},
- },
- {
- FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324},
- FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940},
- FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320},
- },
- {
- FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184},
- FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114},
- FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878},
- },
- {
- FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784},
- FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091},
- FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585},
- },
- },
- {
- {
- FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208},
- FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864},
- FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661},
- },
- {
- FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233},
- FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212},
- FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525},
- },
- {
- FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068},
- FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397},
- FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988},
- },
- {
- FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889},
- FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038},
- FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697},
- },
- {
- FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875},
- FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905},
- FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656},
- },
- {
- FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818},
- FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714},
- FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203},
- },
- {
- FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931},
- FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024},
- FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084},
- },
- {
- FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204},
- FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817},
- FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667},
- },
- },
- {
- {
- FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504},
- FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768},
- FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255},
- },
- {
- FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790},
- FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438},
- FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333},
- },
- {
- FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971},
- FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905},
- FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409},
- },
- {
- FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409},
- FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499},
- FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363},
- },
- {
- FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664},
- FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324},
- FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940},
- },
- {
- FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990},
- FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914},
- FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290},
- },
- {
- FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257},
- FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433},
- FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236},
- },
- {
- FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045},
- FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093},
- FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347},
- },
- },
- {
- {
- FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191},
- FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507},
- FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906},
- },
- {
- FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018},
- FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109},
- FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926},
- },
- {
- FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528},
- FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625},
- FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286},
- },
- {
- FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033},
- FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866},
- FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896},
- },
- {
- FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075},
- FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347},
- FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437},
- },
- {
- FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165},
- FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588},
- FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193},
- },
- {
- FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017},
- FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883},
- FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961},
- },
- {
- FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043},
- FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663},
- FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362},
- },
- },
- {
- {
- FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860},
- FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466},
- FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063},
- },
- {
- FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997},
- FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295},
- FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369},
- },
- {
- FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385},
- FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109},
- FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906},
- },
- {
- FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424},
- FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185},
- FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962},
- },
- {
- FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325},
- FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593},
- FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404},
- },
- {
- FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644},
- FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801},
- FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804},
- },
- {
- FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884},
- FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577},
- FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849},
- },
- {
- FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473},
- FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644},
- FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319},
- },
- },
- {
- {
- FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599},
- FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768},
- FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084},
- },
- {
- FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328},
- FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369},
- FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920},
- },
- {
- FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815},
- FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025},
- FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397},
- },
- {
- FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448},
- FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981},
- FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165},
- },
- {
- FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501},
- FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073},
- FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861},
- },
- {
- FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845},
- FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211},
- FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870},
- },
- {
- FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096},
- FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803},
- FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168},
- },
- {
- FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965},
- FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505},
- FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598},
- },
- },
- {
- {
- FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782},
- FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900},
- FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479},
- },
- {
- FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208},
- FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232},
- FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719},
- },
- {
- FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271},
- FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326},
- FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132},
- },
- {
- FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300},
- FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570},
- FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670},
- },
- {
- FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994},
- FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913},
- FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317},
- },
- {
- FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730},
- FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096},
- FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078},
- },
- {
- FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411},
- FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905},
- FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654},
- },
- {
- FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870},
- FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498},
- FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579},
- },
- },
- {
- {
- FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677},
- FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647},
- FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743},
- },
- {
- FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468},
- FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375},
- FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155},
- },
- {
- FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725},
- FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612},
- FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943},
- },
- {
- FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944},
- FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928},
- FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406},
- },
- {
- FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139},
- FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963},
- FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693},
- },
- {
- FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734},
- FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680},
- FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410},
- },
- {
- FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931},
- FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654},
- FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710},
- },
- {
- FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180},
- FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684},
- FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895},
- },
- },
- {
- {
- FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501},
- FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413},
- FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880},
- },
- {
- FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874},
- FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962},
- FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899},
- },
- {
- FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152},
- FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063},
- FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080},
- },
- {
- FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146},
- FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183},
- FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133},
- },
- {
- FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421},
- FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622},
- FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197},
- },
- {
- FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663},
- FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753},
- FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755},
- },
- {
- FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862},
- FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118},
- FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171},
- },
- {
- FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380},
- FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824},
- FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270},
- },
- },
- {
- {
- FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438},
- FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584},
- FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562},
- },
- {
- FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471},
- FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610},
- FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269},
- },
- {
- FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650},
- FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369},
- FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461},
- },
- {
- FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462},
- FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793},
- FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218},
- },
- {
- FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226},
- FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019},
- FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037},
- },
- {
- FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171},
- FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132},
- FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841},
- },
- {
- FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181},
- FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210},
- FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040},
- },
- {
- FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935},
- FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105},
- FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814},
- },
- },
- {
- {
- FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852},
- FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581},
- FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646},
- },
- {
- FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844},
- FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025},
- FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453},
- },
- {
- FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068},
- FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192},
- FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921},
- },
- {
- FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259},
- FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426},
- FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072},
- },
- {
- FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305},
- FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832},
- FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943},
- },
- {
- FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011},
- FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447},
- FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494},
- },
- {
- FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245},
- FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859},
- FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915},
- },
- {
- FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707},
- FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848},
- FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224},
- },
- },
- {
- {
- FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391},
- FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215},
- FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101},
- },
- {
- FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713},
- FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849},
- FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930},
- },
- {
- FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940},
- FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031},
- FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404},
- },
- {
- FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243},
- FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116},
- FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525},
- },
- {
- FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509},
- FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883},
- FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865},
- },
- {
- FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660},
- FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273},
- FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138},
- },
- {
- FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560},
- FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135},
- FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941},
- },
- {
- FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739},
- FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756},
- FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819},
- },
- },
- {
- {
- FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347},
- FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028},
- FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075},
- },
- {
- FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799},
- FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609},
- FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817},
- },
- {
- FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989},
- FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523},
- FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278},
- },
- {
- FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045},
- FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377},
- FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480},
- },
- {
- FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016},
- FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426},
- FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525},
- },
- {
- FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396},
- FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080},
- FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892},
- },
- {
- FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275},
- FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074},
- FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140},
- },
- {
- FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717},
- FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101},
- FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127},
- },
- },
- {
- {
- FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632},
- FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415},
- FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160},
- },
- {
- FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876},
- FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625},
- FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478},
- },
- {
- FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164},
- FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595},
- FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248},
- },
- {
- FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858},
- FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193},
- FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184},
- },
- {
- FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942},
- FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635},
- FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948},
- },
- {
- FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935},
- FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415},
- FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416},
- },
- {
- FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018},
- FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778},
- FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659},
- },
- {
- FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385},
- FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503},
- FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329},
- },
- },
- {
- {
- FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056},
- FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838},
- FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948},
- },
- {
- FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691},
- FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118},
- FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517},
- },
- {
- FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269},
- FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904},
- FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589},
- },
- {
- FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193},
- FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910},
- FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930},
- },
- {
- FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667},
- FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481},
- FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876},
- },
- {
- FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640},
- FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278},
- FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112},
- },
- {
- FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272},
- FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012},
- FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221},
- },
- {
- FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046},
- FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345},
- FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310},
- },
- },
- {
- {
- FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937},
- FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636},
- FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008},
- },
- {
- FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429},
- FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576},
- FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066},
- },
- {
- FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490},
- FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104},
- FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053},
- },
- {
- FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275},
- FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511},
- FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095},
- },
- {
- FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439},
- FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939},
- FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424},
- },
- {
- FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310},
- FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608},
- FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079},
- },
- {
- FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101},
- FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418},
- FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576},
- },
- {
- FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356},
- FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996},
- FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099},
- },
- },
- {
- {
- FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728},
- FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658},
- FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242},
- },
- {
- FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001},
- FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766},
- FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373},
- },
- {
- FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458},
- FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628},
- FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657},
- },
- {
- FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062},
- FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616},
- FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014},
- },
- {
- FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383},
- FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814},
- FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718},
- },
- {
- FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417},
- FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222},
- FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444},
- },
- {
- FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597},
- FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970},
- FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799},
- },
- {
- FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647},
- FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511},
- FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032},
- },
- },
- {
- {
- FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834},
- FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461},
- FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062},
- },
- {
- FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516},
- FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547},
- FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240},
- },
- {
- FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038},
- FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741},
- FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103},
- },
- {
- FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747},
- FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323},
- FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016},
- },
- {
- FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373},
- FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228},
- FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141},
- },
- {
- FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399},
- FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831},
- FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376},
- },
- {
- FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313},
- FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958},
- FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577},
- },
- {
- FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743},
- FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684},
- FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476},
- },
- },
-}
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
deleted file mode 100644
index fd03c252af4..00000000000
--- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
+++ /dev/null
@@ -1,1793 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-import "encoding/binary"
-
-// This code is a port of the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
-// FieldElement represents an element of the field GF(2^255 - 19). An element
-// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
-// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
-// context.
-type FieldElement [10]int32
-
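The deleted comment above pins down the packed radix-2^25.5 representation: limbs alternate 26 and 25 bits, with limb i carrying weight 2^ceil(25.5*i). A minimal standalone sketch (illustration only, not part of the vendored code; feValue is a hypothetical helper) recovering the integer a FieldElement denotes, via math/big:

package main

import (
	"fmt"
	"math/big"
)

// feValue returns t[0] + 2^26 t[1] + 2^51 t[2] + ... + 2^230 t[9],
// reduced into GF(2^255 - 19).
func feValue(t [10]int32) *big.Int {
	weights := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
	v := new(big.Int)
	for i, limb := range t {
		v.Add(v, new(big.Int).Lsh(big.NewInt(int64(limb)), weights[i]))
	}
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	return v.Mod(v, p)
}

func main() {
	one := [10]int32{1}       // what FeOne below produces: t[0] = 1, rest 0
	fmt.Println(feValue(one)) // 1
}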
-var zero FieldElement
-
-func FeZero(fe *FieldElement) {
- copy(fe[:], zero[:])
-}
-
-func FeOne(fe *FieldElement) {
- FeZero(fe)
- fe[0] = 1
-}
-
-func FeAdd(dst, a, b *FieldElement) {
- dst[0] = a[0] + b[0]
- dst[1] = a[1] + b[1]
- dst[2] = a[2] + b[2]
- dst[3] = a[3] + b[3]
- dst[4] = a[4] + b[4]
- dst[5] = a[5] + b[5]
- dst[6] = a[6] + b[6]
- dst[7] = a[7] + b[7]
- dst[8] = a[8] + b[8]
- dst[9] = a[9] + b[9]
-}
-
-func FeSub(dst, a, b *FieldElement) {
- dst[0] = a[0] - b[0]
- dst[1] = a[1] - b[1]
- dst[2] = a[2] - b[2]
- dst[3] = a[3] - b[3]
- dst[4] = a[4] - b[4]
- dst[5] = a[5] - b[5]
- dst[6] = a[6] - b[6]
- dst[7] = a[7] - b[7]
- dst[8] = a[8] - b[8]
- dst[9] = a[9] - b[9]
-}
-
-func FeCopy(dst, src *FieldElement) {
- copy(dst[:], src[:])
-}
-
-// Replace (f,g) with (g,g) if b == 1;
-// replace (f,g) with (f,g) if b == 0.
-//
-// Preconditions: b in {0,1}.
-func FeCMove(f, g *FieldElement, b int32) {
- b = -b
- f[0] ^= b & (f[0] ^ g[0])
- f[1] ^= b & (f[1] ^ g[1])
- f[2] ^= b & (f[2] ^ g[2])
- f[3] ^= b & (f[3] ^ g[3])
- f[4] ^= b & (f[4] ^ g[4])
- f[5] ^= b & (f[5] ^ g[5])
- f[6] ^= b & (f[6] ^ g[6])
- f[7] ^= b & (f[7] ^ g[7])
- f[8] ^= b & (f[8] ^ g[8])
- f[9] ^= b & (f[9] ^ g[9])
-}
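FeCMove's branch-free select works because, with b in {0,1}, negation yields a mask of all zeroes or all ones, and f ^ (mask & (f ^ g)) then evaluates to f or g without a secret-dependent branch. A tiny standalone illustration (not part of the vendored code):

package main

import "fmt"

// cmove returns f when b == 0 and g when b == 1, with no branch on b.
func cmove(f, g, b int32) int32 {
	mask := -b // b in {0,1}: mask is 0x00000000 or 0xFFFFFFFF
	return f ^ (mask & (f ^ g))
}

func main() {
	fmt.Println(cmove(7, 42, 0)) // 7  (keep f)
	fmt.Println(cmove(7, 42, 1)) // 42 (take g)
}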
-
-func load3(in []byte) int64 {
- var r int64
- r = int64(in[0])
- r |= int64(in[1]) << 8
- r |= int64(in[2]) << 16
- return r
-}
-
-func load4(in []byte) int64 {
- var r int64
- r = int64(in[0])
- r |= int64(in[1]) << 8
- r |= int64(in[2]) << 16
- r |= int64(in[3]) << 24
- return r
-}
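load3 and load4 are plain little-endian loads widened to int64; for the 4-byte case the result agrees with encoding/binary (already imported above). A standalone spot check (illustration only):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	in := []byte{0x01, 0x02, 0x03, 0x04}
	manual := int64(in[0]) | int64(in[1])<<8 | int64(in[2])<<16 | int64(in[3])<<24
	viaBinary := int64(binary.LittleEndian.Uint32(in))
	fmt.Println(manual == viaBinary, manual) // true 0x4030201
}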
-
-func FeFromBytes(dst *FieldElement, src *[32]byte) {
- h0 := load4(src[:])
- h1 := load3(src[4:]) << 6
- h2 := load3(src[7:]) << 5
- h3 := load3(src[10:]) << 3
- h4 := load3(src[13:]) << 2
- h5 := load4(src[16:])
- h6 := load3(src[20:]) << 7
- h7 := load3(src[23:]) << 5
- h8 := load3(src[26:]) << 4
- h9 := (load3(src[29:]) & 8388607) << 2
-
- FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
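The shift constants in FeFromBytes are forced by the limb weights: limb i is loaded from byte offset off[i], i.e. bit off[i]*8, so it must be pre-shifted by off[i]*8 - weight[i] for limb * 2^weight to land on the right bits; the final mask 8388607 (2^23 - 1) drops the top bit of the 255-bit encoding. A standalone check that reproduces those constants (illustration only):

package main

import "fmt"

func main() {
	weight := []int{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
	off := []int{0, 4, 7, 10, 13, 16, 20, 23, 26, 29}
	for i := range weight {
		// prints shifts 0 6 5 3 2 0 7 5 4 2, matching the code above
		fmt.Printf("h%d: shift %d\n", i, off[i]*8-weight[i])
	}
}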
-
-// FeToBytes marshals h to s.
-// Preconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Write p=2^255-19; q=floor(h/p).
-// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
-//
-// Proof:
-// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
-// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
-//
-// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
-// Then 0<y<1.
-//
-// Write r=h-pq.
-// Have 0<=r<=p-1=2^255-20.
-// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
-//
-// Write x=r+19(2^-255)r+y.
-// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
-//
-// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
-// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
-func FeToBytes(s *[32]byte, h *FieldElement) {
-	var carry [10]int32
-
-	q := (19*h[9] + (1 << 24)) >> 25
- q = (h[0] + q) >> 26
- q = (h[1] + q) >> 25
- q = (h[2] + q) >> 26
- q = (h[3] + q) >> 25
- q = (h[4] + q) >> 26
- q = (h[5] + q) >> 25
- q = (h[6] + q) >> 26
- q = (h[7] + q) >> 25
- q = (h[8] + q) >> 26
- q = (h[9] + q) >> 25
-
- // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
- h[0] += 19 * q
- // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
-
- carry[0] = h[0] >> 26
- h[1] += carry[0]
- h[0] -= carry[0] << 26
- carry[1] = h[1] >> 25
- h[2] += carry[1]
- h[1] -= carry[1] << 25
- carry[2] = h[2] >> 26
- h[3] += carry[2]
- h[2] -= carry[2] << 26
- carry[3] = h[3] >> 25
- h[4] += carry[3]
- h[3] -= carry[3] << 25
- carry[4] = h[4] >> 26
- h[5] += carry[4]
- h[4] -= carry[4] << 26
- carry[5] = h[5] >> 25
- h[6] += carry[5]
- h[5] -= carry[5] << 25
- carry[6] = h[6] >> 26
- h[7] += carry[6]
- h[6] -= carry[6] << 26
- carry[7] = h[7] >> 25
- h[8] += carry[7]
- h[7] -= carry[7] << 25
- carry[8] = h[8] >> 26
- h[9] += carry[8]
- h[8] -= carry[8] << 26
- carry[9] = h[9] >> 25
- h[9] -= carry[9] << 25
- // h10 = carry9
-
- // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
- // Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
- // evidently 2^255 h10-2^255 q = 0.
- // Goal: Output h[0]+...+2^230 h[9].
-
- s[0] = byte(h[0] >> 0)
- s[1] = byte(h[0] >> 8)
- s[2] = byte(h[0] >> 16)
- s[3] = byte((h[0] >> 24) | (h[1] << 2))
- s[4] = byte(h[1] >> 6)
- s[5] = byte(h[1] >> 14)
- s[6] = byte((h[1] >> 22) | (h[2] << 3))
- s[7] = byte(h[2] >> 5)
- s[8] = byte(h[2] >> 13)
- s[9] = byte((h[2] >> 21) | (h[3] << 5))
- s[10] = byte(h[3] >> 3)
- s[11] = byte(h[3] >> 11)
- s[12] = byte((h[3] >> 19) | (h[4] << 6))
- s[13] = byte(h[4] >> 2)
- s[14] = byte(h[4] >> 10)
- s[15] = byte(h[4] >> 18)
- s[16] = byte(h[5] >> 0)
- s[17] = byte(h[5] >> 8)
- s[18] = byte(h[5] >> 16)
- s[19] = byte((h[5] >> 24) | (h[6] << 1))
- s[20] = byte(h[6] >> 7)
- s[21] = byte(h[6] >> 15)
- s[22] = byte((h[6] >> 23) | (h[7] << 3))
- s[23] = byte(h[7] >> 5)
- s[24] = byte(h[7] >> 13)
- s[25] = byte((h[7] >> 21) | (h[8] << 4))
- s[26] = byte(h[8] >> 4)
- s[27] = byte(h[8] >> 12)
- s[28] = byte((h[8] >> 20) | (h[9] << 6))
- s[29] = byte(h[9] >> 2)
- s[30] = byte(h[9] >> 10)
- s[31] = byte(h[9] >> 18)
-}
-
-func FeIsNegative(f *FieldElement) byte {
- var s [32]byte
- FeToBytes(&s, f)
- return s[0] & 1
-}
-
-func FeIsNonZero(f *FieldElement) int32 {
- var s [32]byte
- FeToBytes(&s, f)
- var x uint8
- for _, b := range s {
- x |= b
- }
- x |= x >> 4
- x |= x >> 2
- x |= x >> 1
- return int32(x & 1)
-}
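
The OR cascade above folds all 32 bytes into bit 0 without branching on secret data. A standalone copy with a small usage check (isNonZero is just the function above, renamed):

package main

import "fmt"

// isNonZero ORs every byte together, then ORs the result's halves
// down until any set bit lands in bit 0.
func isNonZero(s [32]byte) int32 {
	var x uint8
	for _, b := range s {
		x |= b
	}
	x |= x >> 4
	x |= x >> 2
	x |= x >> 1
	return int32(x & 1)
}

func main() {
	var a, b [32]byte
	b[17] = 0x80 // any single set bit makes the result 1
	fmt.Println(isNonZero(a), isNonZero(b)) // 0 1
}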
-
-// FeNeg sets h = -f
-//
-// Preconditions:
-// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func FeNeg(h, f *FieldElement) {
- h[0] = -f[0]
- h[1] = -f[1]
- h[2] = -f[2]
- h[3] = -f[3]
- h[4] = -f[4]
- h[5] = -f[5]
- h[6] = -f[6]
- h[7] = -f[7]
- h[8] = -f[8]
- h[9] = -f[9]
-}
-
-func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
- var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64
-
- /*
- |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
- i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
- |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
- i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
- */
-
- c0 = (h0 + (1 << 25)) >> 26
- h1 += c0
- h0 -= c0 << 26
- c4 = (h4 + (1 << 25)) >> 26
- h5 += c4
- h4 -= c4 << 26
- /* |h0| <= 2^25 */
- /* |h4| <= 2^25 */
- /* |h1| <= 1.51*2^58 */
- /* |h5| <= 1.51*2^58 */
-
- c1 = (h1 + (1 << 24)) >> 25
- h2 += c1
- h1 -= c1 << 25
- c5 = (h5 + (1 << 24)) >> 25
- h6 += c5
- h5 -= c5 << 25
- /* |h1| <= 2^24; from now on fits into int32 */
- /* |h5| <= 2^24; from now on fits into int32 */
- /* |h2| <= 1.21*2^59 */
- /* |h6| <= 1.21*2^59 */
-
- c2 = (h2 + (1 << 25)) >> 26
- h3 += c2
- h2 -= c2 << 26
- c6 = (h6 + (1 << 25)) >> 26
- h7 += c6
- h6 -= c6 << 26
- /* |h2| <= 2^25; from now on fits into int32 unchanged */
- /* |h6| <= 2^25; from now on fits into int32 unchanged */
- /* |h3| <= 1.51*2^58 */
- /* |h7| <= 1.51*2^58 */
-
- c3 = (h3 + (1 << 24)) >> 25
- h4 += c3
- h3 -= c3 << 25
- c7 = (h7 + (1 << 24)) >> 25
- h8 += c7
- h7 -= c7 << 25
- /* |h3| <= 2^24; from now on fits into int32 unchanged */
- /* |h7| <= 2^24; from now on fits into int32 unchanged */
- /* |h4| <= 1.52*2^33 */
- /* |h8| <= 1.52*2^33 */
-
- c4 = (h4 + (1 << 25)) >> 26
- h5 += c4
- h4 -= c4 << 26
- c8 = (h8 + (1 << 25)) >> 26
- h9 += c8
- h8 -= c8 << 26
- /* |h4| <= 2^25; from now on fits into int32 unchanged */
- /* |h8| <= 2^25; from now on fits into int32 unchanged */
- /* |h5| <= 1.01*2^24 */
- /* |h9| <= 1.51*2^58 */
-
- c9 = (h9 + (1 << 24)) >> 25
- h0 += c9 * 19
- h9 -= c9 << 25
- /* |h9| <= 2^24; from now on fits into int32 unchanged */
- /* |h0| <= 1.8*2^37 */
-
- c0 = (h0 + (1 << 25)) >> 26
- h1 += c0
- h0 -= c0 << 26
- /* |h0| <= 2^25; from now on fits into int32 unchanged */
- /* |h1| <= 1.01*2^24 */
-
- h[0] = int32(h0)
- h[1] = int32(h1)
- h[2] = int32(h2)
- h[3] = int32(h3)
- h[4] = int32(h4)
- h[5] = int32(h5)
- h[6] = int32(h6)
- h[7] = int32(h7)
- h[8] = int32(h8)
- h[9] = int32(h9)
-}
-
-// FeMul calculates h = f * g
-// Can overlap h with f or g.
-//
-// Preconditions:
-// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Notes on implementation strategy:
-//
-// Using schoolbook multiplication.
-// Karatsuba would save a little in some cost models.
-//
-// Most multiplications by 2 and 19 are 32-bit precomputations;
-// cheaper than 64-bit postcomputations.
-//
-// There is one remaining multiplication by 19 in the carry chain;
-// one *19 precomputation can be merged into this,
-// but the resulting data flow is considerably less clean.
-//
-// There are 12 carries below.
-// 10 of them are 2-way parallelizable and vectorizable.
-// Can get away with 11 carries, but then data flow is much deeper.
-//
-// With tighter constraints on inputs, can squeeze carries into int32.
-func FeMul(h, f, g *FieldElement) {
- f0 := int64(f[0])
- f1 := int64(f[1])
- f2 := int64(f[2])
- f3 := int64(f[3])
- f4 := int64(f[4])
- f5 := int64(f[5])
- f6 := int64(f[6])
- f7 := int64(f[7])
- f8 := int64(f[8])
- f9 := int64(f[9])
-
- f1_2 := int64(2 * f[1])
- f3_2 := int64(2 * f[3])
- f5_2 := int64(2 * f[5])
- f7_2 := int64(2 * f[7])
- f9_2 := int64(2 * f[9])
-
- g0 := int64(g[0])
- g1 := int64(g[1])
- g2 := int64(g[2])
- g3 := int64(g[3])
- g4 := int64(g[4])
- g5 := int64(g[5])
- g6 := int64(g[6])
- g7 := int64(g[7])
- g8 := int64(g[8])
- g9 := int64(g[9])
-
- g1_19 := int64(19 * g[1]) /* 1.4*2^29 */
- g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */
- g3_19 := int64(19 * g[3])
- g4_19 := int64(19 * g[4])
- g5_19 := int64(19 * g[5])
- g6_19 := int64(19 * g[6])
- g7_19 := int64(19 * g[7])
- g8_19 := int64(19 * g[8])
- g9_19 := int64(19 * g[9])
-
- h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19
- h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19
- h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19
- h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19
- h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19
- h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19
- h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19
- h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19
- h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19
- h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0
-
- FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
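
The g*19 precomputations above are sound because a cross term whose combined limb positions reach 2^255 wraps around with a factor of 19, since 2^255 ≡ 19 (mod 2^255-19). A one-line math/big confirmation:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))
	// 2^255 reduced mod p = 2^255-19.
	r := new(big.Int).Mod(new(big.Int).Lsh(one, 255), p)
	fmt.Println(r) // 19
}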
-
-func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
- f0 := int64(f[0])
- f1 := int64(f[1])
- f2 := int64(f[2])
- f3 := int64(f[3])
- f4 := int64(f[4])
- f5 := int64(f[5])
- f6 := int64(f[6])
- f7 := int64(f[7])
- f8 := int64(f[8])
- f9 := int64(f[9])
- f0_2 := int64(2 * f[0])
- f1_2 := int64(2 * f[1])
- f2_2 := int64(2 * f[2])
- f3_2 := int64(2 * f[3])
- f4_2 := int64(2 * f[4])
- f5_2 := int64(2 * f[5])
- f6_2 := int64(2 * f[6])
- f7_2 := int64(2 * f[7])
- f5_38 := 38 * f5 // 1.31*2^30
- f6_19 := 19 * f6 // 1.31*2^30
- f7_38 := 38 * f7 // 1.31*2^30
- f8_19 := 19 * f8 // 1.31*2^30
- f9_38 := 38 * f9 // 1.31*2^30
-
- h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38
- h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19
- h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19
- h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38
- h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38
- h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19
- h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19
- h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38
- h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38
- h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5
-
- return
-}
-
-// FeSquare calculates h = f*f. Can overlap h with f.
-//
-// Preconditions:
-// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func FeSquare(h, f *FieldElement) {
- h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
- FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
-
-// FeSquare2 sets h = 2 * f * f
-//
-// Can overlap h with f.
-//
-// Preconditions:
-// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
-// See fe_mul.c for discussion of implementation strategy.
-func FeSquare2(h, f *FieldElement) {
- h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
-
- h0 += h0
- h1 += h1
- h2 += h2
- h3 += h3
- h4 += h4
- h5 += h5
- h6 += h6
- h7 += h7
- h8 += h8
- h9 += h9
-
- FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
-
-func FeInvert(out, z *FieldElement) {
- var t0, t1, t2, t3 FieldElement
- var i int
-
- FeSquare(&t0, z) // 2^1
- FeSquare(&t1, &t0) // 2^2
- for i = 1; i < 2; i++ { // 2^3
- FeSquare(&t1, &t1)
- }
- FeMul(&t1, z, &t1) // 2^3 + 2^0
- FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0
- FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1
- FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0
- FeSquare(&t2, &t1) // 5,4,3,2,1
- for i = 1; i < 5; i++ { // 9,8,7,6,5
- FeSquare(&t2, &t2)
- }
- FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0
- FeSquare(&t2, &t1) // 10..1
- for i = 1; i < 10; i++ { // 19..10
- FeSquare(&t2, &t2)
- }
- FeMul(&t2, &t2, &t1) // 19..0
- FeSquare(&t3, &t2) // 20..1
- for i = 1; i < 20; i++ { // 39..20
- FeSquare(&t3, &t3)
- }
- FeMul(&t2, &t3, &t2) // 39..0
- FeSquare(&t2, &t2) // 40..1
- for i = 1; i < 10; i++ { // 49..10
- FeSquare(&t2, &t2)
- }
- FeMul(&t1, &t2, &t1) // 49..0
- FeSquare(&t2, &t1) // 50..1
- for i = 1; i < 50; i++ { // 99..50
- FeSquare(&t2, &t2)
- }
- FeMul(&t2, &t2, &t1) // 99..0
- FeSquare(&t3, &t2) // 100..1
- for i = 1; i < 100; i++ { // 199..100
- FeSquare(&t3, &t3)
- }
- FeMul(&t2, &t3, &t2) // 199..0
- FeSquare(&t2, &t2) // 200..1
- for i = 1; i < 50; i++ { // 249..50
- FeSquare(&t2, &t2)
- }
- FeMul(&t1, &t2, &t1) // 249..0
- FeSquare(&t1, &t1) // 250..1
- for i = 1; i < 5; i++ { // 254..5
- FeSquare(&t1, &t1)
- }
- FeMul(out, &t1, &t0) // 254..5,3,1,0
-}
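
The addition chain above computes z^(2^255-21) = z^(p-2), which is z^-1 by Fermat's little theorem; the trailing comment "254..5,3,1,0" lists the set exponent bits, and 2^255 - 32 + 11 = 2^255 - 21 = p - 2. A math/big sketch of the same inversion:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))
	// Exponent with bits 254..5, 3, 1, 0 set, i.e. p - 2.
	e := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(21))
	z := big.NewInt(123456789) // arbitrary nonzero element
	zinv := new(big.Int).Exp(z, e, p)
	prod := new(big.Int).Mul(z, zinv)
	fmt.Println(prod.Mod(prod, p)) // 1
}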
-
-func fePow22523(out, z *FieldElement) {
- var t0, t1, t2 FieldElement
- var i int
-
- FeSquare(&t0, z)
- for i = 1; i < 1; i++ {
- FeSquare(&t0, &t0)
- }
- FeSquare(&t1, &t0)
- for i = 1; i < 2; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t1, z, &t1)
- FeMul(&t0, &t0, &t1)
- FeSquare(&t0, &t0)
- for i = 1; i < 1; i++ {
- FeSquare(&t0, &t0)
- }
- FeMul(&t0, &t1, &t0)
- FeSquare(&t1, &t0)
- for i = 1; i < 5; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t0, &t1, &t0)
- FeSquare(&t1, &t0)
- for i = 1; i < 10; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t1, &t1, &t0)
- FeSquare(&t2, &t1)
- for i = 1; i < 20; i++ {
- FeSquare(&t2, &t2)
- }
- FeMul(&t1, &t2, &t1)
- FeSquare(&t1, &t1)
- for i = 1; i < 10; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t0, &t1, &t0)
- FeSquare(&t1, &t0)
- for i = 1; i < 50; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t1, &t1, &t0)
- FeSquare(&t2, &t1)
- for i = 1; i < 100; i++ {
- FeSquare(&t2, &t2)
- }
- FeMul(&t1, &t2, &t1)
- FeSquare(&t1, &t1)
- for i = 1; i < 50; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t0, &t1, &t0)
- FeSquare(&t0, &t0)
- for i = 1; i < 2; i++ {
- FeSquare(&t0, &t0)
- }
- FeMul(out, &t0, z)
-}
-
-// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 *
-// y^2 where d = -121665/121666.
-//
-// Several representations are used:
-// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z
-// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT
-// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T
-// PreComputedGroupElement: (y+x,y-x,2dxy)
-
-type ProjectiveGroupElement struct {
- X, Y, Z FieldElement
-}
-
-type ExtendedGroupElement struct {
- X, Y, Z, T FieldElement
-}
-
-type CompletedGroupElement struct {
- X, Y, Z, T FieldElement
-}
-
-type PreComputedGroupElement struct {
- yPlusX, yMinusX, xy2d FieldElement
-}
-
-type CachedGroupElement struct {
- yPlusX, yMinusX, Z, T2d FieldElement
-}
-
-func (p *ProjectiveGroupElement) Zero() {
- FeZero(&p.X)
- FeOne(&p.Y)
- FeOne(&p.Z)
-}
-
-func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) {
- var t0 FieldElement
-
- FeSquare(&r.X, &p.X)
- FeSquare(&r.Z, &p.Y)
- FeSquare2(&r.T, &p.Z)
- FeAdd(&r.Y, &p.X, &p.Y)
- FeSquare(&t0, &r.Y)
- FeAdd(&r.Y, &r.Z, &r.X)
- FeSub(&r.Z, &r.Z, &r.X)
- FeSub(&r.X, &t0, &r.Y)
- FeSub(&r.T, &r.T, &r.Z)
-}
-
-func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) {
- var recip, x, y FieldElement
-
- FeInvert(&recip, &p.Z)
- FeMul(&x, &p.X, &recip)
- FeMul(&y, &p.Y, &recip)
- FeToBytes(s, &y)
- s[31] ^= FeIsNegative(&x) << 7
-}
-
-func (p *ExtendedGroupElement) Zero() {
- FeZero(&p.X)
- FeOne(&p.Y)
- FeOne(&p.Z)
- FeZero(&p.T)
-}
-
-func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) {
- var q ProjectiveGroupElement
- p.ToProjective(&q)
- q.Double(r)
-}
-
-func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) {
- FeAdd(&r.yPlusX, &p.Y, &p.X)
- FeSub(&r.yMinusX, &p.Y, &p.X)
- FeCopy(&r.Z, &p.Z)
- FeMul(&r.T2d, &p.T, &d2)
-}
-
-func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) {
- FeCopy(&r.X, &p.X)
- FeCopy(&r.Y, &p.Y)
- FeCopy(&r.Z, &p.Z)
-}
-
-func (p *ExtendedGroupElement) ToBytes(s *[32]byte) {
- var recip, x, y FieldElement
-
- FeInvert(&recip, &p.Z)
- FeMul(&x, &p.X, &recip)
- FeMul(&y, &p.Y, &recip)
- FeToBytes(s, &y)
- s[31] ^= FeIsNegative(&x) << 7
-}
-
-func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool {
- var u, v, v3, vxx, check FieldElement
-
- FeFromBytes(&p.Y, s)
- FeOne(&p.Z)
- FeSquare(&u, &p.Y)
- FeMul(&v, &u, &d)
- FeSub(&u, &u, &p.Z) // y = y^2-1
- FeAdd(&v, &v, &p.Z) // v = dy^2+1
-
- FeSquare(&v3, &v)
- FeMul(&v3, &v3, &v) // v3 = v^3
- FeSquare(&p.X, &v3)
- FeMul(&p.X, &p.X, &v)
- FeMul(&p.X, &p.X, &u) // x = uv^7
-
- fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8)
- FeMul(&p.X, &p.X, &v3)
- FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8)
-
- var tmpX, tmp2 [32]byte
-
- FeSquare(&vxx, &p.X)
- FeMul(&vxx, &vxx, &v)
- FeSub(&check, &vxx, &u) // vx^2-u
- if FeIsNonZero(&check) == 1 {
- FeAdd(&check, &vxx, &u) // vx^2+u
- if FeIsNonZero(&check) == 1 {
- return false
- }
- FeMul(&p.X, &p.X, &SqrtM1)
-
- FeToBytes(&tmpX, &p.X)
- for i, v := range tmpX {
- tmp2[31-i] = v
- }
- }
-
- if FeIsNegative(&p.X) != (s[31] >> 7) {
- FeNeg(&p.X, &p.X)
- }
-
- FeMul(&p.T, &p.X, &p.Y)
- return true
-}
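
FromBytes above is point decompression: from the curve equation, x^2 = (y^2-1)/(d*y^2+1) = u/v, the candidate root is u*v^3*(u*v^7)^((p-5)/8), and when its square comes out wrong it is fixed up with sqrt(-1). A standalone math/big sketch of the same recovery for the base point's y = 4/5, using the generic (p+3)/8 exponent rather than this file's fePow22523 chain:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))
	mod := func(x *big.Int) *big.Int { return x.Mod(x, p) }
	inv := func(x *big.Int) *big.Int { return new(big.Int).ModInverse(x, p) }

	// d = -121665/121666 mod p, per the representation comment above.
	d := mod(new(big.Int).Mul(big.NewInt(-121665), inv(big.NewInt(121666))))

	// The Ed25519 base point has y = 4/5 mod p.
	y := mod(new(big.Int).Mul(big.NewInt(4), inv(big.NewInt(5))))

	// u/v = (y^2-1)/(d*y^2+1), exactly as in FromBytes.
	y2 := mod(new(big.Int).Mul(y, y))
	u := mod(new(big.Int).Sub(y2, one))
	v := mod(new(big.Int).Add(mod(new(big.Int).Mul(d, y2)), one))
	x2 := mod(new(big.Int).Mul(u, inv(v)))

	// Candidate square root: p = 5 mod 8, so x2^((p+3)/8) works up to a
	// factor of sqrt(-1) = 2^((p-1)/4).
	x := new(big.Int).Exp(x2, new(big.Int).Rsh(new(big.Int).Add(p, big.NewInt(3)), 3), p)
	if mod(new(big.Int).Mul(x, x)).Cmp(x2) != 0 {
		sqrtM1 := new(big.Int).Exp(big.NewInt(2), new(big.Int).Rsh(new(big.Int).Sub(p, one), 2), p)
		x = mod(new(big.Int).Mul(x, sqrtM1))
	}
	fmt.Println(mod(new(big.Int).Mul(x, x)).Cmp(x2) == 0) // true
}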
-
-func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) {
- FeMul(&r.X, &p.X, &p.T)
- FeMul(&r.Y, &p.Y, &p.Z)
- FeMul(&r.Z, &p.Z, &p.T)
-}
-
-func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) {
- FeMul(&r.X, &p.X, &p.T)
- FeMul(&r.Y, &p.Y, &p.Z)
- FeMul(&r.Z, &p.Z, &p.T)
- FeMul(&r.T, &p.X, &p.Y)
-}
-
-func (p *PreComputedGroupElement) Zero() {
- FeOne(&p.yPlusX)
- FeOne(&p.yMinusX)
- FeZero(&p.xy2d)
-}
-
-func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
- var t0 FieldElement
-
- FeAdd(&r.X, &p.Y, &p.X)
- FeSub(&r.Y, &p.Y, &p.X)
- FeMul(&r.Z, &r.X, &q.yPlusX)
- FeMul(&r.Y, &r.Y, &q.yMinusX)
- FeMul(&r.T, &q.T2d, &p.T)
- FeMul(&r.X, &p.Z, &q.Z)
- FeAdd(&t0, &r.X, &r.X)
- FeSub(&r.X, &r.Z, &r.Y)
- FeAdd(&r.Y, &r.Z, &r.Y)
- FeAdd(&r.Z, &t0, &r.T)
- FeSub(&r.T, &t0, &r.T)
-}
-
-func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
- var t0 FieldElement
-
- FeAdd(&r.X, &p.Y, &p.X)
- FeSub(&r.Y, &p.Y, &p.X)
- FeMul(&r.Z, &r.X, &q.yMinusX)
- FeMul(&r.Y, &r.Y, &q.yPlusX)
- FeMul(&r.T, &q.T2d, &p.T)
- FeMul(&r.X, &p.Z, &q.Z)
- FeAdd(&t0, &r.X, &r.X)
- FeSub(&r.X, &r.Z, &r.Y)
- FeAdd(&r.Y, &r.Z, &r.Y)
- FeSub(&r.Z, &t0, &r.T)
- FeAdd(&r.T, &t0, &r.T)
-}
-
-func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
- var t0 FieldElement
-
- FeAdd(&r.X, &p.Y, &p.X)
- FeSub(&r.Y, &p.Y, &p.X)
- FeMul(&r.Z, &r.X, &q.yPlusX)
- FeMul(&r.Y, &r.Y, &q.yMinusX)
- FeMul(&r.T, &q.xy2d, &p.T)
- FeAdd(&t0, &p.Z, &p.Z)
- FeSub(&r.X, &r.Z, &r.Y)
- FeAdd(&r.Y, &r.Z, &r.Y)
- FeAdd(&r.Z, &t0, &r.T)
- FeSub(&r.T, &t0, &r.T)
-}
-
-func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
- var t0 FieldElement
-
- FeAdd(&r.X, &p.Y, &p.X)
- FeSub(&r.Y, &p.Y, &p.X)
- FeMul(&r.Z, &r.X, &q.yMinusX)
- FeMul(&r.Y, &r.Y, &q.yPlusX)
- FeMul(&r.T, &q.xy2d, &p.T)
- FeAdd(&t0, &p.Z, &p.Z)
- FeSub(&r.X, &r.Z, &r.Y)
- FeAdd(&r.Y, &r.Z, &r.Y)
- FeSub(&r.Z, &t0, &r.T)
- FeAdd(&r.T, &t0, &r.T)
-}
-
-func slide(r *[256]int8, a *[32]byte) {
- for i := range r {
- r[i] = int8(1 & (a[i>>3] >> uint(i&7)))
- }
-
- for i := range r {
- if r[i] != 0 {
- for b := 1; b <= 6 && i+b < 256; b++ {
- if r[i+b] != 0 {
-				if r[i]+(r[i+b]<<uint(b)) <= 15 {
-					r[i] += r[i+b] << uint(b)
-					r[i+b] = 0
-				} else if r[i]-(r[i+b]<<uint(b)) >= -15 {
- r[i] -= r[i+b] << uint(b)
- for k := i + b; k < 256; k++ {
- if r[k] == 0 {
- r[k] = 1
- break
- }
- r[k] = 0
- }
- } else {
- break
- }
- }
- }
- }
- }
-}
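
slide recodes a scalar into signed digits that are zero or odd and bounded by 15 in absolute value, so long runs of doublings need only occasional adds in the vartime loop below; the digits still sum to the scalar. A standalone check of both properties, with slide copied from above and an arbitrary test scalar:

package main

import (
	"fmt"
	"math/big"
)

// slide is copied verbatim from the function above.
func slide(r *[256]int8, a *[32]byte) {
	for i := range r {
		r[i] = int8(1 & (a[i>>3] >> uint(i&7)))
	}
	for i := range r {
		if r[i] != 0 {
			for b := 1; b <= 6 && i+b < 256; b++ {
				if r[i+b] != 0 {
					if r[i]+(r[i+b]<<uint(b)) <= 15 {
						r[i] += r[i+b] << uint(b)
						r[i+b] = 0
					} else if r[i]-(r[i+b]<<uint(b)) >= -15 {
						r[i] -= r[i+b] << uint(b)
						for k := i + b; k < 256; k++ {
							if r[k] == 0 {
								r[k] = 1
								break
							}
							r[k] = 0
						}
					} else {
						break
					}
				}
			}
		}
	}
}

func main() {
	var a [32]byte
	for i := range a {
		a[i] = byte(3*i + 5) // arbitrary scalar
	}
	var r [256]int8
	slide(&r, &a)

	// Every digit is odd or zero and lies in [-15, 15]; the digits
	// reconstruct the scalar: sum r[i]*2^i == a.
	sum := new(big.Int)
	for i, d := range r {
		if d != 0 && (d&1 == 0 || d > 15 || d < -15) {
			panic("bad digit")
		}
		sum.Add(sum, new(big.Int).Lsh(big.NewInt(int64(d)), uint(i)))
	}
	be := make([]byte, 32)
	for i := range be {
		be[i] = a[31-i] // big.Int wants big-endian
	}
	fmt.Println(sum.Cmp(new(big.Int).SetBytes(be)) == 0) // true
}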
-
-// GeDoubleScalarMultVartime sets r = a*A + b*B
-// where a = a[0]+256*a[1]+...+256^31 a[31].
-// and b = b[0]+256*b[1]+...+256^31 b[31].
-// B is the Ed25519 base point (x,4/5) with x positive.
-func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) {
- var aSlide, bSlide [256]int8
- var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A
- var t CompletedGroupElement
- var u, A2 ExtendedGroupElement
- var i int
-
- slide(&aSlide, a)
- slide(&bSlide, b)
-
- A.ToCached(&Ai[0])
- A.Double(&t)
- t.ToExtended(&A2)
-
- for i := 0; i < 7; i++ {
- geAdd(&t, &A2, &Ai[i])
- t.ToExtended(&u)
- u.ToCached(&Ai[i+1])
- }
-
- r.Zero()
-
- for i = 255; i >= 0; i-- {
- if aSlide[i] != 0 || bSlide[i] != 0 {
- break
- }
- }
-
- for ; i >= 0; i-- {
- r.Double(&t)
-
- if aSlide[i] > 0 {
- t.ToExtended(&u)
- geAdd(&t, &u, &Ai[aSlide[i]/2])
- } else if aSlide[i] < 0 {
- t.ToExtended(&u)
- geSub(&t, &u, &Ai[(-aSlide[i])/2])
- }
-
- if bSlide[i] > 0 {
- t.ToExtended(&u)
- geMixedAdd(&t, &u, &bi[bSlide[i]/2])
- } else if bSlide[i] < 0 {
- t.ToExtended(&u)
- geMixedSub(&t, &u, &bi[(-bSlide[i])/2])
- }
-
- t.ToProjective(r)
- }
-}
-
-// equal returns 1 if b == c and 0 otherwise, assuming that b and c are
-// non-negative.
-func equal(b, c int32) int32 {
- x := uint32(b ^ c)
- x--
- return int32(x >> 31)
-}
-
-// negative returns 1 if b < 0 and 0 otherwise.
-func negative(b int32) int32 {
- return (b >> 31) & 1
-}
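
Both helpers are branch-free: equal relies on x-1 underflowing to all ones exactly when x == 0, and negative extracts the sign bit with an arithmetic shift. Copied below with a small usage check:

package main

import "fmt"

func equal(b, c int32) int32 {
	x := uint32(b ^ c) // zero iff b == c
	x--                // underflows to 0xFFFFFFFF iff x was zero
	return int32(x >> 31)
}

func negative(b int32) int32 {
	return (b >> 31) & 1 // arithmetic shift replicates the sign bit
}

func main() {
	fmt.Println(equal(5, 5), equal(5, 6), negative(-7), negative(7)) // 1 0 1 0
}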
-
-func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) {
- FeCMove(&t.yPlusX, &u.yPlusX, b)
- FeCMove(&t.yMinusX, &u.yMinusX, b)
- FeCMove(&t.xy2d, &u.xy2d, b)
-}
-
-func selectPoint(t *PreComputedGroupElement, pos int32, b int32) {
- var minusT PreComputedGroupElement
- bNegative := negative(b)
- bAbs := b - (((-bNegative) & b) << 1)
-
- t.Zero()
- for i := int32(0); i < 8; i++ {
- PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1))
- }
- FeCopy(&minusT.yPlusX, &t.yMinusX)
- FeCopy(&minusT.yMinusX, &t.yPlusX)
- FeNeg(&minusT.xy2d, &t.xy2d)
- PreComputedGroupElementCMove(t, &minusT, bNegative)
-}
-
-// GeScalarMultBase computes h = a*B, where
-// a = a[0]+256*a[1]+...+256^31 a[31]
-// B is the Ed25519 base point (x,4/5) with x positive.
-//
-// Preconditions:
-// a[31] <= 127
-func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) {
- var e [64]int8
-
- for i, v := range a {
- e[2*i] = int8(v & 15)
- e[2*i+1] = int8((v >> 4) & 15)
- }
-
- // each e[i] is between 0 and 15 and e[63] is between 0 and 7.
-
- carry := int8(0)
- for i := 0; i < 63; i++ {
- e[i] += carry
- carry = (e[i] + 8) >> 4
- e[i] -= carry << 4
- }
- e[63] += carry
- // each e[i] is between -8 and 8.
-
- h.Zero()
- var t PreComputedGroupElement
- var r CompletedGroupElement
- for i := int32(1); i < 64; i += 2 {
- selectPoint(&t, i/2, int32(e[i]))
- geMixedAdd(&r, h, &t)
- r.ToExtended(h)
- }
-
- var s ProjectiveGroupElement
-
- h.Double(&r)
- r.ToProjective(&s)
- s.Double(&r)
- r.ToProjective(&s)
- s.Double(&r)
- r.ToProjective(&s)
- s.Double(&r)
- r.ToExtended(h)
-
- for i := int32(0); i < 64; i += 2 {
- selectPoint(&t, i/2, int32(e[i]))
- geMixedAdd(&r, h, &t)
- r.ToExtended(h)
- }
-}
-
-// The scalars are GF(2^252 + 27742317777372353535851937790883648493).
-
-// Input:
-// a[0]+256*a[1]+...+256^31*a[31] = a
-// b[0]+256*b[1]+...+256^31*b[31] = b
-// c[0]+256*c[1]+...+256^31*c[31] = c
-//
-// Output:
-// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
-// where l = 2^252 + 27742317777372353535851937790883648493.
-func ScMulAdd(s, a, b, c *[32]byte) {
- a0 := 2097151 & load3(a[:])
- a1 := 2097151 & (load4(a[2:]) >> 5)
- a2 := 2097151 & (load3(a[5:]) >> 2)
- a3 := 2097151 & (load4(a[7:]) >> 7)
- a4 := 2097151 & (load4(a[10:]) >> 4)
- a5 := 2097151 & (load3(a[13:]) >> 1)
- a6 := 2097151 & (load4(a[15:]) >> 6)
- a7 := 2097151 & (load3(a[18:]) >> 3)
- a8 := 2097151 & load3(a[21:])
- a9 := 2097151 & (load4(a[23:]) >> 5)
- a10 := 2097151 & (load3(a[26:]) >> 2)
- a11 := (load4(a[28:]) >> 7)
- b0 := 2097151 & load3(b[:])
- b1 := 2097151 & (load4(b[2:]) >> 5)
- b2 := 2097151 & (load3(b[5:]) >> 2)
- b3 := 2097151 & (load4(b[7:]) >> 7)
- b4 := 2097151 & (load4(b[10:]) >> 4)
- b5 := 2097151 & (load3(b[13:]) >> 1)
- b6 := 2097151 & (load4(b[15:]) >> 6)
- b7 := 2097151 & (load3(b[18:]) >> 3)
- b8 := 2097151 & load3(b[21:])
- b9 := 2097151 & (load4(b[23:]) >> 5)
- b10 := 2097151 & (load3(b[26:]) >> 2)
- b11 := (load4(b[28:]) >> 7)
- c0 := 2097151 & load3(c[:])
- c1 := 2097151 & (load4(c[2:]) >> 5)
- c2 := 2097151 & (load3(c[5:]) >> 2)
- c3 := 2097151 & (load4(c[7:]) >> 7)
- c4 := 2097151 & (load4(c[10:]) >> 4)
- c5 := 2097151 & (load3(c[13:]) >> 1)
- c6 := 2097151 & (load4(c[15:]) >> 6)
- c7 := 2097151 & (load3(c[18:]) >> 3)
- c8 := 2097151 & load3(c[21:])
- c9 := 2097151 & (load4(c[23:]) >> 5)
- c10 := 2097151 & (load3(c[26:]) >> 2)
- c11 := (load4(c[28:]) >> 7)
- var carry [23]int64
-
- s0 := c0 + a0*b0
- s1 := c1 + a0*b1 + a1*b0
- s2 := c2 + a0*b2 + a1*b1 + a2*b0
- s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0
- s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0
- s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0
- s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0
- s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0
- s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0
- s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0
- s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0
- s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0
- s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1
- s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2
- s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3
- s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4
- s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5
- s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6
- s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7
- s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8
- s20 := a9*b11 + a10*b10 + a11*b9
- s21 := a10*b11 + a11*b10
- s22 := a11 * b11
- s23 := int64(0)
-
- carry[0] = (s0 + (1 << 20)) >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[2] = (s2 + (1 << 20)) >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[4] = (s4 + (1 << 20)) >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[12] = (s12 + (1 << 20)) >> 21
- s13 += carry[12]
- s12 -= carry[12] << 21
- carry[14] = (s14 + (1 << 20)) >> 21
- s15 += carry[14]
- s14 -= carry[14] << 21
- carry[16] = (s16 + (1 << 20)) >> 21
- s17 += carry[16]
- s16 -= carry[16] << 21
- carry[18] = (s18 + (1 << 20)) >> 21
- s19 += carry[18]
- s18 -= carry[18] << 21
- carry[20] = (s20 + (1 << 20)) >> 21
- s21 += carry[20]
- s20 -= carry[20] << 21
- carry[22] = (s22 + (1 << 20)) >> 21
- s23 += carry[22]
- s22 -= carry[22] << 21
-
- carry[1] = (s1 + (1 << 20)) >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[3] = (s3 + (1 << 20)) >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[5] = (s5 + (1 << 20)) >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
- carry[13] = (s13 + (1 << 20)) >> 21
- s14 += carry[13]
- s13 -= carry[13] << 21
- carry[15] = (s15 + (1 << 20)) >> 21
- s16 += carry[15]
- s15 -= carry[15] << 21
- carry[17] = (s17 + (1 << 20)) >> 21
- s18 += carry[17]
- s17 -= carry[17] << 21
- carry[19] = (s19 + (1 << 20)) >> 21
- s20 += carry[19]
- s19 -= carry[19] << 21
- carry[21] = (s21 + (1 << 20)) >> 21
- s22 += carry[21]
- s21 -= carry[21] << 21
-
- s11 += s23 * 666643
- s12 += s23 * 470296
- s13 += s23 * 654183
- s14 -= s23 * 997805
- s15 += s23 * 136657
- s16 -= s23 * 683901
- s23 = 0
-
- s10 += s22 * 666643
- s11 += s22 * 470296
- s12 += s22 * 654183
- s13 -= s22 * 997805
- s14 += s22 * 136657
- s15 -= s22 * 683901
- s22 = 0
-
- s9 += s21 * 666643
- s10 += s21 * 470296
- s11 += s21 * 654183
- s12 -= s21 * 997805
- s13 += s21 * 136657
- s14 -= s21 * 683901
- s21 = 0
-
- s8 += s20 * 666643
- s9 += s20 * 470296
- s10 += s20 * 654183
- s11 -= s20 * 997805
- s12 += s20 * 136657
- s13 -= s20 * 683901
- s20 = 0
-
- s7 += s19 * 666643
- s8 += s19 * 470296
- s9 += s19 * 654183
- s10 -= s19 * 997805
- s11 += s19 * 136657
- s12 -= s19 * 683901
- s19 = 0
-
- s6 += s18 * 666643
- s7 += s18 * 470296
- s8 += s18 * 654183
- s9 -= s18 * 997805
- s10 += s18 * 136657
- s11 -= s18 * 683901
- s18 = 0
-
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[12] = (s12 + (1 << 20)) >> 21
- s13 += carry[12]
- s12 -= carry[12] << 21
- carry[14] = (s14 + (1 << 20)) >> 21
- s15 += carry[14]
- s14 -= carry[14] << 21
- carry[16] = (s16 + (1 << 20)) >> 21
- s17 += carry[16]
- s16 -= carry[16] << 21
-
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
- carry[13] = (s13 + (1 << 20)) >> 21
- s14 += carry[13]
- s13 -= carry[13] << 21
- carry[15] = (s15 + (1 << 20)) >> 21
- s16 += carry[15]
- s15 -= carry[15] << 21
-
- s5 += s17 * 666643
- s6 += s17 * 470296
- s7 += s17 * 654183
- s8 -= s17 * 997805
- s9 += s17 * 136657
- s10 -= s17 * 683901
- s17 = 0
-
- s4 += s16 * 666643
- s5 += s16 * 470296
- s6 += s16 * 654183
- s7 -= s16 * 997805
- s8 += s16 * 136657
- s9 -= s16 * 683901
- s16 = 0
-
- s3 += s15 * 666643
- s4 += s15 * 470296
- s5 += s15 * 654183
- s6 -= s15 * 997805
- s7 += s15 * 136657
- s8 -= s15 * 683901
- s15 = 0
-
- s2 += s14 * 666643
- s3 += s14 * 470296
- s4 += s14 * 654183
- s5 -= s14 * 997805
- s6 += s14 * 136657
- s7 -= s14 * 683901
- s14 = 0
-
- s1 += s13 * 666643
- s2 += s13 * 470296
- s3 += s13 * 654183
- s4 -= s13 * 997805
- s5 += s13 * 136657
- s6 -= s13 * 683901
- s13 = 0
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = (s0 + (1 << 20)) >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[2] = (s2 + (1 << 20)) >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[4] = (s4 + (1 << 20)) >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- carry[1] = (s1 + (1 << 20)) >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[3] = (s3 + (1 << 20)) >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[5] = (s5 + (1 << 20)) >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[11] = s11 >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- s[0] = byte(s0 >> 0)
- s[1] = byte(s0 >> 8)
- s[2] = byte((s0 >> 16) | (s1 << 5))
- s[3] = byte(s1 >> 3)
- s[4] = byte(s1 >> 11)
- s[5] = byte((s1 >> 19) | (s2 << 2))
- s[6] = byte(s2 >> 6)
- s[7] = byte((s2 >> 14) | (s3 << 7))
- s[8] = byte(s3 >> 1)
- s[9] = byte(s3 >> 9)
- s[10] = byte((s3 >> 17) | (s4 << 4))
- s[11] = byte(s4 >> 4)
- s[12] = byte(s4 >> 12)
- s[13] = byte((s4 >> 20) | (s5 << 1))
- s[14] = byte(s5 >> 7)
- s[15] = byte((s5 >> 15) | (s6 << 6))
- s[16] = byte(s6 >> 2)
- s[17] = byte(s6 >> 10)
- s[18] = byte((s6 >> 18) | (s7 << 3))
- s[19] = byte(s7 >> 5)
- s[20] = byte(s7 >> 13)
- s[21] = byte(s8 >> 0)
- s[22] = byte(s8 >> 8)
- s[23] = byte((s8 >> 16) | (s9 << 5))
- s[24] = byte(s9 >> 3)
- s[25] = byte(s9 >> 11)
- s[26] = byte((s9 >> 19) | (s10 << 2))
- s[27] = byte(s10 >> 6)
- s[28] = byte((s10 >> 14) | (s11 << 7))
- s[29] = byte(s11 >> 1)
- s[30] = byte(s11 >> 9)
- s[31] = byte(s11 >> 17)
-}
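
The fold constants 666643, 470296, 654183, 997805, 136657 and 683901 used throughout ScMulAdd (and ScReduce below) are signed 21-bit chunks of -27742317777372353535851937790883648493, so replacing a limb of weight 2^252 by them is exactly reduction mod l. A math/big verification of that congruence:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// l = 2^252 + 27742317777372353535851937790883648493
	delta, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	l := new(big.Int).Add(new(big.Int).Lsh(big.NewInt(1), 252), delta)

	// Reassemble the signed constants at weights 2^0 .. 2^105.
	consts := []int64{666643, 470296, 654183, -997805, 136657, -683901}
	c := new(big.Int)
	for i, k := range consts {
		c.Add(c, new(big.Int).Lsh(big.NewInt(k), uint(21*i)))
	}

	// 2^252 ≡ c (mod l), so folding s12 into s0..s5 preserves the value.
	diff := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 252), c)
	fmt.Println(new(big.Int).Mod(diff, l).Sign() == 0) // true
}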
-
-// Input:
-// s[0]+256*s[1]+...+256^63*s[63] = s
-//
-// Output:
-// s[0]+256*s[1]+...+256^31*s[31] = s mod l
-// where l = 2^252 + 27742317777372353535851937790883648493.
-func ScReduce(out *[32]byte, s *[64]byte) {
- s0 := 2097151 & load3(s[:])
- s1 := 2097151 & (load4(s[2:]) >> 5)
- s2 := 2097151 & (load3(s[5:]) >> 2)
- s3 := 2097151 & (load4(s[7:]) >> 7)
- s4 := 2097151 & (load4(s[10:]) >> 4)
- s5 := 2097151 & (load3(s[13:]) >> 1)
- s6 := 2097151 & (load4(s[15:]) >> 6)
- s7 := 2097151 & (load3(s[18:]) >> 3)
- s8 := 2097151 & load3(s[21:])
- s9 := 2097151 & (load4(s[23:]) >> 5)
- s10 := 2097151 & (load3(s[26:]) >> 2)
- s11 := 2097151 & (load4(s[28:]) >> 7)
- s12 := 2097151 & (load4(s[31:]) >> 4)
- s13 := 2097151 & (load3(s[34:]) >> 1)
- s14 := 2097151 & (load4(s[36:]) >> 6)
- s15 := 2097151 & (load3(s[39:]) >> 3)
- s16 := 2097151 & load3(s[42:])
- s17 := 2097151 & (load4(s[44:]) >> 5)
- s18 := 2097151 & (load3(s[47:]) >> 2)
- s19 := 2097151 & (load4(s[49:]) >> 7)
- s20 := 2097151 & (load4(s[52:]) >> 4)
- s21 := 2097151 & (load3(s[55:]) >> 1)
- s22 := 2097151 & (load4(s[57:]) >> 6)
- s23 := (load4(s[60:]) >> 3)
-
- s11 += s23 * 666643
- s12 += s23 * 470296
- s13 += s23 * 654183
- s14 -= s23 * 997805
- s15 += s23 * 136657
- s16 -= s23 * 683901
- s23 = 0
-
- s10 += s22 * 666643
- s11 += s22 * 470296
- s12 += s22 * 654183
- s13 -= s22 * 997805
- s14 += s22 * 136657
- s15 -= s22 * 683901
- s22 = 0
-
- s9 += s21 * 666643
- s10 += s21 * 470296
- s11 += s21 * 654183
- s12 -= s21 * 997805
- s13 += s21 * 136657
- s14 -= s21 * 683901
- s21 = 0
-
- s8 += s20 * 666643
- s9 += s20 * 470296
- s10 += s20 * 654183
- s11 -= s20 * 997805
- s12 += s20 * 136657
- s13 -= s20 * 683901
- s20 = 0
-
- s7 += s19 * 666643
- s8 += s19 * 470296
- s9 += s19 * 654183
- s10 -= s19 * 997805
- s11 += s19 * 136657
- s12 -= s19 * 683901
- s19 = 0
-
- s6 += s18 * 666643
- s7 += s18 * 470296
- s8 += s18 * 654183
- s9 -= s18 * 997805
- s10 += s18 * 136657
- s11 -= s18 * 683901
- s18 = 0
-
- var carry [17]int64
-
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[12] = (s12 + (1 << 20)) >> 21
- s13 += carry[12]
- s12 -= carry[12] << 21
- carry[14] = (s14 + (1 << 20)) >> 21
- s15 += carry[14]
- s14 -= carry[14] << 21
- carry[16] = (s16 + (1 << 20)) >> 21
- s17 += carry[16]
- s16 -= carry[16] << 21
-
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
- carry[13] = (s13 + (1 << 20)) >> 21
- s14 += carry[13]
- s13 -= carry[13] << 21
- carry[15] = (s15 + (1 << 20)) >> 21
- s16 += carry[15]
- s15 -= carry[15] << 21
-
- s5 += s17 * 666643
- s6 += s17 * 470296
- s7 += s17 * 654183
- s8 -= s17 * 997805
- s9 += s17 * 136657
- s10 -= s17 * 683901
- s17 = 0
-
- s4 += s16 * 666643
- s5 += s16 * 470296
- s6 += s16 * 654183
- s7 -= s16 * 997805
- s8 += s16 * 136657
- s9 -= s16 * 683901
- s16 = 0
-
- s3 += s15 * 666643
- s4 += s15 * 470296
- s5 += s15 * 654183
- s6 -= s15 * 997805
- s7 += s15 * 136657
- s8 -= s15 * 683901
- s15 = 0
-
- s2 += s14 * 666643
- s3 += s14 * 470296
- s4 += s14 * 654183
- s5 -= s14 * 997805
- s6 += s14 * 136657
- s7 -= s14 * 683901
- s14 = 0
-
- s1 += s13 * 666643
- s2 += s13 * 470296
- s3 += s13 * 654183
- s4 -= s13 * 997805
- s5 += s13 * 136657
- s6 -= s13 * 683901
- s13 = 0
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = (s0 + (1 << 20)) >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[2] = (s2 + (1 << 20)) >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[4] = (s4 + (1 << 20)) >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- carry[1] = (s1 + (1 << 20)) >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[3] = (s3 + (1 << 20)) >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[5] = (s5 + (1 << 20)) >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[11] = s11 >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- out[0] = byte(s0 >> 0)
- out[1] = byte(s0 >> 8)
- out[2] = byte((s0 >> 16) | (s1 << 5))
- out[3] = byte(s1 >> 3)
- out[4] = byte(s1 >> 11)
- out[5] = byte((s1 >> 19) | (s2 << 2))
- out[6] = byte(s2 >> 6)
- out[7] = byte((s2 >> 14) | (s3 << 7))
- out[8] = byte(s3 >> 1)
- out[9] = byte(s3 >> 9)
- out[10] = byte((s3 >> 17) | (s4 << 4))
- out[11] = byte(s4 >> 4)
- out[12] = byte(s4 >> 12)
- out[13] = byte((s4 >> 20) | (s5 << 1))
- out[14] = byte(s5 >> 7)
- out[15] = byte((s5 >> 15) | (s6 << 6))
- out[16] = byte(s6 >> 2)
- out[17] = byte(s6 >> 10)
- out[18] = byte((s6 >> 18) | (s7 << 3))
- out[19] = byte(s7 >> 5)
- out[20] = byte(s7 >> 13)
- out[21] = byte(s8 >> 0)
- out[22] = byte(s8 >> 8)
- out[23] = byte((s8 >> 16) | (s9 << 5))
- out[24] = byte(s9 >> 3)
- out[25] = byte(s9 >> 11)
- out[26] = byte((s9 >> 19) | (s10 << 2))
- out[27] = byte(s10 >> 6)
- out[28] = byte((s10 >> 14) | (s11 << 7))
- out[29] = byte(s11 >> 1)
- out[30] = byte(s11 >> 9)
- out[31] = byte(s11 >> 17)
-}
-
-// order is the order of Curve25519 in little-endian form.
-var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000}
-
-// ScMinimal returns true if the given scalar is less than the order of the
-// curve.
-func ScMinimal(scalar *[32]byte) bool {
- for i := 3; ; i-- {
- v := binary.LittleEndian.Uint64(scalar[i*8:])
- if v > order[i] {
- return false
- } else if v < order[i] {
- break
- } else if i == 0 {
- return false
- }
- }
-
- return true
-}
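
A standalone cross-check that the packed order words equal l = 2^252 + 27742317777372353535851937790883648493, and that ScMinimal (copied below as scMinimal) accepts l-1 but rejects l itself:

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000}

// scMinimal is the function above, copied for a standalone check.
func scMinimal(scalar *[32]byte) bool {
	for i := 3; ; i-- {
		v := binary.LittleEndian.Uint64(scalar[i*8:])
		if v > order[i] {
			return false
		} else if v < order[i] {
			break
		} else if i == 0 {
			return false
		}
	}
	return true
}

func main() {
	// Rebuild l from the packed little-endian words.
	l := new(big.Int)
	for i := 3; i >= 0; i-- {
		l.Lsh(l, 64)
		l.Or(l, new(big.Int).SetUint64(order[i]))
	}
	delta, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	want := new(big.Int).Add(new(big.Int).Lsh(big.NewInt(1), 252), delta)
	fmt.Println(l.Cmp(want) == 0) // true

	// l-1 is minimal; l itself is not.
	var s [32]byte
	b := new(big.Int).Sub(l, big.NewInt(1)).Bytes() // big-endian, 32 bytes
	for i, v := range b {
		s[len(b)-1-i] = v
	}
	fmt.Println(scMinimal(&s)) // true
	binary.LittleEndian.PutUint64(s[0:], binary.LittleEndian.Uint64(s[0:])+1)
	fmt.Println(scMinimal(&s)) // false
}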
diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/alias/alias.go
similarity index 84%
rename from vendor/golang.org/x/crypto/internal/subtle/aliasing.go
rename to vendor/golang.org/x/crypto/internal/alias/alias.go
index 4fad24f8dcd..69c17f822b9 100644
--- a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go
+++ b/vendor/golang.org/x/crypto/internal/alias/alias.go
@@ -5,9 +5,8 @@
//go:build !purego
// +build !purego
-// Package subtle implements functions that are often useful in cryptographic
-// code but require careful thought to use correctly.
-package subtle // import "golang.org/x/crypto/internal/subtle"
+// Package alias implements memory aliasing tests.
+package alias
import "unsafe"
diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
similarity index 86%
rename from vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go
rename to vendor/golang.org/x/crypto/internal/alias/alias_purego.go
index 80ccbed2c0d..4775b0a4384 100644
--- a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go
+++ b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
@@ -5,9 +5,8 @@
//go:build purego
// +build purego
-// Package subtle implements functions that are often useful in cryptographic
-// code but require careful thought to use correctly.
-package subtle // import "golang.org/x/crypto/internal/subtle"
+// Package alias implements memory aliasing tests.
+package alias
// This is the Google App Engine standard variant based on reflect
// because the unsafe package and cgo are disallowed.
diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
similarity index 100%
rename from vendor/golang.org/x/crypto/poly1305/bits_compat.go
rename to vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
similarity index 100%
rename from vendor/golang.org/x/crypto/poly1305/bits_go1.13.go
rename to vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
similarity index 100%
rename from vendor/golang.org/x/crypto/poly1305/mac_noasm.go
rename to vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
similarity index 98%
rename from vendor/golang.org/x/crypto/poly1305/poly1305.go
rename to vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
index 9d7a6af09fe..4aaea810a26 100644
--- a/vendor/golang.org/x/crypto/poly1305/poly1305.go
+++ b/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
@@ -15,7 +15,7 @@
// used with a fixed key in order to generate one-time keys from a nonce.
// However, in this package AES isn't used and the one-time key is specified
// directly.
-package poly1305 // import "golang.org/x/crypto/poly1305"
+package poly1305
import "crypto/subtle"
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
similarity index 100%
rename from vendor/golang.org/x/crypto/poly1305/sum_amd64.go
rename to vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
similarity index 100%
rename from vendor/golang.org/x/crypto/poly1305/sum_amd64.s
rename to vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
similarity index 99%
rename from vendor/golang.org/x/crypto/poly1305/sum_generic.go
rename to vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
index c942a65904f..e041da5ea3e 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_generic.go
+++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
@@ -136,7 +136,7 @@ func shiftRightBy2(a uint128) uint128 {
// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
// 128 bits of message, it computes
//
-// h₊ = (h + m) * r mod 2¹³⁰ - 5
+// h₊ = (h + m) * r mod 2¹³⁰ - 5
//
// If the msg length is not a multiple of TagSize, it assumes the last
// incomplete chunk is the final one.
@@ -278,8 +278,7 @@ const (
// finalize completes the modular reduction of h and computes
//
-// out = h + s mod 2¹²⁸
-//
+// out = h + s mod 2¹²⁸
func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
h0, h1, h2 := h[0], h[1], h[2]
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
similarity index 100%
rename from vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go
rename to vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
similarity index 100%
rename from vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s
rename to vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
similarity index 99%
rename from vendor/golang.org/x/crypto/poly1305/sum_s390x.go
rename to vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
index 62cc9f84709..ec959668896 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go
+++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
@@ -14,6 +14,7 @@ import (
// updateVX is an assembly implementation of Poly1305 that uses vector
// instructions. It must only be called if the vector facility (vx) is
// available.
+//
//go:noescape
func updateVX(state *macState, msg []byte)
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
similarity index 99%
rename from vendor/golang.org/x/crypto/poly1305/sum_s390x.s
rename to vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
index 69c64f84217..aa9e0494c90 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s
+++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
@@ -18,7 +18,7 @@
// value. These limbs are, for the most part, zero extended and
// placed into 64-bit vector register elements. Each vector
// register is 128-bits wide and so holds 2 of these elements.
-// Using 26-bit limbs allows us plenty of headroom to accomodate
+// Using 26-bit limbs allows us plenty of headroom to accommodate
// accumulations before and after multiplication without
// overflowing either 32-bits (before multiplication) or 64-bits
// (after multiplication).
diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
new file mode 100644
index 00000000000..f3c3242a047
--- /dev/null
+++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
@@ -0,0 +1,173 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package secretbox encrypts and authenticates small messages.
+
+Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with
+secret-key cryptography. The length of messages is not hidden.
+
+It is the caller's responsibility to ensure the uniqueness of nonces—for
+example, by using nonce 1 for the first message, nonce 2 for the second
+message, etc. Nonces are long enough that randomly generated nonces have
+negligible risk of collision.
+
+Messages should be small because:
+
+1. The whole message needs to be held in memory to be processed.
+
+2. Using large messages pressures implementations on small machines to decrypt
+and process plaintext before authenticating it. This is very dangerous, and
+this API does not allow it, but a protocol that uses excessive message sizes
+might present some implementations with no other choice.
+
+3. Fixed overheads will be sufficiently amortised by messages as small as 8KB.
+
+4. Performance may be improved by working with messages that fit into data caches.
+
+Thus large amounts of data should be chunked so that each message is small.
+(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable
+chunk size.
+
+This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
+*/
+package secretbox // import "golang.org/x/crypto/nacl/secretbox"
+
+import (
+ "golang.org/x/crypto/internal/alias"
+ "golang.org/x/crypto/internal/poly1305"
+ "golang.org/x/crypto/salsa20/salsa"
+)
+
+// Overhead is the number of bytes of overhead when boxing a message.
+const Overhead = poly1305.TagSize
+
+// setup produces a sub-key and Salsa20 counter given a nonce and key.
+func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
+ // We use XSalsa20 for encryption so first we need to generate a
+ // key and nonce with HSalsa20.
+ var hNonce [16]byte
+ copy(hNonce[:], nonce[:])
+ salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
+
+ // The final 8 bytes of the original nonce form the new nonce.
+ copy(counter[:], nonce[16:])
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
+
+// Seal appends an encrypted and authenticated copy of message to out, which
+// must not overlap message. The key and nonce pair must be unique for each
+// distinct message and the output will be Overhead bytes longer than message.
+func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
+ var subKey [32]byte
+ var counter [16]byte
+ setup(&subKey, &counter, nonce, key)
+
+ // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
+ // Salsa20 works with 64-byte blocks, we also generate 32 bytes of
+ // keystream as a side effect.
+ var firstBlock [64]byte
+ salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
+
+ var poly1305Key [32]byte
+ copy(poly1305Key[:], firstBlock[:])
+
+ ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
+ if alias.AnyOverlap(out, message) {
+ panic("nacl: invalid buffer overlap")
+ }
+
+ // We XOR up to 32 bytes of message with the keystream generated from
+ // the first block.
+ firstMessageBlock := message
+ if len(firstMessageBlock) > 32 {
+ firstMessageBlock = firstMessageBlock[:32]
+ }
+
+ tagOut := out
+ out = out[poly1305.TagSize:]
+ for i, x := range firstMessageBlock {
+ out[i] = firstBlock[32+i] ^ x
+ }
+ message = message[len(firstMessageBlock):]
+ ciphertext := out
+ out = out[len(firstMessageBlock):]
+
+ // Now encrypt the rest.
+ counter[8] = 1
+ salsa.XORKeyStream(out, message, &counter, &subKey)
+
+ var tag [poly1305.TagSize]byte
+ poly1305.Sum(&tag, ciphertext, &poly1305Key)
+ copy(tagOut, tag[:])
+
+ return ret
+}
+
+// Open authenticates and decrypts a box produced by Seal and appends the
+// message to out, which must not overlap box. The output will be Overhead
+// bytes smaller than box.
+func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
+ if len(box) < Overhead {
+ return nil, false
+ }
+
+ var subKey [32]byte
+ var counter [16]byte
+ setup(&subKey, &counter, nonce, key)
+
+ // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
+ // Salsa20 works with 64-byte blocks, we also generate 32 bytes of
+ // keystream as a side effect.
+ var firstBlock [64]byte
+ salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
+
+ var poly1305Key [32]byte
+ copy(poly1305Key[:], firstBlock[:])
+ var tag [poly1305.TagSize]byte
+ copy(tag[:], box)
+
+ if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
+ return nil, false
+ }
+
+ ret, out := sliceForAppend(out, len(box)-Overhead)
+ if alias.AnyOverlap(out, box) {
+ panic("nacl: invalid buffer overlap")
+ }
+
+ // We XOR up to 32 bytes of box with the keystream generated from
+ // the first block.
+ box = box[Overhead:]
+ firstMessageBlock := box
+ if len(firstMessageBlock) > 32 {
+ firstMessageBlock = firstMessageBlock[:32]
+ }
+ for i, x := range firstMessageBlock {
+ out[i] = firstBlock[32+i] ^ x
+ }
+
+ box = box[len(firstMessageBlock):]
+ out = out[len(firstMessageBlock):]
+
+ // Now decrypt the rest.
+ counter[8] = 1
+ salsa.XORKeyStream(out, box, &counter, &subKey)
+
+ return ret, true
+}
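
A minimal usage sketch of the newly vendored package, assuming only that the caller never reuses a nonce under the same key; here the nonce is drawn at random, which the package doc above deems safe given the 24-byte nonce size:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	// Seal appends ciphertext to its first argument; nil starts fresh.
	box := secretbox.Seal(nil, []byte("hello"), &nonce, &key)
	// box is len("hello") + secretbox.Overhead bytes long.

	msg, ok := secretbox.Open(nil, box, &nonce, &key)
	fmt.Println(ok, string(msg)) // true hello
}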
diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go
new file mode 100644
index 00000000000..4269ed113be
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go
@@ -0,0 +1,792 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses
+// are signed messages attesting to the validity of a certificate for a small
+// period of time. This is used to manage revocation for X.509 certificates.
+package ocsp // import "golang.org/x/crypto/ocsp"
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ _ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "math/big"
+ "strconv"
+ "time"
+)
+
+var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1})
+
+// ResponseStatus contains the result of an OCSP request. See
+// https://tools.ietf.org/html/rfc6960#section-2.3
+type ResponseStatus int
+
+const (
+ Success ResponseStatus = 0
+ Malformed ResponseStatus = 1
+ InternalError ResponseStatus = 2
+ TryLater ResponseStatus = 3
+ // Status code four is unused in OCSP. See
+ // https://tools.ietf.org/html/rfc6960#section-4.2.1
+ SignatureRequired ResponseStatus = 5
+ Unauthorized ResponseStatus = 6
+)
+
+func (r ResponseStatus) String() string {
+ switch r {
+ case Success:
+ return "success"
+ case Malformed:
+ return "malformed"
+ case InternalError:
+ return "internal error"
+ case TryLater:
+ return "try later"
+ case SignatureRequired:
+ return "signature required"
+ case Unauthorized:
+ return "unauthorized"
+ default:
+ return "unknown OCSP status: " + strconv.Itoa(int(r))
+ }
+}
+
+// ResponseError is an error that may be returned by ParseResponse to indicate
+// that the response itself is an error, not just that it's indicating that a
+// certificate is revoked, unknown, etc.
+type ResponseError struct {
+ Status ResponseStatus
+}
+
+func (r ResponseError) Error() string {
+ return "ocsp: error from server: " + r.Status.String()
+}
+
+// These are internal structures that reflect the ASN.1 structure of an OCSP
+// response. See RFC 2560, section 4.2.
+
+type certID struct {
+ HashAlgorithm pkix.AlgorithmIdentifier
+ NameHash []byte
+ IssuerKeyHash []byte
+ SerialNumber *big.Int
+}
+
+// https://tools.ietf.org/html/rfc2560#section-4.1.1
+type ocspRequest struct {
+ TBSRequest tbsRequest
+}
+
+type tbsRequest struct {
+ Version int `asn1:"explicit,tag:0,default:0,optional"`
+ RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"`
+ RequestList []request
+}
+
+type request struct {
+ Cert certID
+}
+
+type responseASN1 struct {
+ Status asn1.Enumerated
+ Response responseBytes `asn1:"explicit,tag:0,optional"`
+}
+
+type responseBytes struct {
+ ResponseType asn1.ObjectIdentifier
+ Response []byte
+}
+
+type basicResponse struct {
+ TBSResponseData responseData
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ Signature asn1.BitString
+ Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"`
+}
+
+type responseData struct {
+ Raw asn1.RawContent
+ Version int `asn1:"optional,default:0,explicit,tag:0"`
+ RawResponderID asn1.RawValue
+ ProducedAt time.Time `asn1:"generalized"`
+ Responses []singleResponse
+}
+
+type singleResponse struct {
+ CertID certID
+ Good asn1.Flag `asn1:"tag:0,optional"`
+ Revoked revokedInfo `asn1:"tag:1,optional"`
+ Unknown asn1.Flag `asn1:"tag:2,optional"`
+ ThisUpdate time.Time `asn1:"generalized"`
+ NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"`
+ SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"`
+}
+
+type revokedInfo struct {
+ RevocationTime time.Time `asn1:"generalized"`
+ Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"`
+}
+
+var (
+ oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+ oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+ oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+ oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+ oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+ oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+ oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+ oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
+ oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+ oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+ oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+ oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+)
+
+var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{
+ crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}),
+ crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}),
+ crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}),
+ crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}),
+}
+
+// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
+var signatureAlgorithmDetails = []struct {
+ algo x509.SignatureAlgorithm
+ oid asn1.ObjectIdentifier
+ pubKeyAlgo x509.PublicKeyAlgorithm
+ hash crypto.Hash
+}{
+ {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */},
+ {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5},
+ {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
+ {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256},
+ {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384},
+ {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512},
+ {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1},
+ {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256},
+ {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1},
+ {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256},
+ {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384},
+ {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512},
+}
+
+// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
+func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
+ var pubType x509.PublicKeyAlgorithm
+
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ pubType = x509.RSA
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureSHA256WithRSA
+ sigAlgo.Parameters = asn1.RawValue{
+ Tag: 5, /* ASN.1 NULL */
+ }
+
+ case *ecdsa.PublicKey:
+ pubType = x509.ECDSA
+
+ switch pub.Curve {
+ case elliptic.P224(), elliptic.P256():
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
+ case elliptic.P384():
+ hashFunc = crypto.SHA384
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
+ case elliptic.P521():
+ hashFunc = crypto.SHA512
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
+ default:
+ err = errors.New("x509: unknown elliptic curve")
+ }
+
+ default:
+ err = errors.New("x509: only RSA and ECDSA keys supported")
+ }
+
+ if err != nil {
+ return
+ }
+
+ if requestedSigAlgo == 0 {
+ return
+ }
+
+ found := false
+ for _, details := range signatureAlgorithmDetails {
+ if details.algo == requestedSigAlgo {
+ if details.pubKeyAlgo != pubType {
+ err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
+ return
+ }
+ sigAlgo.Algorithm, hashFunc = details.oid, details.hash
+ if hashFunc == 0 {
+ err = errors.New("x509: cannot sign with hash function requested")
+ return
+ }
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ err = errors.New("x509: unknown SignatureAlgorithm")
+ }
+
+ return
+}
+
+// TODO(agl): this is taken from crypto/x509 and so should probably be exported
+// from crypto/x509 or crypto/x509/pkix.
+func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm {
+ for _, details := range signatureAlgorithmDetails {
+ if oid.Equal(details.oid) {
+ return details.algo
+ }
+ }
+ return x509.UnknownSignatureAlgorithm
+}
+
+// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form.
+func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash {
+ for hash, oid := range hashOIDs {
+ if oid.Equal(target) {
+ return hash
+ }
+ }
+ return crypto.Hash(0)
+}
+
+func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier {
+ for hash, oid := range hashOIDs {
+ if hash == target {
+ return oid
+ }
+ }
+ return nil
+}
+
+// This is the exposed reflection of the internal OCSP structures.
+
+// The status values that can be expressed in OCSP. See RFC 6960.
+const (
+ // Good means that the certificate is valid.
+ Good = iota
+ // Revoked means that the certificate has been deliberately revoked.
+ Revoked
+ // Unknown means that the OCSP responder doesn't know about the certificate.
+ Unknown
+ // ServerFailed is deprecated and is never returned by this package (see
+ // https://go-review.googlesource.com/#/c/18944). ParseResponse will
+ // return a ResponseError when an error response is parsed.
+ ServerFailed
+)
+
+// The enumerated reasons for revoking a certificate. See RFC 5280.
+const (
+ Unspecified = 0
+ KeyCompromise = 1
+ CACompromise = 2
+ AffiliationChanged = 3
+ Superseded = 4
+ CessationOfOperation = 5
+ CertificateHold = 6
+
+ RemoveFromCRL = 8
+ PrivilegeWithdrawn = 9
+ AACompromise = 10
+)
+
+// Request represents an OCSP request. See RFC 6960.
+type Request struct {
+ HashAlgorithm crypto.Hash
+ IssuerNameHash []byte
+ IssuerKeyHash []byte
+ SerialNumber *big.Int
+}
+
+// Marshal marshals the OCSP request to ASN.1 DER encoded form.
+func (req *Request) Marshal() ([]byte, error) {
+ hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm)
+ if hashAlg == nil {
+ return nil, errors.New("unknown hash algorithm")
+ }
+ return asn1.Marshal(ocspRequest{
+ tbsRequest{
+ Version: 0,
+ RequestList: []request{
+ {
+ Cert: certID{
+ pkix.AlgorithmIdentifier{
+ Algorithm: hashAlg,
+ Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
+ },
+ req.IssuerNameHash,
+ req.IssuerKeyHash,
+ req.SerialNumber,
+ },
+ },
+ },
+ },
+ })
+}
+
+// Response represents an OCSP response containing a single SingleResponse. See
+// RFC 6960.
+type Response struct {
+ Raw []byte
+
+ // Status is one of {Good, Revoked, Unknown}
+ Status int
+ SerialNumber *big.Int
+ ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time
+ RevocationReason int
+ Certificate *x509.Certificate
+ // TBSResponseData contains the raw bytes of the signed response. If
+ // Certificate is nil then this can be used to verify Signature.
+ TBSResponseData []byte
+ Signature []byte
+ SignatureAlgorithm x509.SignatureAlgorithm
+
+ // IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash.
+ // Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512.
+ // If zero, the default is crypto.SHA1.
+ IssuerHash crypto.Hash
+
+ // RawResponderName optionally contains the DER-encoded subject of the
+ // responder certificate. Exactly one of RawResponderName and
+ // ResponderKeyHash is set.
+ RawResponderName []byte
+ // ResponderKeyHash optionally contains the SHA-1 hash of the
+ // responder's public key. Exactly one of RawResponderName and
+ // ResponderKeyHash is set.
+ ResponderKeyHash []byte
+
+ // Extensions contains raw X.509 extensions from the singleExtensions field
+ // of the OCSP response. When parsing responses, this can be used to
+ // extract non-critical extensions that are not parsed by this package. When
+ // marshaling OCSP responses, the Extensions field is ignored, see
+ // ExtraExtensions.
+ Extensions []pkix.Extension
+
+ // ExtraExtensions contains extensions to be copied, raw, into any marshaled
+ // OCSP response (in the singleExtensions field). Values override any
+ // extensions that would otherwise be produced based on the other fields. The
+ // ExtraExtensions field is not populated when parsing responses, see
+ // Extensions.
+ ExtraExtensions []pkix.Extension
+}
+
+// These are pre-serialized error responses for the various non-success codes
+// defined by OCSP. The Unauthorized code in particular can be used by an OCSP
+// responder that supports only pre-signed responses as a response to requests
+// for certificates with unknown status. See RFC 5019.
+var (
+ MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01}
+ InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02}
+ TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03}
+ SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} // sic: the misspelled name is part of the exported API
+ UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06}
+)
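+
+// Editor's sketch (not part of the upstream package): an RFC 5019-style
+// responder that serves only pre-signed responses can answer a request for an
+// unknown certificate by writing one of the blobs above verbatim; w is an
+// assumed http.ResponseWriter:
+//
+//	w.Header().Set("Content-Type", "application/ocsp-response")
+//	w.Write(UnauthorizedErrorResponse)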
+
+// CheckSignatureFrom checks that the signature in resp is a valid signature
+// from issuer. This should only be used if resp.Certificate is nil. Otherwise,
+// the OCSP response contained an intermediate certificate that created the
+// signature. That signature is checked by ParseResponse and only
+// resp.Certificate remains to be validated.
+func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error {
+ return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature)
+}
+
+// ParseError results from an invalid OCSP response.
+type ParseError string
+
+func (p ParseError) Error() string {
+ return string(p)
+}
+
+// ParseRequest parses an OCSP request in DER form. It only supports
+// requests for a single certificate. Signed requests are not supported.
+// If a request includes a signature, it will result in a ParseError.
+func ParseRequest(bytes []byte) (*Request, error) {
+ var req ocspRequest
+ rest, err := asn1.Unmarshal(bytes, &req)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, ParseError("trailing data in OCSP request")
+ }
+
+ if len(req.TBSRequest.RequestList) == 0 {
+ return nil, ParseError("OCSP request contains no request body")
+ }
+ innerRequest := req.TBSRequest.RequestList[0]
+
+ hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm)
+ if hashFunc == crypto.Hash(0) {
+ return nil, ParseError("OCSP request uses unknown hash function")
+ }
+
+ return &Request{
+ HashAlgorithm: hashFunc,
+ IssuerNameHash: innerRequest.Cert.NameHash,
+ IssuerKeyHash: innerRequest.Cert.IssuerKeyHash,
+ SerialNumber: innerRequest.Cert.SerialNumber,
+ }, nil
+}
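+
+// Editor's sketch (not part of the upstream package): a responder typically
+// reads the body of an HTTP POST and feeds it to ParseRequest; lookup is a
+// hypothetical helper mapping serial numbers to statuses:
+//
+//	body, err := io.ReadAll(httpReq.Body)
+//	if err != nil {
+//		return err
+//	}
+//	req, err := ParseRequest(body)
+//	if err != nil {
+//		return err
+//	}
+//	status := lookup(req.SerialNumber)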
+
+// ParseResponse parses an OCSP response in DER form. The response must contain
+// only one certificate status. To parse the status of a specific certificate
+// from a response which may contain multiple statuses, use ParseResponseForCert
+// instead.
+//
+// If the response contains an embedded certificate, then that certificate will
+// be used to verify the response signature. If the response contains an
+// embedded certificate and issuer is not nil, then issuer will be used to verify
+// the signature on the embedded certificate.
+//
+// If the response does not contain an embedded certificate and issuer is not
+// nil, then issuer will be used to verify the response signature.
+//
+// Invalid responses and parse failures will result in a ParseError.
+// Error responses will result in a ResponseError.
+func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) {
+ return ParseResponseForCert(bytes, nil, issuer)
+}
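+
+// Editor's sketch (not part of the upstream package): a client that has
+// already fetched the DER bytes can parse the response and then check
+// freshness before trusting the status; der and issuer are assumed inputs:
+//
+//	resp, err := ParseResponse(der, issuer)
+//	if err != nil {
+//		return err
+//	}
+//	if !resp.NextUpdate.IsZero() && time.Now().After(resp.NextUpdate) {
+//		return errors.New("stale OCSP response")
+//	}
+//	revoked := resp.Status == Revoked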
+
+// ParseResponseForCert acts identically to ParseResponse, except it supports
+// parsing responses that contain multiple statuses. If the response contains
+// multiple statuses and cert is not nil, then ParseResponseForCert will return
+// the first status which contains a matching serial, otherwise it will return an
+// error. If cert is nil, then the first status in the response will be returned.
+func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) {
+ var resp responseASN1
+ rest, err := asn1.Unmarshal(bytes, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, ParseError("trailing data in OCSP response")
+ }
+
+ if status := ResponseStatus(resp.Status); status != Success {
+ return nil, ResponseError{status}
+ }
+
+ if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) {
+ return nil, ParseError("bad OCSP response type")
+ }
+
+ var basicResp basicResponse
+ rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, ParseError("trailing data in OCSP response")
+ }
+
+ if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 {
+ return nil, ParseError("OCSP response contains bad number of responses")
+ }
+
+ var singleResp singleResponse
+ if cert == nil {
+ singleResp = basicResp.TBSResponseData.Responses[0]
+ } else {
+ match := false
+ for _, resp := range basicResp.TBSResponseData.Responses {
+ if cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 {
+ singleResp = resp
+ match = true
+ break
+ }
+ }
+ if !match {
+ return nil, ParseError("no response matching the supplied certificate")
+ }
+ }
+
+ ret := &Response{
+ Raw: bytes,
+ TBSResponseData: basicResp.TBSResponseData.Raw,
+ Signature: basicResp.Signature.RightAlign(),
+ SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm),
+ Extensions: singleResp.SingleExtensions,
+ SerialNumber: singleResp.CertID.SerialNumber,
+ ProducedAt: basicResp.TBSResponseData.ProducedAt,
+ ThisUpdate: singleResp.ThisUpdate,
+ NextUpdate: singleResp.NextUpdate,
+ }
+
+ // Handle the ResponderID CHOICE tag. ResponderID can be flattened into
+ // TBSResponseData once https://go-review.googlesource.com/34503 has been
+ // released.
+ rawResponderID := basicResp.TBSResponseData.RawResponderID
+ switch rawResponderID.Tag {
+ case 1: // Name
+ var rdn pkix.RDNSequence
+ if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 {
+ return nil, ParseError("invalid responder name")
+ }
+ ret.RawResponderName = rawResponderID.Bytes
+ case 2: // KeyHash
+ if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 {
+ return nil, ParseError("invalid responder key hash")
+ }
+ default:
+ return nil, ParseError("invalid responder id tag")
+ }
+
+ if len(basicResp.Certificates) > 0 {
+ // Responders should only send a single certificate (if they
+ // send any) that connects the responder's certificate to the
+ // original issuer. We accept responses with multiple
+ // certificates due to a number of responders sending them[1], but
+ // ignore all but the first.
+ //
+ // [1] https://github.com/golang/go/issues/21527
+ ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := ret.CheckSignatureFrom(ret.Certificate); err != nil {
+ return nil, ParseError("bad signature on embedded certificate: " + err.Error())
+ }
+
+ if issuer != nil {
+ if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil {
+ return nil, ParseError("bad OCSP signature: " + err.Error())
+ }
+ }
+ } else if issuer != nil {
+ if err := ret.CheckSignatureFrom(issuer); err != nil {
+ return nil, ParseError("bad OCSP signature: " + err.Error())
+ }
+ }
+
+ for _, ext := range singleResp.SingleExtensions {
+ if ext.Critical {
+ return nil, ParseError("unsupported critical extension")
+ }
+ }
+
+ for h, oid := range hashOIDs {
+ if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) {
+ ret.IssuerHash = h
+ break
+ }
+ }
+ if ret.IssuerHash == 0 {
+ return nil, ParseError("unsupported issuer hash algorithm")
+ }
+
+ switch {
+ case bool(singleResp.Good):
+ ret.Status = Good
+ case bool(singleResp.Unknown):
+ ret.Status = Unknown
+ default:
+ ret.Status = Revoked
+ ret.RevokedAt = singleResp.Revoked.RevocationTime
+ ret.RevocationReason = int(singleResp.Revoked.Reason)
+ }
+
+ return ret, nil
+}
+
+// RequestOptions contains options for constructing OCSP requests.
+type RequestOptions struct {
+ // Hash contains the hash function that should be used when
+ // constructing the OCSP request. If zero, SHA-1 will be used.
+ Hash crypto.Hash
+}
+
+func (opts *RequestOptions) hash() crypto.Hash {
+ if opts == nil || opts.Hash == 0 {
+ // SHA-1 is nearly universally used in OCSP.
+ return crypto.SHA1
+ }
+ return opts.Hash
+}
+
+// CreateRequest returns a DER-encoded OCSP request for the status of cert. If
+// opts is nil then sensible defaults are used.
+func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) {
+ hashFunc := opts.hash()
+
+ // OCSP seems to be the only place where these raw hash identifiers are
+ // used. I took the following from
+ // http://msdn.microsoft.com/en-us/library/ff635603.aspx
+ _, ok := hashOIDs[hashFunc]
+ if !ok {
+ return nil, x509.ErrUnsupportedAlgorithm
+ }
+
+ if !hashFunc.Available() {
+ return nil, x509.ErrUnsupportedAlgorithm
+ }
+ h := hashFunc.New()
+
+ var publicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+ }
+ if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+ return nil, err
+ }
+
+ h.Write(publicKeyInfo.PublicKey.RightAlign())
+ issuerKeyHash := h.Sum(nil)
+
+ h.Reset()
+ h.Write(issuer.RawSubject)
+ issuerNameHash := h.Sum(nil)
+
+ req := &Request{
+ HashAlgorithm: hashFunc,
+ IssuerNameHash: issuerNameHash,
+ IssuerKeyHash: issuerKeyHash,
+ SerialNumber: cert.SerialNumber,
+ }
+ return req.Marshal()
+}
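+
+// Editor's sketch (not part of the upstream package): the usual client flow is
+// to marshal a request for cert/issuer (both assumed *x509.Certificate) and
+// POST it to one of the URLs listed in cert.OCSPServer:
+//
+//	der, err := CreateRequest(cert, issuer, nil) // nil opts => SHA-1
+//	if err != nil {
+//		return err
+//	}
+//	httpResp, err := http.Post(cert.OCSPServer[0], "application/ocsp-request",
+//		bytes.NewReader(der))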
+
+// CreateResponse returns a DER-encoded OCSP response with the specified contents.
+// The fields in the response are populated as follows:
+//
+// The responder cert is used to populate the responder's name field, and the
+// certificate itself is provided alongside the OCSP response signature.
+//
+// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields.
+//
+// The template is used to populate the SerialNumber, Status, RevokedAt,
+// RevocationReason, ThisUpdate, and NextUpdate fields.
+//
+// If template.IssuerHash is not set, SHA1 will be used.
+//
+// The ProducedAt date is automatically set to the current time, truncated to the minute.
+func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) {
+ var publicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+ }
+ if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+ return nil, err
+ }
+
+ if template.IssuerHash == 0 {
+ template.IssuerHash = crypto.SHA1
+ }
+ hashOID := getOIDFromHashAlgorithm(template.IssuerHash)
+ if hashOID == nil {
+ return nil, errors.New("unsupported issuer hash algorithm")
+ }
+
+ if !template.IssuerHash.Available() {
+ return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash)
+ }
+ h := template.IssuerHash.New()
+ h.Write(publicKeyInfo.PublicKey.RightAlign())
+ issuerKeyHash := h.Sum(nil)
+
+ h.Reset()
+ h.Write(issuer.RawSubject)
+ issuerNameHash := h.Sum(nil)
+
+ innerResponse := singleResponse{
+ CertID: certID{
+ HashAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: hashOID,
+ Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
+ },
+ NameHash: issuerNameHash,
+ IssuerKeyHash: issuerKeyHash,
+ SerialNumber: template.SerialNumber,
+ },
+ ThisUpdate: template.ThisUpdate.UTC(),
+ NextUpdate: template.NextUpdate.UTC(),
+ SingleExtensions: template.ExtraExtensions,
+ }
+
+ switch template.Status {
+ case Good:
+ innerResponse.Good = true
+ case Unknown:
+ innerResponse.Unknown = true
+ case Revoked:
+ innerResponse.Revoked = revokedInfo{
+ RevocationTime: template.RevokedAt.UTC(),
+ Reason: asn1.Enumerated(template.RevocationReason),
+ }
+ }
+
+ rawResponderID := asn1.RawValue{
+ Class: 2, // context-specific
+ Tag: 1, // Name (explicit tag)
+ IsCompound: true,
+ Bytes: responderCert.RawSubject,
+ }
+ tbsResponseData := responseData{
+ Version: 0,
+ RawResponderID: rawResponderID,
+ ProducedAt: time.Now().Truncate(time.Minute).UTC(),
+ Responses: []singleResponse{innerResponse},
+ }
+
+ tbsResponseDataDER, err := asn1.Marshal(tbsResponseData)
+ if err != nil {
+ return nil, err
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ responseHash := hashFunc.New()
+ responseHash.Write(tbsResponseDataDER)
+ signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc)
+ if err != nil {
+ return nil, err
+ }
+
+ response := basicResponse{
+ TBSResponseData: tbsResponseData,
+ SignatureAlgorithm: signatureAlgorithm,
+ Signature: asn1.BitString{
+ Bytes: signature,
+ BitLength: 8 * len(signature),
+ },
+ }
+ if template.Certificate != nil {
+ response.Certificates = []asn1.RawValue{
+ {FullBytes: template.Certificate.Raw},
+ }
+ }
+ responseDER, err := asn1.Marshal(response)
+ if err != nil {
+ return nil, err
+ }
+
+ return asn1.Marshal(responseASN1{
+ Status: asn1.Enumerated(Success),
+ Response: responseBytes{
+ ResponseType: idPKIXOCSPBasic,
+ Response: responseDER,
+ },
+ })
+}
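+
+// Editor's sketch (not part of the upstream package): a minimal responder
+// producing a signed "good" answer; issuerCert, responderCert and responderKey
+// (a crypto.Signer) are assumed to be the CA and its delegated OCSP signing
+// credentials, and serial an assumed *big.Int:
+//
+//	tmpl := Response{
+//		Status:       Good,
+//		SerialNumber: serial,
+//		ThisUpdate:   time.Now(),
+//		NextUpdate:   time.Now().Add(24 * time.Hour),
+//		Certificate:  responderCert, // embed the signer so clients can verify
+//	}
+//	der, err := CreateResponse(issuerCert, responderCert, tmpl, responderKey)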
diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go
index 36a6804364c..be342ad473a 100644
--- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go
+++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go
@@ -4,6 +4,12 @@
// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
// very similar to PEM except that it has an additional CRC checksum.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
package armor // import "golang.org/x/crypto/openpgp/armor"
import (
@@ -17,12 +23,14 @@ import (
// A Block represents an OpenPGP armored structure.
//
// The encoded form is:
-// -----BEGIN Type-----
-// Headers
//
-// base64-encoded Bytes
-// '=' base64 encoded checksum
-// -----END Type-----
+// -----BEGIN Type-----
+// Headers
+//
+// base64-encoded Bytes
+// '=' base64 encoded checksum
+// -----END Type-----
+//
// where Headers is a possibly empty sequence of Key: Value lines.
//
// Since the armored data can be very large, this package presents a streaming
diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go
index 6f07582c37c..5b6e16c19d5 100644
--- a/vendor/golang.org/x/crypto/openpgp/armor/encode.go
+++ b/vendor/golang.org/x/crypto/openpgp/armor/encode.go
@@ -96,7 +96,8 @@ func (l *lineBreaker) Close() (err error) {
// trailer.
//
// It's built into a stack of io.Writers:
-// encoding -> base64 encoder -> lineBreaker -> out
+//
+// encoding -> base64 encoder -> lineBreaker -> out
type encoding struct {
out io.Writer
breaker *lineBreaker
diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
index 72a6a739471..743b35a1204 100644
--- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
+++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
@@ -10,6 +10,12 @@
// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
// unsuitable for other protocols. RSA should be used in preference in any
// case.
+//
+// Deprecated: this package was only provided to support ElGamal encryption in
+// OpenPGP. The golang.org/x/crypto/openpgp package is now deprecated (see
+// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has
+// compatibility and security issues (see https://eprint.iacr.org/2021/923).
+// Moreover, this package doesn't protect against side-channel attacks.
package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
import (
@@ -71,8 +77,8 @@ func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err
// returns the plaintext of the message. An error can result only if the
// ciphertext is invalid. Users should keep in mind that this is a padding
// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
-// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
-// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
+// be used to break the cryptosystem. See “Chosen Ciphertext Attacks
+// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel
// Bleichenbacher, Advances in Cryptology (Crypto '98),
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
s := new(big.Int).Exp(c1, priv.X, priv.P)
diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
index eb0550b2d04..1d7a0ea05ad 100644
--- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go
+++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
@@ -3,6 +3,12 @@
// license that can be found in the LICENSE file.
// Package errors contains common error types for the OpenPGP packages.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
package errors // import "golang.org/x/crypto/openpgp/errors"
import (
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go
index 456d807f255..3984477310f 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go
@@ -7,7 +7,6 @@ package packet
import (
"bytes"
"io"
- "io/ioutil"
"golang.org/x/crypto/openpgp/errors"
)
@@ -26,7 +25,7 @@ type OpaquePacket struct {
}
func (op *OpaquePacket) parse(r io.Reader) (err error) {
- op.Contents, err = ioutil.ReadAll(r)
+ op.Contents, err = io.ReadAll(r)
return
}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
index 9728d61d7aa..0a19794a8e4 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
@@ -4,6 +4,12 @@
// Package packet implements parsing and serialization of OpenPGP packets, as
// specified in RFC 4880.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
package packet // import "golang.org/x/crypto/openpgp/packet"
import (
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
index 81abb7cef98..192aac376d1 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
@@ -13,7 +13,6 @@ import (
"crypto/rsa"
"crypto/sha1"
"io"
- "io/ioutil"
"math/big"
"strconv"
"time"
@@ -133,7 +132,7 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
}
}
- pk.encryptedData, err = ioutil.ReadAll(r)
+ pk.encryptedData, err = io.ReadAll(r)
if err != nil {
return
}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
index 6126030eb90..1a1a62964fc 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
@@ -236,7 +236,7 @@ func (w *seMDCWriter) Close() (err error) {
return w.w.Close()
}
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
+// noOpCloser is like an io.NopCloser, but for an io.Writer.
type noOpCloser struct {
w io.Writer
}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
index d19ffbc7867..ff7ef530755 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
@@ -9,7 +9,6 @@ import (
"image"
"image/jpeg"
"io"
- "io/ioutil"
)
const UserAttrImageSubpacket = 1
@@ -56,7 +55,7 @@ func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
func (uat *UserAttribute) parse(r io.Reader) (err error) {
// RFC 4880, section 5.13
- b, err := ioutil.ReadAll(r)
+ b, err := io.ReadAll(r)
if err != nil {
return
}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/golang.org/x/crypto/openpgp/packet/userid.go
index d6bea7d4acc..359a462eb8a 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/userid.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/userid.go
@@ -6,7 +6,6 @@ package packet
import (
"io"
- "io/ioutil"
"strings"
)
@@ -66,7 +65,7 @@ func NewUserId(name, comment, email string) *UserId {
func (uid *UserId) parse(r io.Reader) (err error) {
// RFC 4880, section 5.11
- b, err := ioutil.ReadAll(r)
+ b, err := io.ReadAll(r)
if err != nil {
return
}
diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go
index 6ec664f44a1..48a89314685 100644
--- a/vendor/golang.org/x/crypto/openpgp/read.go
+++ b/vendor/golang.org/x/crypto/openpgp/read.go
@@ -3,6 +3,12 @@
// license that can be found in the LICENSE file.
// Package openpgp implements high level operations on OpenPGP messages.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
package openpgp // import "golang.org/x/crypto/openpgp"
import (
diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
index 4b9a44ca26d..9de04958ead 100644
--- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
+++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
@@ -4,6 +4,12 @@
// Package s2k implements the various OpenPGP string-to-key transforms as
// specified in RFC 4880 section 3.7.1.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
package s2k // import "golang.org/x/crypto/openpgp/s2k"
import (
diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go
index 4ee71784ebe..b89d48b81d7 100644
--- a/vendor/golang.org/x/crypto/openpgp/write.go
+++ b/vendor/golang.org/x/crypto/openpgp/write.go
@@ -402,7 +402,7 @@ func (s signatureWriter) Close() error {
return s.encryptedData.Close()
}
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
+// noOpCloser is like an io.NopCloser, but for an io.Writer.
// TODO: we have two of these in OpenPGP packages alone. This probably needs
// to be promoted somewhere more common.
type noOpCloser struct {
diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
index 593f6530084..904b57e01d7 100644
--- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
+++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -32,7 +32,7 @@ import (
// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
// doing:
//
-// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
//
// Remember to get a good random salt. At least 8 bytes is recommended by the
// RFC.
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
new file mode 100644
index 00000000000..4c96147c86b
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package salsa provides low-level access to functions in the Salsa family.
+package salsa // import "golang.org/x/crypto/salsa20/salsa"
+
+// Sigma is the Salsa20 constant for 256-bit keys.
+var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
+
+// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte
+// key k, and 16-byte constant c, and puts the result into the 32-byte array
+// out.
+func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
+ x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
+ x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
+ x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
+ x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
+ x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
+ x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
+ x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
+ x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
+ x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
+ x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
+ x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
+ x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
+ x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
+ x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
+ x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
+
+ for i := 0; i < 20; i += 2 {
+ u := x0 + x12
+ x4 ^= u<<7 | u>>(32-7)
+ u = x4 + x0
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x4
+ x12 ^= u<<13 | u>>(32-13)
+ u = x12 + x8
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x1
+ x9 ^= u<<7 | u>>(32-7)
+ u = x9 + x5
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x9
+ x1 ^= u<<13 | u>>(32-13)
+ u = x1 + x13
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x6
+ x14 ^= u<<7 | u>>(32-7)
+ u = x14 + x10
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x14
+ x6 ^= u<<13 | u>>(32-13)
+ u = x6 + x2
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x11
+ x3 ^= u<<7 | u>>(32-7)
+ u = x3 + x15
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x3
+ x11 ^= u<<13 | u>>(32-13)
+ u = x11 + x7
+ x15 ^= u<<18 | u>>(32-18)
+
+ u = x0 + x3
+ x1 ^= u<<7 | u>>(32-7)
+ u = x1 + x0
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x1
+ x3 ^= u<<13 | u>>(32-13)
+ u = x3 + x2
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x4
+ x6 ^= u<<7 | u>>(32-7)
+ u = x6 + x5
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x6
+ x4 ^= u<<13 | u>>(32-13)
+ u = x4 + x7
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x9
+ x11 ^= u<<7 | u>>(32-7)
+ u = x11 + x10
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x11
+ x9 ^= u<<13 | u>>(32-13)
+ u = x9 + x8
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x14
+ x12 ^= u<<7 | u>>(32-7)
+ u = x12 + x15
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x12
+ x14 ^= u<<13 | u>>(32-13)
+ u = x14 + x13
+ x15 ^= u<<18 | u>>(32-18)
+ }
+ out[0] = byte(x0)
+ out[1] = byte(x0 >> 8)
+ out[2] = byte(x0 >> 16)
+ out[3] = byte(x0 >> 24)
+
+ out[4] = byte(x5)
+ out[5] = byte(x5 >> 8)
+ out[6] = byte(x5 >> 16)
+ out[7] = byte(x5 >> 24)
+
+ out[8] = byte(x10)
+ out[9] = byte(x10 >> 8)
+ out[10] = byte(x10 >> 16)
+ out[11] = byte(x10 >> 24)
+
+ out[12] = byte(x15)
+ out[13] = byte(x15 >> 8)
+ out[14] = byte(x15 >> 16)
+ out[15] = byte(x15 >> 24)
+
+ out[16] = byte(x6)
+ out[17] = byte(x6 >> 8)
+ out[18] = byte(x6 >> 16)
+ out[19] = byte(x6 >> 24)
+
+ out[20] = byte(x7)
+ out[21] = byte(x7 >> 8)
+ out[22] = byte(x7 >> 16)
+ out[23] = byte(x7 >> 24)
+
+ out[24] = byte(x8)
+ out[25] = byte(x8 >> 8)
+ out[26] = byte(x8 >> 16)
+ out[27] = byte(x8 >> 24)
+
+ out[28] = byte(x9)
+ out[29] = byte(x9 >> 8)
+ out[30] = byte(x9 >> 16)
+ out[31] = byte(x9 >> 24)
+}
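+
+// Editor's sketch (not part of the upstream package): HSalsa20 is the key
+// derivation step of XSalsa20/NaCl, turning a 32-byte key and the first 16
+// bytes of a 24-byte nonce into a fresh subkey; key and nonce24 are assumed:
+//
+//	var subKey [32]byte
+//	var hNonce [16]byte
+//	copy(hNonce[:], nonce24[:16])
+//	HSalsa20(&subKey, &hNonce, &key, &Sigma)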
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
new file mode 100644
index 00000000000..9bfc0927ce8
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
@@ -0,0 +1,199 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package salsa
+
+// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
+// the result into the 64-byte array out. The input and output may be the same array.
+func Core208(out *[64]byte, in *[64]byte) {
+ j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
+ j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
+ j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
+ j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24
+ j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24
+ j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24
+ j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24
+ j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24
+ j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24
+ j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24
+ j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24
+ j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24
+ j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24
+ j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24
+ j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24
+
+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
+ x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
+
+ for i := 0; i < 8; i += 2 {
+ u := x0 + x12
+ x4 ^= u<<7 | u>>(32-7)
+ u = x4 + x0
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x4
+ x12 ^= u<<13 | u>>(32-13)
+ u = x12 + x8
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x1
+ x9 ^= u<<7 | u>>(32-7)
+ u = x9 + x5
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x9
+ x1 ^= u<<13 | u>>(32-13)
+ u = x1 + x13
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x6
+ x14 ^= u<<7 | u>>(32-7)
+ u = x14 + x10
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x14
+ x6 ^= u<<13 | u>>(32-13)
+ u = x6 + x2
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x11
+ x3 ^= u<<7 | u>>(32-7)
+ u = x3 + x15
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x3
+ x11 ^= u<<13 | u>>(32-13)
+ u = x11 + x7
+ x15 ^= u<<18 | u>>(32-18)
+
+ u = x0 + x3
+ x1 ^= u<<7 | u>>(32-7)
+ u = x1 + x0
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x1
+ x3 ^= u<<13 | u>>(32-13)
+ u = x3 + x2
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x4
+ x6 ^= u<<7 | u>>(32-7)
+ u = x6 + x5
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x6
+ x4 ^= u<<13 | u>>(32-13)
+ u = x4 + x7
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x9
+ x11 ^= u<<7 | u>>(32-7)
+ u = x11 + x10
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x11
+ x9 ^= u<<13 | u>>(32-13)
+ u = x9 + x8
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x14
+ x12 ^= u<<7 | u>>(32-7)
+ u = x12 + x15
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x12
+ x14 ^= u<<13 | u>>(32-13)
+ u = x14 + x13
+ x15 ^= u<<18 | u>>(32-18)
+ }
+ x0 += j0
+ x1 += j1
+ x2 += j2
+ x3 += j3
+ x4 += j4
+ x5 += j5
+ x6 += j6
+ x7 += j7
+ x8 += j8
+ x9 += j9
+ x10 += j10
+ x11 += j11
+ x12 += j12
+ x13 += j13
+ x14 += j14
+ x15 += j15
+
+ out[0] = byte(x0)
+ out[1] = byte(x0 >> 8)
+ out[2] = byte(x0 >> 16)
+ out[3] = byte(x0 >> 24)
+
+ out[4] = byte(x1)
+ out[5] = byte(x1 >> 8)
+ out[6] = byte(x1 >> 16)
+ out[7] = byte(x1 >> 24)
+
+ out[8] = byte(x2)
+ out[9] = byte(x2 >> 8)
+ out[10] = byte(x2 >> 16)
+ out[11] = byte(x2 >> 24)
+
+ out[12] = byte(x3)
+ out[13] = byte(x3 >> 8)
+ out[14] = byte(x3 >> 16)
+ out[15] = byte(x3 >> 24)
+
+ out[16] = byte(x4)
+ out[17] = byte(x4 >> 8)
+ out[18] = byte(x4 >> 16)
+ out[19] = byte(x4 >> 24)
+
+ out[20] = byte(x5)
+ out[21] = byte(x5 >> 8)
+ out[22] = byte(x5 >> 16)
+ out[23] = byte(x5 >> 24)
+
+ out[24] = byte(x6)
+ out[25] = byte(x6 >> 8)
+ out[26] = byte(x6 >> 16)
+ out[27] = byte(x6 >> 24)
+
+ out[28] = byte(x7)
+ out[29] = byte(x7 >> 8)
+ out[30] = byte(x7 >> 16)
+ out[31] = byte(x7 >> 24)
+
+ out[32] = byte(x8)
+ out[33] = byte(x8 >> 8)
+ out[34] = byte(x8 >> 16)
+ out[35] = byte(x8 >> 24)
+
+ out[36] = byte(x9)
+ out[37] = byte(x9 >> 8)
+ out[38] = byte(x9 >> 16)
+ out[39] = byte(x9 >> 24)
+
+ out[40] = byte(x10)
+ out[41] = byte(x10 >> 8)
+ out[42] = byte(x10 >> 16)
+ out[43] = byte(x10 >> 24)
+
+ out[44] = byte(x11)
+ out[45] = byte(x11 >> 8)
+ out[46] = byte(x11 >> 16)
+ out[47] = byte(x11 >> 24)
+
+ out[48] = byte(x12)
+ out[49] = byte(x12 >> 8)
+ out[50] = byte(x12 >> 16)
+ out[51] = byte(x12 >> 24)
+
+ out[52] = byte(x13)
+ out[53] = byte(x13 >> 8)
+ out[54] = byte(x13 >> 16)
+ out[55] = byte(x13 >> 24)
+
+ out[56] = byte(x14)
+ out[57] = byte(x14 >> 8)
+ out[58] = byte(x14 >> 16)
+ out[59] = byte(x14 >> 24)
+
+ out[60] = byte(x15)
+ out[61] = byte(x15 >> 8)
+ out[62] = byte(x15 >> 16)
+ out[63] = byte(x15 >> 24)
+}
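+
+// Editor's note (not part of the upstream package): the reduced-round
+// Salsa20/8 core exists here chiefly for scrypt's BlockMix step, which
+// repeatedly applies it to 64-byte blocks; in-place use is permitted:
+//
+//	var b [64]byte
+//	Core208(&b, &b)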
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
new file mode 100644
index 00000000000..c400dfcf7bc
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
@@ -0,0 +1,24 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+// +build amd64,!purego,gc
+
+package salsa
+
+// salsa2020XORKeyStream is implemented in salsa20_amd64.s.
+//
+//go:noescape
+func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
+
+// XORKeyStream crypts bytes from in to out using the given key and counters.
+// In and out must overlap entirely or not at all. Counter
+// contains the raw salsa20 counter bytes (both nonce and block counter).
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+ if len(in) == 0 {
+ return
+ }
+ _ = out[len(in)-1] // early bounds check: panics now if out is shorter than in
+ salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0])
+}
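+
+// Editor's sketch (not part of the upstream package): the 16-byte counter is
+// the 8-byte nonce followed by the 8-byte little-endian block counter, so a
+// caller resuming a stream at block n sets it up like this (nonce8, key, dst
+// and src are assumed):
+//
+//	var counter [16]byte
+//	copy(counter[:8], nonce8[:])
+//	binary.LittleEndian.PutUint64(counter[8:], n)
+//	XORKeyStream(dst, src, &counter, &key)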
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
new file mode 100644
index 00000000000..c0892772045
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
@@ -0,0 +1,881 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+// +build amd64,!purego,gc
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
+
+// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
+// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size.
+TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment
+ MOVQ out+0(FP),DI
+ MOVQ in+8(FP),SI
+ MOVQ n+16(FP),DX
+ MOVQ nonce+24(FP),CX
+ MOVQ key+32(FP),R8
+
+ MOVQ SP,R12
+ ADDQ $31, R12
+ ANDQ $~31, R12
+
+ MOVQ DX,R9
+ MOVQ CX,DX
+ MOVQ R8,R10
+ CMPQ R9,$0
+ JBE DONE
+ START:
+ MOVL 20(R10),CX
+ MOVL 0(R10),R8
+ MOVL 0(DX),AX
+ MOVL 16(R10),R11
+ MOVL CX,0(R12)
+ MOVL R8, 4 (R12)
+ MOVL AX, 8 (R12)
+ MOVL R11, 12 (R12)
+ MOVL 8(DX),CX
+ MOVL 24(R10),R8
+ MOVL 4(R10),AX
+ MOVL 4(DX),R11
+ MOVL CX,16(R12)
+ MOVL R8, 20 (R12)
+ MOVL AX, 24 (R12)
+ MOVL R11, 28 (R12)
+ MOVL 12(DX),CX
+ MOVL 12(R10),DX
+ MOVL 28(R10),R8
+ MOVL 8(R10),AX
+ MOVL DX,32(R12)
+ MOVL CX, 36 (R12)
+ MOVL R8, 40 (R12)
+ MOVL AX, 44 (R12)
+ MOVQ $1634760805,DX
+ MOVQ $857760878,CX
+ MOVQ $2036477234,R8
+ MOVQ $1797285236,AX
+ MOVL DX,48(R12)
+ MOVL CX, 52 (R12)
+ MOVL R8, 56 (R12)
+ MOVL AX, 60 (R12)
+ CMPQ R9,$256
+ JB BYTESBETWEEN1AND255
+ MOVOA 48(R12),X0
+ PSHUFL $0X55,X0,X1
+ PSHUFL $0XAA,X0,X2
+ PSHUFL $0XFF,X0,X3
+ PSHUFL $0X00,X0,X0
+ MOVOA X1,64(R12)
+ MOVOA X2,80(R12)
+ MOVOA X3,96(R12)
+ MOVOA X0,112(R12)
+ MOVOA 0(R12),X0
+ PSHUFL $0XAA,X0,X1
+ PSHUFL $0XFF,X0,X2
+ PSHUFL $0X00,X0,X3
+ PSHUFL $0X55,X0,X0
+ MOVOA X1,128(R12)
+ MOVOA X2,144(R12)
+ MOVOA X3,160(R12)
+ MOVOA X0,176(R12)
+ MOVOA 16(R12),X0
+ PSHUFL $0XFF,X0,X1
+ PSHUFL $0X55,X0,X2
+ PSHUFL $0XAA,X0,X0
+ MOVOA X1,192(R12)
+ MOVOA X2,208(R12)
+ MOVOA X0,224(R12)
+ MOVOA 32(R12),X0
+ PSHUFL $0X00,X0,X1
+ PSHUFL $0XAA,X0,X2
+ PSHUFL $0XFF,X0,X0
+ MOVOA X1,240(R12)
+ MOVOA X2,256(R12)
+ MOVOA X0,272(R12)
+ BYTESATLEAST256:
+ MOVL 16(R12),DX
+ MOVL 36 (R12),CX
+ MOVL DX,288(R12)
+ MOVL CX,304(R12)
+ SHLQ $32,CX
+ ADDQ CX,DX
+ ADDQ $1,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX, 292 (R12)
+ MOVL CX, 308 (R12)
+ ADDQ $1,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX, 296 (R12)
+ MOVL CX, 312 (R12)
+ ADDQ $1,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX, 300 (R12)
+ MOVL CX, 316 (R12)
+ ADDQ $1,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX,16(R12)
+ MOVL CX, 36 (R12)
+ MOVQ R9,352(R12)
+ MOVQ $20,DX
+ MOVOA 64(R12),X0
+ MOVOA 80(R12),X1
+ MOVOA 96(R12),X2
+ MOVOA 256(R12),X3
+ MOVOA 272(R12),X4
+ MOVOA 128(R12),X5
+ MOVOA 144(R12),X6
+ MOVOA 176(R12),X7
+ MOVOA 192(R12),X8
+ MOVOA 208(R12),X9
+ MOVOA 224(R12),X10
+ MOVOA 304(R12),X11
+ MOVOA 112(R12),X12
+ MOVOA 160(R12),X13
+ MOVOA 240(R12),X14
+ MOVOA 288(R12),X15
+ MAINLOOP1:
+ MOVOA X1,320(R12)
+ MOVOA X2,336(R12)
+ MOVOA X13,X1
+ PADDL X12,X1
+ MOVOA X1,X2
+ PSLLL $7,X1
+ PXOR X1,X14
+ PSRLL $25,X2
+ PXOR X2,X14
+ MOVOA X7,X1
+ PADDL X0,X1
+ MOVOA X1,X2
+ PSLLL $7,X1
+ PXOR X1,X11
+ PSRLL $25,X2
+ PXOR X2,X11
+ MOVOA X12,X1
+ PADDL X14,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X15
+ PSRLL $23,X2
+ PXOR X2,X15
+ MOVOA X0,X1
+ PADDL X11,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X9
+ PSRLL $23,X2
+ PXOR X2,X9
+ MOVOA X14,X1
+ PADDL X15,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X13
+ PSRLL $19,X2
+ PXOR X2,X13
+ MOVOA X11,X1
+ PADDL X9,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X7
+ PSRLL $19,X2
+ PXOR X2,X7
+ MOVOA X15,X1
+ PADDL X13,X1
+ MOVOA X1,X2
+ PSLLL $18,X1
+ PXOR X1,X12
+ PSRLL $14,X2
+ PXOR X2,X12
+ MOVOA 320(R12),X1
+ MOVOA X12,320(R12)
+ MOVOA X9,X2
+ PADDL X7,X2
+ MOVOA X2,X12
+ PSLLL $18,X2
+ PXOR X2,X0
+ PSRLL $14,X12
+ PXOR X12,X0
+ MOVOA X5,X2
+ PADDL X1,X2
+ MOVOA X2,X12
+ PSLLL $7,X2
+ PXOR X2,X3
+ PSRLL $25,X12
+ PXOR X12,X3
+ MOVOA 336(R12),X2
+ MOVOA X0,336(R12)
+ MOVOA X6,X0
+ PADDL X2,X0
+ MOVOA X0,X12
+ PSLLL $7,X0
+ PXOR X0,X4
+ PSRLL $25,X12
+ PXOR X12,X4
+ MOVOA X1,X0
+ PADDL X3,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X10
+ PSRLL $23,X12
+ PXOR X12,X10
+ MOVOA X2,X0
+ PADDL X4,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X8
+ PSRLL $23,X12
+ PXOR X12,X8
+ MOVOA X3,X0
+ PADDL X10,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X5
+ PSRLL $19,X12
+ PXOR X12,X5
+ MOVOA X4,X0
+ PADDL X8,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X6
+ PSRLL $19,X12
+ PXOR X12,X6
+ MOVOA X10,X0
+ PADDL X5,X0
+ MOVOA X0,X12
+ PSLLL $18,X0
+ PXOR X0,X1
+ PSRLL $14,X12
+ PXOR X12,X1
+ MOVOA 320(R12),X0
+ MOVOA X1,320(R12)
+ MOVOA X4,X1
+ PADDL X0,X1
+ MOVOA X1,X12
+ PSLLL $7,X1
+ PXOR X1,X7
+ PSRLL $25,X12
+ PXOR X12,X7
+ MOVOA X8,X1
+ PADDL X6,X1
+ MOVOA X1,X12
+ PSLLL $18,X1
+ PXOR X1,X2
+ PSRLL $14,X12
+ PXOR X12,X2
+ MOVOA 336(R12),X12
+ MOVOA X2,336(R12)
+ MOVOA X14,X1
+ PADDL X12,X1
+ MOVOA X1,X2
+ PSLLL $7,X1
+ PXOR X1,X5
+ PSRLL $25,X2
+ PXOR X2,X5
+ MOVOA X0,X1
+ PADDL X7,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X10
+ PSRLL $23,X2
+ PXOR X2,X10
+ MOVOA X12,X1
+ PADDL X5,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X8
+ PSRLL $23,X2
+ PXOR X2,X8
+ MOVOA X7,X1
+ PADDL X10,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X4
+ PSRLL $19,X2
+ PXOR X2,X4
+ MOVOA X5,X1
+ PADDL X8,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X14
+ PSRLL $19,X2
+ PXOR X2,X14
+ MOVOA X10,X1
+ PADDL X4,X1
+ MOVOA X1,X2
+ PSLLL $18,X1
+ PXOR X1,X0
+ PSRLL $14,X2
+ PXOR X2,X0
+ MOVOA 320(R12),X1
+ MOVOA X0,320(R12)
+ MOVOA X8,X0
+ PADDL X14,X0
+ MOVOA X0,X2
+ PSLLL $18,X0
+ PXOR X0,X12
+ PSRLL $14,X2
+ PXOR X2,X12
+ MOVOA X11,X0
+ PADDL X1,X0
+ MOVOA X0,X2
+ PSLLL $7,X0
+ PXOR X0,X6
+ PSRLL $25,X2
+ PXOR X2,X6
+ MOVOA 336(R12),X2
+ MOVOA X12,336(R12)
+ MOVOA X3,X0
+ PADDL X2,X0
+ MOVOA X0,X12
+ PSLLL $7,X0
+ PXOR X0,X13
+ PSRLL $25,X12
+ PXOR X12,X13
+ MOVOA X1,X0
+ PADDL X6,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X15
+ PSRLL $23,X12
+ PXOR X12,X15
+ MOVOA X2,X0
+ PADDL X13,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X9
+ PSRLL $23,X12
+ PXOR X12,X9
+ MOVOA X6,X0
+ PADDL X15,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X11
+ PSRLL $19,X12
+ PXOR X12,X11
+ MOVOA X13,X0
+ PADDL X9,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X3
+ PSRLL $19,X12
+ PXOR X12,X3
+ MOVOA X15,X0
+ PADDL X11,X0
+ MOVOA X0,X12
+ PSLLL $18,X0
+ PXOR X0,X1
+ PSRLL $14,X12
+ PXOR X12,X1
+ MOVOA X9,X0
+ PADDL X3,X0
+ MOVOA X0,X12
+ PSLLL $18,X0
+ PXOR X0,X2
+ PSRLL $14,X12
+ PXOR X12,X2
+ MOVOA 320(R12),X12
+ MOVOA 336(R12),X0
+ SUBQ $2,DX
+ JA MAINLOOP1
+ PADDL 112(R12),X12
+ PADDL 176(R12),X7
+ PADDL 224(R12),X10
+ PADDL 272(R12),X4
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ PSHUFL $0X39,X12,X12
+ PSHUFL $0X39,X7,X7
+ PSHUFL $0X39,X10,X10
+ PSHUFL $0X39,X4,X4
+ XORL 0(SI),DX
+ XORL 4(SI),CX
+ XORL 8(SI),R8
+ XORL 12(SI),R9
+ MOVL DX,0(DI)
+ MOVL CX,4(DI)
+ MOVL R8,8(DI)
+ MOVL R9,12(DI)
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ PSHUFL $0X39,X12,X12
+ PSHUFL $0X39,X7,X7
+ PSHUFL $0X39,X10,X10
+ PSHUFL $0X39,X4,X4
+ XORL 64(SI),DX
+ XORL 68(SI),CX
+ XORL 72(SI),R8
+ XORL 76(SI),R9
+ MOVL DX,64(DI)
+ MOVL CX,68(DI)
+ MOVL R8,72(DI)
+ MOVL R9,76(DI)
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ PSHUFL $0X39,X12,X12
+ PSHUFL $0X39,X7,X7
+ PSHUFL $0X39,X10,X10
+ PSHUFL $0X39,X4,X4
+ XORL 128(SI),DX
+ XORL 132(SI),CX
+ XORL 136(SI),R8
+ XORL 140(SI),R9
+ MOVL DX,128(DI)
+ MOVL CX,132(DI)
+ MOVL R8,136(DI)
+ MOVL R9,140(DI)
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ XORL 192(SI),DX
+ XORL 196(SI),CX
+ XORL 200(SI),R8
+ XORL 204(SI),R9
+ MOVL DX,192(DI)
+ MOVL CX,196(DI)
+ MOVL R8,200(DI)
+ MOVL R9,204(DI)
+ PADDL 240(R12),X14
+ PADDL 64(R12),X0
+ PADDL 128(R12),X5
+ PADDL 192(R12),X8
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ PSHUFL $0X39,X14,X14
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X5,X5
+ PSHUFL $0X39,X8,X8
+ XORL 16(SI),DX
+ XORL 20(SI),CX
+ XORL 24(SI),R8
+ XORL 28(SI),R9
+ MOVL DX,16(DI)
+ MOVL CX,20(DI)
+ MOVL R8,24(DI)
+ MOVL R9,28(DI)
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ PSHUFL $0X39,X14,X14
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X5,X5
+ PSHUFL $0X39,X8,X8
+ XORL 80(SI),DX
+ XORL 84(SI),CX
+ XORL 88(SI),R8
+ XORL 92(SI),R9
+ MOVL DX,80(DI)
+ MOVL CX,84(DI)
+ MOVL R8,88(DI)
+ MOVL R9,92(DI)
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ PSHUFL $0X39,X14,X14
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X5,X5
+ PSHUFL $0X39,X8,X8
+ XORL 144(SI),DX
+ XORL 148(SI),CX
+ XORL 152(SI),R8
+ XORL 156(SI),R9
+ MOVL DX,144(DI)
+ MOVL CX,148(DI)
+ MOVL R8,152(DI)
+ MOVL R9,156(DI)
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ XORL 208(SI),DX
+ XORL 212(SI),CX
+ XORL 216(SI),R8
+ XORL 220(SI),R9
+ MOVL DX,208(DI)
+ MOVL CX,212(DI)
+ MOVL R8,216(DI)
+ MOVL R9,220(DI)
+ PADDL 288(R12),X15
+ PADDL 304(R12),X11
+ PADDL 80(R12),X1
+ PADDL 144(R12),X6
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ PSHUFL $0X39,X15,X15
+ PSHUFL $0X39,X11,X11
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X6,X6
+ XORL 32(SI),DX
+ XORL 36(SI),CX
+ XORL 40(SI),R8
+ XORL 44(SI),R9
+ MOVL DX,32(DI)
+ MOVL CX,36(DI)
+ MOVL R8,40(DI)
+ MOVL R9,44(DI)
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ PSHUFL $0X39,X15,X15
+ PSHUFL $0X39,X11,X11
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X6,X6
+ XORL 96(SI),DX
+ XORL 100(SI),CX
+ XORL 104(SI),R8
+ XORL 108(SI),R9
+ MOVL DX,96(DI)
+ MOVL CX,100(DI)
+ MOVL R8,104(DI)
+ MOVL R9,108(DI)
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ PSHUFL $0X39,X15,X15
+ PSHUFL $0X39,X11,X11
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X6,X6
+ XORL 160(SI),DX
+ XORL 164(SI),CX
+ XORL 168(SI),R8
+ XORL 172(SI),R9
+ MOVL DX,160(DI)
+ MOVL CX,164(DI)
+ MOVL R8,168(DI)
+ MOVL R9,172(DI)
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ XORL 224(SI),DX
+ XORL 228(SI),CX
+ XORL 232(SI),R8
+ XORL 236(SI),R9
+ MOVL DX,224(DI)
+ MOVL CX,228(DI)
+ MOVL R8,232(DI)
+ MOVL R9,236(DI)
+ PADDL 160(R12),X13
+ PADDL 208(R12),X9
+ PADDL 256(R12),X3
+ PADDL 96(R12),X2
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ PSHUFL $0X39,X13,X13
+ PSHUFL $0X39,X9,X9
+ PSHUFL $0X39,X3,X3
+ PSHUFL $0X39,X2,X2
+ XORL 48(SI),DX
+ XORL 52(SI),CX
+ XORL 56(SI),R8
+ XORL 60(SI),R9
+ MOVL DX,48(DI)
+ MOVL CX,52(DI)
+ MOVL R8,56(DI)
+ MOVL R9,60(DI)
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ PSHUFL $0X39,X13,X13
+ PSHUFL $0X39,X9,X9
+ PSHUFL $0X39,X3,X3
+ PSHUFL $0X39,X2,X2
+ XORL 112(SI),DX
+ XORL 116(SI),CX
+ XORL 120(SI),R8
+ XORL 124(SI),R9
+ MOVL DX,112(DI)
+ MOVL CX,116(DI)
+ MOVL R8,120(DI)
+ MOVL R9,124(DI)
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ PSHUFL $0X39,X13,X13
+ PSHUFL $0X39,X9,X9
+ PSHUFL $0X39,X3,X3
+ PSHUFL $0X39,X2,X2
+ XORL 176(SI),DX
+ XORL 180(SI),CX
+ XORL 184(SI),R8
+ XORL 188(SI),R9
+ MOVL DX,176(DI)
+ MOVL CX,180(DI)
+ MOVL R8,184(DI)
+ MOVL R9,188(DI)
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ XORL 240(SI),DX
+ XORL 244(SI),CX
+ XORL 248(SI),R8
+ XORL 252(SI),R9
+ MOVL DX,240(DI)
+ MOVL CX,244(DI)
+ MOVL R8,248(DI)
+ MOVL R9,252(DI)
+ MOVQ 352(R12),R9
+ SUBQ $256,R9
+ ADDQ $256,SI
+ ADDQ $256,DI
+ CMPQ R9,$256
+ JAE BYTESATLEAST256
+ CMPQ R9,$0
+ JBE DONE
+ BYTESBETWEEN1AND255:
+ CMPQ R9,$64
+ JAE NOCOPY
+ MOVQ DI,DX
+ LEAQ 360(R12),DI
+ MOVQ R9,CX
+ REP; MOVSB
+ LEAQ 360(R12),DI
+ LEAQ 360(R12),SI
+ NOCOPY:
+ MOVQ R9,352(R12)
+ MOVOA 48(R12),X0
+ MOVOA 0(R12),X1
+ MOVOA 16(R12),X2
+ MOVOA 32(R12),X3
+ MOVOA X1,X4
+ MOVQ $20,CX
+ MAINLOOP2:
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X3
+ PXOR X6,X3
+ PADDL X3,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X3,X3
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X1
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X1,X1
+ PXOR X6,X0
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X1
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X1,X1
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X3
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X3
+ PADDL X3,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X3,X3
+ PXOR X6,X0
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X3
+ PXOR X6,X3
+ PADDL X3,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X3,X3
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X1
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X1,X1
+ PXOR X6,X0
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X1
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X1,X1
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X3
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X3
+ SUBQ $4,CX
+ PADDL X3,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PXOR X7,X7
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X3,X3
+ PXOR X6,X0
+ JA MAINLOOP2
+ PADDL 48(R12),X0
+ PADDL 0(R12),X1
+ PADDL 16(R12),X2
+ PADDL 32(R12),X3
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X2,X2
+ PSHUFL $0X39,X3,X3
+ XORL 0(SI),CX
+ XORL 48(SI),R8
+ XORL 32(SI),R9
+ XORL 16(SI),AX
+ MOVL CX,0(DI)
+ MOVL R8,48(DI)
+ MOVL R9,32(DI)
+ MOVL AX,16(DI)
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X2,X2
+ PSHUFL $0X39,X3,X3
+ XORL 20(SI),CX
+ XORL 4(SI),R8
+ XORL 52(SI),R9
+ XORL 36(SI),AX
+ MOVL CX,20(DI)
+ MOVL R8,4(DI)
+ MOVL R9,52(DI)
+ MOVL AX,36(DI)
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X2,X2
+ PSHUFL $0X39,X3,X3
+ XORL 40(SI),CX
+ XORL 24(SI),R8
+ XORL 8(SI),R9
+ XORL 56(SI),AX
+ MOVL CX,40(DI)
+ MOVL R8,24(DI)
+ MOVL R9,8(DI)
+ MOVL AX,56(DI)
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ XORL 60(SI),CX
+ XORL 44(SI),R8
+ XORL 28(SI),R9
+ XORL 12(SI),AX
+ MOVL CX,60(DI)
+ MOVL R8,44(DI)
+ MOVL R9,28(DI)
+ MOVL AX,12(DI)
+ MOVQ 352(R12),R9
+ MOVL 16(R12),CX
+ MOVL 36(R12),R8
+ ADDQ $1,CX
+ SHLQ $32,R8
+ ADDQ R8,CX
+ MOVQ CX,R8
+ SHRQ $32,R8
+ MOVL CX,16(R12)
+ MOVL R8,36(R12)
+ CMPQ R9,$64
+ JA BYTESATLEAST65
+ JAE BYTESATLEAST64
+ MOVQ DI,SI
+ MOVQ DX,DI
+ MOVQ R9,CX
+ REP; MOVSB
+ BYTESATLEAST64:
+ DONE:
+ RET
+ BYTESATLEAST65:
+ SUBQ $64,R9
+ ADDQ $64,DI
+ ADDQ $64,SI
+ JMP BYTESBETWEEN1AND255
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
new file mode 100644
index 00000000000..4392cc1ac74
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
@@ -0,0 +1,15 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || purego || !gc
+// +build !amd64 purego !gc
+
+package salsa
+
+// XORKeyStream crypts bytes from in to out using the given key and counters.
+// In and out must overlap entirely or not at all. Counter
+// contains the raw salsa20 counter bytes (both nonce and block counter).
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+ genericXORKeyStream(out, in, counter, key)
+}
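Reviewer note: the assembly path and this fallback expose the same XORKeyStream
API. A minimal caller sketch (hypothetical program, not part of this change;
key and nonce values are placeholders) — bytes 0-7 of counter carry the nonce
and bytes 8-15 the little-endian block counter, per the doc comment:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/salsa20/salsa"
    )

    func main() {
        var key [32]byte
        var counter [16]byte
        copy(key[:], "an example 32-byte key..........") // placeholder; use random bytes
        copy(counter[:8], "8b-nonce")                    // bytes 8-15 stay zero: block 0

        msg := []byte("attack at dawn")
        ct := make([]byte, len(msg))
        salsa.XORKeyStream(ct, msg, &counter, &key)

        // XORing with the same key stream decrypts.
        pt := make([]byte, len(ct))
        salsa.XORKeyStream(pt, ct, &counter, &key)
        fmt.Printf("%x -> %s\n", ct, pt)
    }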
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
new file mode 100644
index 00000000000..68169c6d681
--- /dev/null
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
@@ -0,0 +1,231 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package salsa
+
+const rounds = 20
+
+// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
+// and 16-byte constant c, and puts the result into 64-byte array out.
+func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
+ j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
+ j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
+ j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
+ j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
+ j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
+ j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
+ j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
+ j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
+ j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
+ j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
+ j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
+ j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
+ j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
+ j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
+ j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
+
+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
+ x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
+
+ for i := 0; i < rounds; i += 2 {
+ u := x0 + x12
+ x4 ^= u<<7 | u>>(32-7)
+ u = x4 + x0
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x4
+ x12 ^= u<<13 | u>>(32-13)
+ u = x12 + x8
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x1
+ x9 ^= u<<7 | u>>(32-7)
+ u = x9 + x5
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x9
+ x1 ^= u<<13 | u>>(32-13)
+ u = x1 + x13
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x6
+ x14 ^= u<<7 | u>>(32-7)
+ u = x14 + x10
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x14
+ x6 ^= u<<13 | u>>(32-13)
+ u = x6 + x2
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x11
+ x3 ^= u<<7 | u>>(32-7)
+ u = x3 + x15
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x3
+ x11 ^= u<<13 | u>>(32-13)
+ u = x11 + x7
+ x15 ^= u<<18 | u>>(32-18)
+
+ u = x0 + x3
+ x1 ^= u<<7 | u>>(32-7)
+ u = x1 + x0
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x1
+ x3 ^= u<<13 | u>>(32-13)
+ u = x3 + x2
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x4
+ x6 ^= u<<7 | u>>(32-7)
+ u = x6 + x5
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x6
+ x4 ^= u<<13 | u>>(32-13)
+ u = x4 + x7
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x9
+ x11 ^= u<<7 | u>>(32-7)
+ u = x11 + x10
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x11
+ x9 ^= u<<13 | u>>(32-13)
+ u = x9 + x8
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x14
+ x12 ^= u<<7 | u>>(32-7)
+ u = x12 + x15
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x12
+ x14 ^= u<<13 | u>>(32-13)
+ u = x14 + x13
+ x15 ^= u<<18 | u>>(32-18)
+ }
+ x0 += j0
+ x1 += j1
+ x2 += j2
+ x3 += j3
+ x4 += j4
+ x5 += j5
+ x6 += j6
+ x7 += j7
+ x8 += j8
+ x9 += j9
+ x10 += j10
+ x11 += j11
+ x12 += j12
+ x13 += j13
+ x14 += j14
+ x15 += j15
+
+ out[0] = byte(x0)
+ out[1] = byte(x0 >> 8)
+ out[2] = byte(x0 >> 16)
+ out[3] = byte(x0 >> 24)
+
+ out[4] = byte(x1)
+ out[5] = byte(x1 >> 8)
+ out[6] = byte(x1 >> 16)
+ out[7] = byte(x1 >> 24)
+
+ out[8] = byte(x2)
+ out[9] = byte(x2 >> 8)
+ out[10] = byte(x2 >> 16)
+ out[11] = byte(x2 >> 24)
+
+ out[12] = byte(x3)
+ out[13] = byte(x3 >> 8)
+ out[14] = byte(x3 >> 16)
+ out[15] = byte(x3 >> 24)
+
+ out[16] = byte(x4)
+ out[17] = byte(x4 >> 8)
+ out[18] = byte(x4 >> 16)
+ out[19] = byte(x4 >> 24)
+
+ out[20] = byte(x5)
+ out[21] = byte(x5 >> 8)
+ out[22] = byte(x5 >> 16)
+ out[23] = byte(x5 >> 24)
+
+ out[24] = byte(x6)
+ out[25] = byte(x6 >> 8)
+ out[26] = byte(x6 >> 16)
+ out[27] = byte(x6 >> 24)
+
+ out[28] = byte(x7)
+ out[29] = byte(x7 >> 8)
+ out[30] = byte(x7 >> 16)
+ out[31] = byte(x7 >> 24)
+
+ out[32] = byte(x8)
+ out[33] = byte(x8 >> 8)
+ out[34] = byte(x8 >> 16)
+ out[35] = byte(x8 >> 24)
+
+ out[36] = byte(x9)
+ out[37] = byte(x9 >> 8)
+ out[38] = byte(x9 >> 16)
+ out[39] = byte(x9 >> 24)
+
+ out[40] = byte(x10)
+ out[41] = byte(x10 >> 8)
+ out[42] = byte(x10 >> 16)
+ out[43] = byte(x10 >> 24)
+
+ out[44] = byte(x11)
+ out[45] = byte(x11 >> 8)
+ out[46] = byte(x11 >> 16)
+ out[47] = byte(x11 >> 24)
+
+ out[48] = byte(x12)
+ out[49] = byte(x12 >> 8)
+ out[50] = byte(x12 >> 16)
+ out[51] = byte(x12 >> 24)
+
+ out[52] = byte(x13)
+ out[53] = byte(x13 >> 8)
+ out[54] = byte(x13 >> 16)
+ out[55] = byte(x13 >> 24)
+
+ out[56] = byte(x14)
+ out[57] = byte(x14 >> 8)
+ out[58] = byte(x14 >> 16)
+ out[59] = byte(x14 >> 24)
+
+ out[60] = byte(x15)
+ out[61] = byte(x15 >> 8)
+ out[62] = byte(x15 >> 16)
+ out[63] = byte(x15 >> 24)
+}
+
+// genericXORKeyStream is the generic implementation of XORKeyStream to be used
+// when no assembly implementation is available.
+func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+ var block [64]byte
+ var counterCopy [16]byte
+ copy(counterCopy[:], counter[:])
+
+ for len(in) >= 64 {
+ core(&block, &counterCopy, key, &Sigma)
+ for i, x := range block {
+ out[i] = in[i] ^ x
+ }
+ u := uint32(1)
+ for i := 8; i < 16; i++ {
+ u += uint32(counterCopy[i])
+ counterCopy[i] = byte(u)
+ u >>= 8
+ }
+ in = in[64:]
+ out = out[64:]
+ }
+
+ if len(in) > 0 {
+ core(&block, &counterCopy, key, &Sigma)
+ for i, v := range in {
+ out[i] = v ^ block[i]
+ }
+ }
+}
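The carry loop in genericXORKeyStream advances bytes 8-15 of the counter as a
little-endian 64-bit block counter, one step per 64-byte block. The same logic
as a standalone helper (hypothetical name, shown only to make the byte layout
explicit):

    // incrementBlockCounter mirrors the carry propagation above: add one to
    // the little-endian counter held in bytes 8..15, rippling the carry.
    func incrementBlockCounter(counter *[16]byte) {
        u := uint32(1)
        for i := 8; i < 16; i++ {
            u += uint32(counter[i])
            counter[i] = byte(u)
            u >>= 8
        }
    }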
diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go
new file mode 100644
index 00000000000..c971a99fa67
--- /dev/null
+++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go
@@ -0,0 +1,212 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scrypt implements the scrypt key derivation function as defined in
+// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard
+// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf).
+package scrypt // import "golang.org/x/crypto/scrypt"
+
+import (
+ "crypto/sha256"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+
+ "golang.org/x/crypto/pbkdf2"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// blockCopy copies n numbers from src into dst.
+func blockCopy(dst, src []uint32, n int) {
+ copy(dst, src[:n])
+}
+
+// blockXOR XORs numbers from dst with n numbers from src.
+func blockXOR(dst, src []uint32, n int) {
+ for i, v := range src[:n] {
+ dst[i] ^= v
+ }
+}
+
+// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in,
+// and puts the result into both tmp and out.
+func salsaXOR(tmp *[16]uint32, in, out []uint32) {
+ w0 := tmp[0] ^ in[0]
+ w1 := tmp[1] ^ in[1]
+ w2 := tmp[2] ^ in[2]
+ w3 := tmp[3] ^ in[3]
+ w4 := tmp[4] ^ in[4]
+ w5 := tmp[5] ^ in[5]
+ w6 := tmp[6] ^ in[6]
+ w7 := tmp[7] ^ in[7]
+ w8 := tmp[8] ^ in[8]
+ w9 := tmp[9] ^ in[9]
+ w10 := tmp[10] ^ in[10]
+ w11 := tmp[11] ^ in[11]
+ w12 := tmp[12] ^ in[12]
+ w13 := tmp[13] ^ in[13]
+ w14 := tmp[14] ^ in[14]
+ w15 := tmp[15] ^ in[15]
+
+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8
+ x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15
+
+ for i := 0; i < 8; i += 2 {
+ x4 ^= bits.RotateLeft32(x0+x12, 7)
+ x8 ^= bits.RotateLeft32(x4+x0, 9)
+ x12 ^= bits.RotateLeft32(x8+x4, 13)
+ x0 ^= bits.RotateLeft32(x12+x8, 18)
+
+ x9 ^= bits.RotateLeft32(x5+x1, 7)
+ x13 ^= bits.RotateLeft32(x9+x5, 9)
+ x1 ^= bits.RotateLeft32(x13+x9, 13)
+ x5 ^= bits.RotateLeft32(x1+x13, 18)
+
+ x14 ^= bits.RotateLeft32(x10+x6, 7)
+ x2 ^= bits.RotateLeft32(x14+x10, 9)
+ x6 ^= bits.RotateLeft32(x2+x14, 13)
+ x10 ^= bits.RotateLeft32(x6+x2, 18)
+
+ x3 ^= bits.RotateLeft32(x15+x11, 7)
+ x7 ^= bits.RotateLeft32(x3+x15, 9)
+ x11 ^= bits.RotateLeft32(x7+x3, 13)
+ x15 ^= bits.RotateLeft32(x11+x7, 18)
+
+ x1 ^= bits.RotateLeft32(x0+x3, 7)
+ x2 ^= bits.RotateLeft32(x1+x0, 9)
+ x3 ^= bits.RotateLeft32(x2+x1, 13)
+ x0 ^= bits.RotateLeft32(x3+x2, 18)
+
+ x6 ^= bits.RotateLeft32(x5+x4, 7)
+ x7 ^= bits.RotateLeft32(x6+x5, 9)
+ x4 ^= bits.RotateLeft32(x7+x6, 13)
+ x5 ^= bits.RotateLeft32(x4+x7, 18)
+
+ x11 ^= bits.RotateLeft32(x10+x9, 7)
+ x8 ^= bits.RotateLeft32(x11+x10, 9)
+ x9 ^= bits.RotateLeft32(x8+x11, 13)
+ x10 ^= bits.RotateLeft32(x9+x8, 18)
+
+ x12 ^= bits.RotateLeft32(x15+x14, 7)
+ x13 ^= bits.RotateLeft32(x12+x15, 9)
+ x14 ^= bits.RotateLeft32(x13+x12, 13)
+ x15 ^= bits.RotateLeft32(x14+x13, 18)
+ }
+ x0 += w0
+ x1 += w1
+ x2 += w2
+ x3 += w3
+ x4 += w4
+ x5 += w5
+ x6 += w6
+ x7 += w7
+ x8 += w8
+ x9 += w9
+ x10 += w10
+ x11 += w11
+ x12 += w12
+ x13 += w13
+ x14 += w14
+ x15 += w15
+
+ out[0], tmp[0] = x0, x0
+ out[1], tmp[1] = x1, x1
+ out[2], tmp[2] = x2, x2
+ out[3], tmp[3] = x3, x3
+ out[4], tmp[4] = x4, x4
+ out[5], tmp[5] = x5, x5
+ out[6], tmp[6] = x6, x6
+ out[7], tmp[7] = x7, x7
+ out[8], tmp[8] = x8, x8
+ out[9], tmp[9] = x9, x9
+ out[10], tmp[10] = x10, x10
+ out[11], tmp[11] = x11, x11
+ out[12], tmp[12] = x12, x12
+ out[13], tmp[13] = x13, x13
+ out[14], tmp[14] = x14, x14
+ out[15], tmp[15] = x15, x15
+}
+
+func blockMix(tmp *[16]uint32, in, out []uint32, r int) {
+ blockCopy(tmp[:], in[(2*r-1)*16:], 16)
+ for i := 0; i < 2*r; i += 2 {
+ salsaXOR(tmp, in[i*16:], out[i*8:])
+ salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:])
+ }
+}
+
+func integer(b []uint32, r int) uint64 {
+ j := (2*r - 1) * 16
+ return uint64(b[j]) | uint64(b[j+1])<<32
+}
+
+func smix(b []byte, r, N int, v, xy []uint32) {
+ var tmp [16]uint32
+ R := 32 * r
+ x := xy
+ y := xy[R:]
+
+ j := 0
+ for i := 0; i < R; i++ {
+ x[i] = binary.LittleEndian.Uint32(b[j:])
+ j += 4
+ }
+ for i := 0; i < N; i += 2 {
+ blockCopy(v[i*R:], x, R)
+ blockMix(&tmp, x, y, r)
+
+ blockCopy(v[(i+1)*R:], y, R)
+ blockMix(&tmp, y, x, r)
+ }
+ for i := 0; i < N; i += 2 {
+ j := int(integer(x, r) & uint64(N-1))
+ blockXOR(x, v[j*R:], R)
+ blockMix(&tmp, x, y, r)
+
+ j = int(integer(y, r) & uint64(N-1))
+ blockXOR(y, v[j*R:], R)
+ blockMix(&tmp, y, x, r)
+ }
+ j = 0
+ for _, v := range x[:R] {
+ binary.LittleEndian.PutUint32(b[j:], v)
+ j += 4
+ }
+}
+
+// Key derives a key from the password, salt, and cost parameters, returning
+// a byte slice of length keyLen that can be used as cryptographic key.
+//
+// N is a CPU/memory cost parameter, which must be a power of two greater than 1.
+// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the
+// limits, the function returns a nil byte slice and an error.
+//
+// For example, you can get a derived key for e.g. AES-256 (which needs a
+// 32-byte key) by doing:
+//
+// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
+//
+// The recommended parameters for interactive logins as of 2017 are N=32768, r=8
+// and p=1. The parameters N, r, and p should be increased as memory latency and
+// CPU parallelism increase; consider setting N to the highest power of 2 you
+// can derive within 100 milliseconds. Remember to get a good random salt.
+func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) {
+ if N <= 1 || N&(N-1) != 0 {
+ return nil, errors.New("scrypt: N must be > 1 and a power of 2")
+ }
+ if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r {
+ return nil, errors.New("scrypt: parameters are too large")
+ }
+
+ xy := make([]uint32, 64*r)
+ v := make([]uint32, 32*N*r)
+ b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New)
+
+ for i := 0; i < p; i++ {
+ smix(b[i*128*r:], r, N, v, xy)
+ }
+
+ return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil
+}
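The doc comment on Key already shows the call; a fuller sketch with the random
salt handling it recommends (hypothetical program; parameter choices follow
the 2017 interactive-login guidance above):

    package main

    import (
        "crypto/rand"
        "encoding/hex"
        "fmt"
        "log"

        "golang.org/x/crypto/scrypt"
    )

    func main() {
        // A good random salt, as the doc comment reminds callers.
        salt := make([]byte, 16)
        if _, err := rand.Read(salt); err != nil {
            log.Fatal(err)
        }

        // N=32768, r=8, p=1 peaks at roughly 128*N*r bytes of memory (~32 MiB).
        dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(hex.EncodeToString(dk))
    }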
diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go
new file mode 100644
index 00000000000..decd8cf9bf7
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/doc.go
@@ -0,0 +1,62 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha3 implements the SHA-3 fixed-output-length hash functions and
+// the SHAKE variable-output-length hash functions defined by FIPS-202.
+//
+// Both types of hash function use the "sponge" construction and the Keccak
+// permutation. For a detailed specification see http://keccak.noekeon.org/
+//
+// # Guidance
+//
+// If you aren't sure what function you need, use SHAKE256 with at least 64
+// bytes of output. The SHAKE instances are faster than the SHA3 instances;
+// the latter have to allocate memory to conform to the hash.Hash interface.
+//
+// If you need a secret-key MAC (message authentication code), prepend the
+// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
+// output.
+//
+// # Security strengths
+//
+// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
+// strength against preimage attacks of x bits. Since they only produce "x"
+// bits of output, their collision-resistance is only "x/2" bits.
+//
+// The SHAKE-256 and -128 functions have a generic security strength of 256 and
+// 128 bits against all attacks, provided that at least 2x bits of their output
+// are used. Requesting more than 64 or 32 bytes of output, respectively, does
+// not increase the collision-resistance of the SHAKE functions.
+//
+// # The sponge construction
+//
+// A sponge builds a pseudo-random function from a public pseudo-random
+// permutation, by applying the permutation to a state of "rate + capacity"
+// bytes, but hiding "capacity" of the bytes.
+//
+// A sponge starts out with a zero state. To hash an input using a sponge, up
+// to "rate" bytes of the input are XORed into the sponge's state. The sponge
+// is then "full" and the permutation is applied to "empty" it. This process is
+// repeated until all the input has been "absorbed". The input is then padded.
+// The digest is "squeezed" from the sponge in the same way, except that output
+// is copied out instead of input being XORed in.
+//
+// A sponge is parameterized by its generic security strength, which is equal
+// to half its capacity; capacity + rate is equal to the permutation's width.
+// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
+// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
+//
+// # Recommendations
+//
+// The SHAKE functions are recommended for most new uses. They can produce
+// output of arbitrary length. SHAKE256, with an output length of at least
+// 64 bytes, provides 256-bit security against all attacks. The Keccak team
+// recommends it for most applications upgrading from SHA2-512. (NIST chose a
+// much stronger, but much slower, sponge instance for SHA3-512.)
+//
+// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
+// They produce output of the same length, with the same security strengths
+// against all attacks. This means, in particular, that SHA3-256 only has
+// 128-bit collision resistance, because its output length is 32 bytes.
+package sha3 // import "golang.org/x/crypto/sha3"
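The Guidance section above describes a keyed MAC built by prepending the
secret key and hashing with SHAKE256. A minimal sketch of that pattern
(hypothetical program; key and message are placeholders):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        key := []byte("a secret key, kept out of source") // placeholder
        msg := []byte("the message to authenticate")

        h := sha3.NewShake256()
        h.Write(key)            // prepend the secret key ...
        h.Write(msg)            // ... then absorb the message
        tag := make([]byte, 32) // read at least 32 bytes of output
        h.Read(tag)
        fmt.Printf("%x\n", tag)
    }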
diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go
new file mode 100644
index 00000000000..0d8043fd2a1
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/hashes.go
@@ -0,0 +1,97 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// This file provides functions for creating instances of the SHA-3
+// and SHAKE hash functions, as well as utility functions for hashing
+// bytes.
+
+import (
+ "hash"
+)
+
+// New224 creates a new SHA3-224 hash.
+// Its generic security strength is 224 bits against preimage attacks,
+// and 112 bits against collision attacks.
+func New224() hash.Hash {
+ if h := new224Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 144, outputLen: 28, dsbyte: 0x06}
+}
+
+// New256 creates a new SHA3-256 hash.
+// Its generic security strength is 256 bits against preimage attacks,
+// and 128 bits against collision attacks.
+func New256() hash.Hash {
+ if h := new256Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
+}
+
+// New384 creates a new SHA3-384 hash.
+// Its generic security strength is 384 bits against preimage attacks,
+// and 192 bits against collision attacks.
+func New384() hash.Hash {
+ if h := new384Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 104, outputLen: 48, dsbyte: 0x06}
+}
+
+// New512 creates a new SHA3-512 hash.
+// Its generic security strength is 512 bits against preimage attacks,
+// and 256 bits against collision attacks.
+func New512() hash.Hash {
+ if h := new512Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 72, outputLen: 64, dsbyte: 0x06}
+}
+
+// NewLegacyKeccak256 creates a new Keccak-256 hash.
+//
+// Only use this function if you require compatibility with an existing cryptosystem
+// that uses non-standard padding. All other users should use New256 instead.
+func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
+
+// NewLegacyKeccak512 creates a new Keccak-512 hash.
+//
+// Only use this function if you require compatibility with an existing cryptosystem
+// that uses non-standard padding. All other users should use New512 instead.
+func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
+
+// Sum224 returns the SHA3-224 digest of the data.
+func Sum224(data []byte) (digest [28]byte) {
+ h := New224()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum256 returns the SHA3-256 digest of the data.
+func Sum256(data []byte) (digest [32]byte) {
+ h := New256()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum384 returns the SHA3-384 digest of the data.
+func Sum384(data []byte) (digest [48]byte) {
+ h := New384()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum512 returns the SHA3-512 digest of the data.
+func Sum512(data []byte) (digest [64]byte) {
+ h := New512()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
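For reference, the one-shot SumX helpers above are equivalent to creating a
hash, writing once, and summing; a short sketch (hypothetical program):

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        data := []byte("hello, sha3")

        d1 := sha3.Sum256(data) // one-shot

        h := sha3.New256() // incremental, via hash.Hash
        h.Write(data)
        d2 := h.Sum(nil)

        fmt.Println(bytes.Equal(d1[:], d2)) // true
    }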
diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go
new file mode 100644
index 00000000000..c74fc20fcb3
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go
@@ -0,0 +1,28 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !gc || purego || !s390x
+// +build !gc purego !s390x
+
+package sha3
+
+import (
+ "hash"
+)
+
+// new224Asm returns an assembly implementation of SHA3-224 if available,
+// otherwise it returns nil.
+func new224Asm() hash.Hash { return nil }
+
+// new256Asm returns an assembly implementation of SHA3-256 if available,
+// otherwise it returns nil.
+func new256Asm() hash.Hash { return nil }
+
+// new384Asm returns an assembly implementation of SHA3-384 if available,
+// otherwise it returns nil.
+func new384Asm() hash.Hash { return nil }
+
+// new512Asm returns an assembly implementation of SHA3-512 if available,
+// otherwise it returns nil.
+func new512Asm() hash.Hash { return nil }
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go
new file mode 100644
index 00000000000..0f4ae8bacff
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/keccakf.go
@@ -0,0 +1,413 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || purego || !gc
+// +build !amd64 purego !gc
+
+package sha3
+
+// rc stores the round constants for use in the ι step.
+var rc = [24]uint64{
+ 0x0000000000000001,
+ 0x0000000000008082,
+ 0x800000000000808A,
+ 0x8000000080008000,
+ 0x000000000000808B,
+ 0x0000000080000001,
+ 0x8000000080008081,
+ 0x8000000000008009,
+ 0x000000000000008A,
+ 0x0000000000000088,
+ 0x0000000080008009,
+ 0x000000008000000A,
+ 0x000000008000808B,
+ 0x800000000000008B,
+ 0x8000000000008089,
+ 0x8000000000008003,
+ 0x8000000000008002,
+ 0x8000000000000080,
+ 0x000000000000800A,
+ 0x800000008000000A,
+ 0x8000000080008081,
+ 0x8000000000008080,
+ 0x0000000080000001,
+ 0x8000000080008008,
+}
+
+// keccakF1600 applies the Keccak permutation to a 1600-bit-wide
+// state represented as an array of 25 uint64s.
+func keccakF1600(a *[25]uint64) {
+ // Implementation translated from Keccak-inplace.c
+ // in the keccak reference code.
+ var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
+
+ for i := 0; i < 24; i += 4 {
+ // Combines the 5 steps in each round into 2 steps.
+ // Unrolls 4 rounds per loop and spreads some steps across rounds.
+
+ // Round 1
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[6] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[12] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[18] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[24] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[16] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[22] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[3] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[1] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[7] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[19] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[11] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[23] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[4] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[2] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[8] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[14] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 2
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[16] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[7] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[23] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[14] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[11] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[2] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[18] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[6] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[22] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[4] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[1] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[8] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[24] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[12] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[3] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[19] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 3
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[11] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[22] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[8] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[19] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[1] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[12] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[23] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[16] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[2] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[24] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[6] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[3] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[14] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[7] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[18] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[4] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 4
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[1] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[2] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[3] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[4] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[6] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[7] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[8] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[11] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[12] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[14] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[16] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[18] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[19] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[22] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[23] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[24] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+ }
+}
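The shift pairs above (t<<n | t>>(64-n)) are hand-written 64-bit left
rotations, which the compiler recognizes as rotate instructions. A quick
equivalence check against math/bits (illustrative only):

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        t := uint64(0x0123456789abcdef)
        manual := t<<44 | t>>(64-44)
        fmt.Println(manual == bits.RotateLeft64(t, 44)) // true
    }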
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
new file mode 100644
index 00000000000..248a38241ff
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+// +build amd64,!purego,gc
+
+package sha3
+
+// This function is implemented in keccakf_amd64.s.
+
+//go:noescape
+func keccakF1600(a *[25]uint64)
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
new file mode 100644
index 00000000000..4cfa54383bf
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
@@ -0,0 +1,391 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+// +build amd64,!purego,gc
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources at https://github.com/gvanas/KeccakCodePackage
+
+// Offsets in state
+#define _ba (0*8)
+#define _be (1*8)
+#define _bi (2*8)
+#define _bo (3*8)
+#define _bu (4*8)
+#define _ga (5*8)
+#define _ge (6*8)
+#define _gi (7*8)
+#define _go (8*8)
+#define _gu (9*8)
+#define _ka (10*8)
+#define _ke (11*8)
+#define _ki (12*8)
+#define _ko (13*8)
+#define _ku (14*8)
+#define _ma (15*8)
+#define _me (16*8)
+#define _mi (17*8)
+#define _mo (18*8)
+#define _mu (19*8)
+#define _sa (20*8)
+#define _se (21*8)
+#define _si (22*8)
+#define _so (23*8)
+#define _su (24*8)
+
+// Temporary registers
+#define rT1 AX
+
+// Round vars
+#define rpState DI
+#define rpStack SP
+
+#define rDa BX
+#define rDe CX
+#define rDi DX
+#define rDo R8
+#define rDu R9
+
+#define rBa R10
+#define rBe R11
+#define rBi R12
+#define rBo R13
+#define rBu R14
+
+#define rCa SI
+#define rCe BP
+#define rCi rBi
+#define rCo rBo
+#define rCu R15
+
+#define MOVQ_RBI_RCE MOVQ rBi, rCe
+#define XORQ_RT1_RCA XORQ rT1, rCa
+#define XORQ_RT1_RCE XORQ rT1, rCe
+#define XORQ_RBA_RCU XORQ rBa, rCu
+#define XORQ_RBE_RCU XORQ rBe, rCu
+#define XORQ_RDU_RCU XORQ rDu, rCu
+#define XORQ_RDA_RCA XORQ rDa, rCa
+#define XORQ_RDE_RCE XORQ rDe, rCe
+
+#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
+ /* Prepare round */ \
+ MOVQ rCe, rDa; \
+ ROLQ $1, rDa; \
+ \
+ MOVQ _bi(iState), rCi; \
+ XORQ _gi(iState), rDi; \
+ XORQ rCu, rDa; \
+ XORQ _ki(iState), rCi; \
+ XORQ _mi(iState), rDi; \
+ XORQ rDi, rCi; \
+ \
+ MOVQ rCi, rDe; \
+ ROLQ $1, rDe; \
+ \
+ MOVQ _bo(iState), rCo; \
+ XORQ _go(iState), rDo; \
+ XORQ rCa, rDe; \
+ XORQ _ko(iState), rCo; \
+ XORQ _mo(iState), rDo; \
+ XORQ rDo, rCo; \
+ \
+ MOVQ rCo, rDi; \
+ ROLQ $1, rDi; \
+ \
+ MOVQ rCu, rDo; \
+ XORQ rCe, rDi; \
+ ROLQ $1, rDo; \
+ \
+ MOVQ rCa, rDu; \
+ XORQ rCi, rDo; \
+ ROLQ $1, rDu; \
+ \
+ /* Result b */ \
+ MOVQ _ba(iState), rBa; \
+ MOVQ _ge(iState), rBe; \
+ XORQ rCo, rDu; \
+ MOVQ _ki(iState), rBi; \
+ MOVQ _mo(iState), rBo; \
+ MOVQ _su(iState), rBu; \
+ XORQ rDe, rBe; \
+ ROLQ $44, rBe; \
+ XORQ rDi, rBi; \
+ XORQ rDa, rBa; \
+ ROLQ $43, rBi; \
+ \
+ MOVQ rBe, rCa; \
+ MOVQ rc, rT1; \
+ ORQ rBi, rCa; \
+ XORQ rBa, rT1; \
+ XORQ rT1, rCa; \
+ MOVQ rCa, _ba(oState); \
+ \
+ XORQ rDu, rBu; \
+ ROLQ $14, rBu; \
+ MOVQ rBa, rCu; \
+ ANDQ rBe, rCu; \
+ XORQ rBu, rCu; \
+ MOVQ rCu, _bu(oState); \
+ \
+ XORQ rDo, rBo; \
+ ROLQ $21, rBo; \
+ MOVQ rBo, rT1; \
+ ANDQ rBu, rT1; \
+ XORQ rBi, rT1; \
+ MOVQ rT1, _bi(oState); \
+ \
+ NOTQ rBi; \
+ ORQ rBa, rBu; \
+ ORQ rBo, rBi; \
+ XORQ rBo, rBu; \
+ XORQ rBe, rBi; \
+ MOVQ rBu, _bo(oState); \
+ MOVQ rBi, _be(oState); \
+ B_RBI_RCE; \
+ \
+ /* Result g */ \
+ MOVQ _gu(iState), rBe; \
+ XORQ rDu, rBe; \
+ MOVQ _ka(iState), rBi; \
+ ROLQ $20, rBe; \
+ XORQ rDa, rBi; \
+ ROLQ $3, rBi; \
+ MOVQ _bo(iState), rBa; \
+ MOVQ rBe, rT1; \
+ ORQ rBi, rT1; \
+ XORQ rDo, rBa; \
+ MOVQ _me(iState), rBo; \
+ MOVQ _si(iState), rBu; \
+ ROLQ $28, rBa; \
+ XORQ rBa, rT1; \
+ MOVQ rT1, _ga(oState); \
+ G_RT1_RCA; \
+ \
+ XORQ rDe, rBo; \
+ ROLQ $45, rBo; \
+ MOVQ rBi, rT1; \
+ ANDQ rBo, rT1; \
+ XORQ rBe, rT1; \
+ MOVQ rT1, _ge(oState); \
+ G_RT1_RCE; \
+ \
+ XORQ rDi, rBu; \
+ ROLQ $61, rBu; \
+ MOVQ rBu, rT1; \
+ ORQ rBa, rT1; \
+ XORQ rBo, rT1; \
+ MOVQ rT1, _go(oState); \
+ \
+ ANDQ rBe, rBa; \
+ XORQ rBu, rBa; \
+ MOVQ rBa, _gu(oState); \
+ NOTQ rBu; \
+ G_RBA_RCU; \
+ \
+ ORQ rBu, rBo; \
+ XORQ rBi, rBo; \
+ MOVQ rBo, _gi(oState); \
+ \
+ /* Result k */ \
+ MOVQ _be(iState), rBa; \
+ MOVQ _gi(iState), rBe; \
+ MOVQ _ko(iState), rBi; \
+ MOVQ _mu(iState), rBo; \
+ MOVQ _sa(iState), rBu; \
+ XORQ rDi, rBe; \
+ ROLQ $6, rBe; \
+ XORQ rDo, rBi; \
+ ROLQ $25, rBi; \
+ MOVQ rBe, rT1; \
+ ORQ rBi, rT1; \
+ XORQ rDe, rBa; \
+ ROLQ $1, rBa; \
+ XORQ rBa, rT1; \
+ MOVQ rT1, _ka(oState); \
+ K_RT1_RCA; \
+ \
+ XORQ rDu, rBo; \
+ ROLQ $8, rBo; \
+ MOVQ rBi, rT1; \
+ ANDQ rBo, rT1; \
+ XORQ rBe, rT1; \
+ MOVQ rT1, _ke(oState); \
+ K_RT1_RCE; \
+ \
+ XORQ rDa, rBu; \
+ ROLQ $18, rBu; \
+ NOTQ rBo; \
+ MOVQ rBo, rT1; \
+ ANDQ rBu, rT1; \
+ XORQ rBi, rT1; \
+ MOVQ rT1, _ki(oState); \
+ \
+ MOVQ rBu, rT1; \
+ ORQ rBa, rT1; \
+ XORQ rBo, rT1; \
+ MOVQ rT1, _ko(oState); \
+ \
+ ANDQ rBe, rBa; \
+ XORQ rBu, rBa; \
+ MOVQ rBa, _ku(oState); \
+ K_RBA_RCU; \
+ \
+ /* Result m */ \
+ MOVQ _ga(iState), rBe; \
+ XORQ rDa, rBe; \
+ MOVQ _ke(iState), rBi; \
+ ROLQ $36, rBe; \
+ XORQ rDe, rBi; \
+ MOVQ _bu(iState), rBa; \
+ ROLQ $10, rBi; \
+ MOVQ rBe, rT1; \
+ MOVQ _mi(iState), rBo; \
+ ANDQ rBi, rT1; \
+ XORQ rDu, rBa; \
+ MOVQ _so(iState), rBu; \
+ ROLQ $27, rBa; \
+ XORQ rBa, rT1; \
+ MOVQ rT1, _ma(oState); \
+ M_RT1_RCA; \
+ \
+ XORQ rDi, rBo; \
+ ROLQ $15, rBo; \
+ MOVQ rBi, rT1; \
+ ORQ rBo, rT1; \
+ XORQ rBe, rT1; \
+ MOVQ rT1, _me(oState); \
+ M_RT1_RCE; \
+ \
+ XORQ rDo, rBu; \
+ ROLQ $56, rBu; \
+ NOTQ rBo; \
+ MOVQ rBo, rT1; \
+ ORQ rBu, rT1; \
+ XORQ rBi, rT1; \
+ MOVQ rT1, _mi(oState); \
+ \
+ ORQ rBa, rBe; \
+ XORQ rBu, rBe; \
+ MOVQ rBe, _mu(oState); \
+ \
+ ANDQ rBa, rBu; \
+ XORQ rBo, rBu; \
+ MOVQ rBu, _mo(oState); \
+ M_RBE_RCU; \
+ \
+ /* Result s */ \
+ MOVQ _bi(iState), rBa; \
+ MOVQ _go(iState), rBe; \
+ MOVQ _ku(iState), rBi; \
+ XORQ rDi, rBa; \
+ MOVQ _ma(iState), rBo; \
+ ROLQ $62, rBa; \
+ XORQ rDo, rBe; \
+ MOVQ _se(iState), rBu; \
+ ROLQ $55, rBe; \
+ \
+ XORQ rDu, rBi; \
+ MOVQ rBa, rDu; \
+ XORQ rDe, rBu; \
+ ROLQ $2, rBu; \
+ ANDQ rBe, rDu; \
+ XORQ rBu, rDu; \
+ MOVQ rDu, _su(oState); \
+ \
+ ROLQ $39, rBi; \
+ S_RDU_RCU; \
+ NOTQ rBe; \
+ XORQ rDa, rBo; \
+ MOVQ rBe, rDa; \
+ ANDQ rBi, rDa; \
+ XORQ rBa, rDa; \
+ MOVQ rDa, _sa(oState); \
+ S_RDA_RCA; \
+ \
+ ROLQ $41, rBo; \
+ MOVQ rBi, rDe; \
+ ORQ rBo, rDe; \
+ XORQ rBe, rDe; \
+ MOVQ rDe, _se(oState); \
+ S_RDE_RCE; \
+ \
+ MOVQ rBo, rDi; \
+ MOVQ rBu, rDo; \
+ ANDQ rBu, rDi; \
+ ORQ rBa, rDo; \
+ XORQ rBi, rDi; \
+ XORQ rBo, rDo; \
+ MOVQ rDi, _si(oState); \
+ MOVQ rDo, _so(oState) \
+
+// func keccakF1600(state *[25]uint64)
+TEXT ·keccakF1600(SB), 0, $200-8
+ MOVQ state+0(FP), rpState
+
+ // Convert the user state into an internal state
+ NOTQ _be(rpState)
+ NOTQ _bi(rpState)
+ NOTQ _go(rpState)
+ NOTQ _ki(rpState)
+ NOTQ _mi(rpState)
+ NOTQ _sa(rpState)
+
+ // Execute the KeccakF permutation
+ MOVQ _ba(rpState), rCa
+ MOVQ _be(rpState), rCe
+ MOVQ _bu(rpState), rCu
+
+ XORQ _ga(rpState), rCa
+ XORQ _ge(rpState), rCe
+ XORQ _gu(rpState), rCu
+
+ XORQ _ka(rpState), rCa
+ XORQ _ke(rpState), rCe
+ XORQ _ku(rpState), rCu
+
+ XORQ _ma(rpState), rCa
+ XORQ _me(rpState), rCe
+ XORQ _mu(rpState), rCu
+
+ XORQ _sa(rpState), rCa
+ XORQ _se(rpState), rCe
+ MOVQ _si(rpState), rDi
+ MOVQ _so(rpState), rDo
+ XORQ _su(rpState), rCu
+
+ mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
+
+ // Revert the internal state to the user state
+ NOTQ _be(rpState)
+ NOTQ _bi(rpState)
+ NOTQ _go(rpState)
+ NOTQ _ki(rpState)
+ NOTQ _mi(rpState)
+ NOTQ _sa(rpState)
+
+ RET
diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go
new file mode 100644
index 00000000000..8b4453aac3c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/register.go
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.4
+// +build go1.4
+
+package sha3
+
+import (
+ "crypto"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.SHA3_224, New224)
+ crypto.RegisterHash(crypto.SHA3_256, New256)
+ crypto.RegisterHash(crypto.SHA3_384, New384)
+ crypto.RegisterHash(crypto.SHA3_512, New512)
+}
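With these hashes registered, callers can construct them through the standard
crypto.Hash enum by importing the package for its side effects; a sketch
(hypothetical program):

    package main

    import (
        "crypto"
        "fmt"

        _ "golang.org/x/crypto/sha3" // init registers the SHA-3 hashes
    )

    func main() {
        h := crypto.SHA3_256.New()
        h.Write([]byte("hello"))
        fmt.Printf("%x\n", h.Sum(nil))
    }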
diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go
new file mode 100644
index 00000000000..fa182beb40b
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/sha3.go
@@ -0,0 +1,193 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// spongeDirection indicates the direction bytes are flowing through the sponge.
+type spongeDirection int
+
+const (
+ // spongeAbsorbing indicates that the sponge is absorbing input.
+ spongeAbsorbing spongeDirection = iota
+ // spongeSqueezing indicates that the sponge is being squeezed.
+ spongeSqueezing
+)
+
+const (
+ // maxRate is the maximum size of the internal buffer. SHAKE-128
+ // currently needs the largest buffer (a rate of 168 bytes).
+ maxRate = 168
+)
+
+type state struct {
+ // Generic sponge components.
+ a [25]uint64 // main state of the hash
+ buf []byte // points into storage
+ rate int // the number of bytes of state to use
+
+ // dsbyte contains the "domain separation" bits and the first bit of
+ // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
+ // SHA-3 and SHAKE functions by appending bitstrings to the message.
+ // Using a little-endian bit-ordering convention, these are "01" for SHA-3
+ // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
+ // padding rule from section 5.1 is applied to pad the message to a multiple
+ // of the rate, which involves adding a "1" bit, zero or more "0" bits, and
+ // a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
+ // giving 00000110b (0x06) and 00011111b (0x1f).
+ // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
+ // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
+ // Extendable-Output Functions (May 2014)"
+ dsbyte byte
+
+ storage storageBuf
+
+ // Specific to SHA-3 and SHAKE.
+ outputLen int // the default output size in bytes
+ state spongeDirection // whether the sponge is absorbing or squeezing
+}
+
+// BlockSize returns the rate of the sponge underlying this hash function.
+func (d *state) BlockSize() int { return d.rate }
+
+// Size returns the output size of the hash function in bytes.
+func (d *state) Size() int { return d.outputLen }
+
+// Reset clears the internal state by zeroing the sponge state and
+// the byte buffer, and setting Sponge.state to absorbing.
+func (d *state) Reset() {
+ // Zero the permutation's state.
+ for i := range d.a {
+ d.a[i] = 0
+ }
+ d.state = spongeAbsorbing
+ d.buf = d.storage.asBytes()[:0]
+}
+
+func (d *state) clone() *state {
+ ret := *d
+ if ret.state == spongeAbsorbing {
+ ret.buf = ret.storage.asBytes()[:len(ret.buf)]
+ } else {
+ ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate]
+ }
+
+ return &ret
+}
+
+// permute applies the KeccakF-1600 permutation. It handles
+// any input-output buffering.
+func (d *state) permute() {
+ switch d.state {
+ case spongeAbsorbing:
+ // If we're absorbing, we need to xor the input into the state
+ // before applying the permutation.
+ xorIn(d, d.buf)
+ d.buf = d.storage.asBytes()[:0]
+ keccakF1600(&d.a)
+ case spongeSqueezing:
+ // If we're squeezing, we need to apply the permutation before
+ // copying more output.
+ keccakF1600(&d.a)
+ d.buf = d.storage.asBytes()[:d.rate]
+ copyOut(d, d.buf)
+ }
+}
+
+// padAndPermute appends the domain separation bits in dsbyte, applies
+// the multi-bitrate 10..1 padding rule, and permutes the state.
+func (d *state) padAndPermute(dsbyte byte) {
+ if d.buf == nil {
+ d.buf = d.storage.asBytes()[:0]
+ }
+ // Pad with this instance's domain-separator bits. We know that there's
+ // at least one byte of space in d.buf because, if it were full,
+ // permute would have been called to empty it. dsbyte also contains the
+ // first one bit for the padding. See the comment in the state struct.
+ d.buf = append(d.buf, dsbyte)
+ zerosStart := len(d.buf)
+ d.buf = d.storage.asBytes()[:d.rate]
+ for i := zerosStart; i < d.rate; i++ {
+ d.buf[i] = 0
+ }
+ // This adds the final one bit for the padding. Because of the way that
+ // bits are numbered from the LSB upwards, the final bit is the MSB of
+ // the last byte.
+ d.buf[d.rate-1] ^= 0x80
+ // Apply the permutation
+ d.permute()
+ d.state = spongeSqueezing
+ d.buf = d.storage.asBytes()[:d.rate]
+ copyOut(d, d.buf)
+}
+
+// Write absorbs more data into the hash's state. It panics if more
+// data is written after any output has been read.
+func (d *state) Write(p []byte) (written int, err error) {
+ if d.state != spongeAbsorbing {
+ panic("sha3: write to sponge after read")
+ }
+ if d.buf == nil {
+ d.buf = d.storage.asBytes()[:0]
+ }
+ written = len(p)
+
+ for len(p) > 0 {
+ if len(d.buf) == 0 && len(p) >= d.rate {
+ // The fast path; absorb a full "rate" bytes of input and apply the permutation.
+ xorIn(d, p[:d.rate])
+ p = p[d.rate:]
+ keccakF1600(&d.a)
+ } else {
+ // The slow path; buffer the input until we can fill the sponge, and then xor it in.
+ todo := d.rate - len(d.buf)
+ if todo > len(p) {
+ todo = len(p)
+ }
+ d.buf = append(d.buf, p[:todo]...)
+ p = p[todo:]
+
+ // If the sponge is full, apply the permutation.
+ if len(d.buf) == d.rate {
+ d.permute()
+ }
+ }
+ }
+
+ return
+}
+
+// Read squeezes an arbitrary number of bytes from the sponge.
+func (d *state) Read(out []byte) (n int, err error) {
+ // If we're still absorbing, pad and apply the permutation.
+ if d.state == spongeAbsorbing {
+ d.padAndPermute(d.dsbyte)
+ }
+
+ n = len(out)
+
+ // Now, do the squeezing.
+ for len(out) > 0 {
+ n := copy(out, d.buf)
+ d.buf = d.buf[n:]
+ out = out[n:]
+
+ // Apply the permutation if we've squeezed the sponge dry.
+ if len(d.buf) == 0 {
+ d.permute()
+ }
+ }
+
+ return
+}
+
+// Sum applies padding to the hash state and then squeezes out the desired
+// number of output bytes.
+func (d *state) Sum(in []byte) []byte {
+ // Make a copy of the original hash so that caller can keep writing
+ // and summing.
+ dup := d.clone()
+ hash := make([]byte, dup.outputLen)
+ dup.Read(hash)
+ return append(in, hash...)
+}
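Sum's clone keeps the original sponge usable, so a caller can checkpoint a
digest mid-stream and keep writing; a small sketch of that property
(hypothetical program):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        h := sha3.New256()
        h.Write([]byte("part one"))
        mid := h.Sum(nil) // snapshot; the internal state is cloned, not consumed

        h.Write([]byte(" part two"))
        final := h.Sum(nil)

        fmt.Printf("mid:   %x\nfinal: %x\n", mid, final)
    }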
diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
new file mode 100644
index 00000000000..63a3edb4cea
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
@@ -0,0 +1,287 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+// +build gc,!purego
+
+package sha3
+
+// This file contains code for using the 'compute intermediate
+// message digest' (KIMD) and 'compute last message digest' (KLMD)
+// instructions to compute SHA-3 and SHAKE hashes on IBM Z.
+
+import (
+ "hash"
+
+ "golang.org/x/sys/cpu"
+)
+
+// code represents a 7-bit KIMD/KLMD function code as defined in
+// the Principles of Operation.
+type code uint64
+
+const (
+ // function codes for KIMD/KLMD
+ sha3_224  code = 32
+ sha3_256  code = 33
+ sha3_384  code = 34
+ sha3_512  code = 35
+ shake_128 code = 36
+ shake_256 code = 37
+ nopad     code = 0x100
+)
+
+// kimd is a wrapper for the 'compute intermediate message digest' instruction.
+// src must be a multiple of the rate for the given function code.
+//
+//go:noescape
+func kimd(function code, chain *[200]byte, src []byte)
+
+// klmd is a wrapper for the 'compute last message digest' instruction.
+// src padding is handled by the instruction.
+//
+//go:noescape
+func klmd(function code, chain *[200]byte, dst, src []byte)
+
+type asmState struct {
+ a [200]byte // 1600 bit state
+ buf []byte // care must be taken to ensure cap(buf) is a multiple of rate
+ rate int // equivalent to block size
+ storage [3072]byte // underlying storage for buf
+ outputLen int // output length if fixed, 0 if not
+ function code // KIMD/KLMD function code
+ state spongeDirection // whether the sponge is absorbing or squeezing
+}
+
+func newAsmState(function code) *asmState {
+ var s asmState
+ s.function = function
+ switch function {
+ case sha3_224:
+ s.rate = 144
+ s.outputLen = 28
+ case sha3_256:
+ s.rate = 136
+ s.outputLen = 32
+ case sha3_384:
+ s.rate = 104
+ s.outputLen = 48
+ case sha3_512:
+ s.rate = 72
+ s.outputLen = 64
+ case shake_128:
+ s.rate = 168
+ case shake_256:
+ s.rate = 136
+ default:
+ panic("sha3: unrecognized function code")
+ }
+
+ // limit s.buf size to a multiple of s.rate
+ s.resetBuf()
+ return &s
+}
+
+func (s *asmState) clone() *asmState {
+ c := *s
+ c.buf = c.storage[:len(s.buf):cap(s.buf)]
+ return &c
+}
+
+// copyIntoBuf copies b into buf. It will panic if there is not enough space to
+// store all of b.
+func (s *asmState) copyIntoBuf(b []byte) {
+ bufLen := len(s.buf)
+ s.buf = s.buf[:len(s.buf)+len(b)]
+ copy(s.buf[bufLen:], b)
+}
+
+// resetBuf points buf at storage, sets the length to 0 and sets cap to be a
+// multiple of the rate.
+func (s *asmState) resetBuf() {
+ max := (cap(s.storage) / s.rate) * s.rate
+ s.buf = s.storage[:0:max]
+}
+
+// Write (via the embedded io.Writer interface) adds more data to the running hash.
+// It never returns an error.
+func (s *asmState) Write(b []byte) (int, error) {
+ if s.state != spongeAbsorbing {
+ panic("sha3: write to sponge after read")
+ }
+ length := len(b)
+ for len(b) > 0 {
+ if len(s.buf) == 0 && len(b) >= cap(s.buf) {
+ // Hash the data directly and push any remaining bytes
+ // into the buffer.
+ remainder := len(b) % s.rate
+ kimd(s.function, &s.a, b[:len(b)-remainder])
+ if remainder != 0 {
+ s.copyIntoBuf(b[len(b)-remainder:])
+ }
+ return length, nil
+ }
+
+ if len(s.buf) == cap(s.buf) {
+ // flush the buffer
+ kimd(s.function, &s.a, s.buf)
+ s.buf = s.buf[:0]
+ }
+
+ // copy as much as we can into the buffer
+ n := len(b)
+ if len(b) > cap(s.buf)-len(s.buf) {
+ n = cap(s.buf) - len(s.buf)
+ }
+ s.copyIntoBuf(b[:n])
+ b = b[n:]
+ }
+ return length, nil
+}
+
+// Read squeezes an arbitrary number of bytes from the sponge.
+func (s *asmState) Read(out []byte) (n int, err error) {
+ n = len(out)
+
+ // need to pad if we were absorbing
+ if s.state == spongeAbsorbing {
+ s.state = spongeSqueezing
+
+ // write hash directly into out if possible
+ if len(out)%s.rate == 0 {
+ klmd(s.function, &s.a, out, s.buf) // len(out) may be 0
+ s.buf = s.buf[:0]
+ return
+ }
+
+ // write hash into buffer
+ max := cap(s.buf)
+ if max > len(out) {
+ max = (len(out)/s.rate)*s.rate + s.rate
+ }
+ klmd(s.function, &s.a, s.buf[:max], s.buf)
+ s.buf = s.buf[:max]
+ }
+
+ for len(out) > 0 {
+ // flush the buffer
+ if len(s.buf) != 0 {
+ c := copy(out, s.buf)
+ out = out[c:]
+ s.buf = s.buf[c:]
+ continue
+ }
+
+ // write hash directly into out if possible
+ if len(out)%s.rate == 0 {
+ klmd(s.function|nopad, &s.a, out, nil)
+ return
+ }
+
+ // write hash into buffer
+ s.resetBuf()
+ if cap(s.buf) > len(out) {
+ s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate]
+ }
+ klmd(s.function|nopad, &s.a, s.buf, nil)
+ }
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (s *asmState) Sum(b []byte) []byte {
+ if s.outputLen == 0 {
+ panic("sha3: cannot call Sum on SHAKE functions")
+ }
+
+ // Copy the state to preserve the original.
+ a := s.a
+
+ // Hash the buffer. Note that we don't clear it because we
+ // aren't updating the state.
+ klmd(s.function, &a, nil, s.buf)
+ return append(b, a[:s.outputLen]...)
+}
+
+// Reset resets the Hash to its initial state.
+func (s *asmState) Reset() {
+ for i := range s.a {
+ s.a[i] = 0
+ }
+ s.resetBuf()
+ s.state = spongeAbsorbing
+}
+
+// Size returns the number of bytes Sum will return.
+func (s *asmState) Size() int {
+ return s.outputLen
+}
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (s *asmState) BlockSize() int {
+ return s.rate
+}
+
+// Clone returns a copy of the ShakeHash in its current state.
+func (s *asmState) Clone() ShakeHash {
+ return s.clone()
+}
+
+// new224Asm returns an assembly implementation of SHA3-224 if available,
+// otherwise it returns nil.
+func new224Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_224)
+ }
+ return nil
+}
+
+// new256Asm returns an assembly implementation of SHA3-256 if available,
+// otherwise it returns nil.
+func new256Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_256)
+ }
+ return nil
+}
+
+// new384Asm returns an assembly implementation of SHA3-384 if available,
+// otherwise it returns nil.
+func new384Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_384)
+ }
+ return nil
+}
+
+// new512Asm returns an assembly implementation of SHA3-512 if available,
+// otherwise it returns nil.
+func new512Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_512)
+ }
+ return nil
+}
+
+// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
+// otherwise it returns nil.
+func newShake128Asm() ShakeHash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(shake_128)
+ }
+ return nil
+}
+
+// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
+// otherwise it returns nil.
+func newShake256Asm() ShakeHash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(shake_256)
+ }
+ return nil
+}
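
These constructors exist so the portable code can probe for the accelerated path and fall back cleanly. A minimal sketch of that pattern; the generic &state{...} literal mirrors how this package's portable constructors are written elsewhere, and is an assumption here since that file is not part of this diff:

	func newHash256() hash.Hash {
		if h := new256Asm(); h != nil { // non-nil only with cpu.S390X.HasSHA3
			return h
		}
		return &state{rate: 136, outputLen: 32, dsbyte: 0x06} // generic SHA3-256 sponge
	}
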
diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
new file mode 100644
index 00000000000..a0e051b0451
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
@@ -0,0 +1,34 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+// +build gc,!purego
+
+#include "textflag.h"
+
+// func kimd(function code, chain *[200]byte, src []byte)
+TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
+ MOVD function+0(FP), R0
+ MOVD chain+8(FP), R1
+ LMG src+16(FP), R2, R3 // R2=base, R3=len
+
+continue:
+ WORD $0xB93E0002 // KIMD --, R2
+ BVS continue // continue if interrupted
+ MOVD $0, R0 // reset R0 for pre-go1.8 compilers
+ RET
+
+// func klmd(function code, chain *[200]byte, dst, src []byte)
+TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
+ // TODO: SHAKE support
+ MOVD function+0(FP), R0
+ MOVD chain+8(FP), R1
+ LMG dst+16(FP), R2, R3 // R2=base, R3=len
+ LMG src+40(FP), R4, R5 // R4=base, R5=len
+
+continue:
+ WORD $0xB93F0024 // KLMD R2, R4
+ BVS continue // continue if interrupted
+ MOVD $0, R0 // reset R0 for pre-go1.8 compilers
+ RET
diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go
new file mode 100644
index 00000000000..d7be2954ab2
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/shake.go
@@ -0,0 +1,173 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// This file defines the ShakeHash interface, and provides
+// functions for creating SHAKE and cSHAKE instances, as well as utility
+// functions for hashing bytes to arbitrary-length output.
+//
+// The SHAKE implementation is based on FIPS PUB 202 [1].
+// The cSHAKE implementation is based on NIST SP 800-185 [2].
+//
+// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+// [2] https://doi.org/10.6028/NIST.SP.800-185
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+// ShakeHash defines the interface to hash functions that
+// support arbitrary-length output.
+type ShakeHash interface {
+ // Write absorbs more data into the hash's state. It panics if input is
+ // written to it after output has been read from it.
+ io.Writer
+
+ // Read reads more output from the hash; reading affects the hash's
+ // state. (ShakeHash.Read is thus very different from Hash.Sum)
+ // It never returns an error.
+ io.Reader
+
+ // Clone returns a copy of the ShakeHash in its current state.
+ Clone() ShakeHash
+
+ // Reset resets the ShakeHash to its initial state.
+ Reset()
+}
+
+// cSHAKE specific context
+type cshakeState struct {
+ *state // SHA-3 state context and Read/Write operations
+
+ // initBlock is the cSHAKE specific initialization set of bytes. It is initialized
+ // by newCShake function and stores concatenation of N followed by S, encoded
+ // by the method specified in 3.3 of [1].
+ // It is stored here in order for Reset() to be able to put context into
+ // initial state.
+ initBlock []byte
+}
+
+// Consts for configuring initial SHA-3 state
+const (
+ dsbyteShake = 0x1f
+ dsbyteCShake = 0x04
+ rate128 = 168
+ rate256 = 136
+)
+
+func bytepad(input []byte, w int) []byte {
+ // leftEncode always returns max 9 bytes
+ buf := make([]byte, 0, 9+len(input)+w)
+ buf = append(buf, leftEncode(uint64(w))...)
+ buf = append(buf, input...)
+ padlen := w - (len(buf) % w)
+ return append(buf, make([]byte, padlen)...)
+}
+
+func leftEncode(value uint64) []byte {
+ var b [9]byte
+ binary.BigEndian.PutUint64(b[1:], value)
+	// Trim all leading zero bytes except the last one
+ i := byte(1)
+ for i < 8 && b[i] == 0 {
+ i++
+ }
+ // Prepend number of encoded bytes
+ b[i-1] = 9 - i
+ return b[i-1:]
+}
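
For concreteness, leftEncode implements left_encode from NIST SP 800-185, Section 2.3.1: a length byte followed by the minimal big-endian encoding of the value. A hypothetical example test (it assumes access to the unexported function, plus the fmt and testing imports):

	func TestLeftEncode(t *testing.T) {
		got := fmt.Sprintf("%x %x %x", leftEncode(0), leftEncode(168), leftEncode(0x1234))
		if got != "0100 01a8 021234" {
			t.Fatalf("unexpected encodings: %s", got)
		}
	}
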
+
+func newCShake(N, S []byte, rate int, dsbyte byte) ShakeHash {
+ c := cshakeState{state: &state{rate: rate, dsbyte: dsbyte}}
+
+ // leftEncode returns max 9 bytes
+ c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
+ c.initBlock = append(c.initBlock, N...)
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
+ c.initBlock = append(c.initBlock, S...)
+ c.Write(bytepad(c.initBlock, c.rate))
+ return &c
+}
+
+// Reset resets the hash to initial state.
+func (c *cshakeState) Reset() {
+ c.state.Reset()
+ c.Write(bytepad(c.initBlock, c.rate))
+}
+
+// Clone returns a copy of the cSHAKE context in its current state.
+func (c *cshakeState) Clone() ShakeHash {
+ b := make([]byte, len(c.initBlock))
+ copy(b, c.initBlock)
+ return &cshakeState{state: c.clone(), initBlock: b}
+}
+
+// Clone returns a copy of the SHAKE context in its current state.
+func (c *state) Clone() ShakeHash {
+ return c.clone()
+}
+
+// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
+// Its generic security strength is 128 bits against all attacks if at
+// least 32 bytes of its output are used.
+func NewShake128() ShakeHash {
+ if h := newShake128Asm(); h != nil {
+ return h
+ }
+ return &state{rate: rate128, dsbyte: dsbyteShake}
+}
+
+// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
+// Its generic security strength is 256 bits against all attacks if
+// at least 64 bytes of its output are used.
+func NewShake256() ShakeHash {
+ if h := newShake256Asm(); h != nil {
+ return h
+ }
+ return &state{rate: rate256, dsbyte: dsbyteShake}
+}
+
+// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
+// a customizable variant of SHAKE128.
+// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
+// desired. S is a customization byte string used for domain separation: two cSHAKE
+// computations on the same input with different S yield unrelated outputs.
+// When N and S are both empty, this is equivalent to NewShake128.
+func NewCShake128(N, S []byte) ShakeHash {
+ if len(N) == 0 && len(S) == 0 {
+ return NewShake128()
+ }
+ return newCShake(N, S, rate128, dsbyteCShake)
+}
+
+// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
+// a customizable variant of SHAKE256.
+// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
+// desired. S is a customization byte string used for domain separation: two cSHAKE
+// computations on the same input with different S yield unrelated outputs.
+// When N and S are both empty, this is equivalent to NewShake256.
+func NewCShake256(N, S []byte) ShakeHash {
+ if len(N) == 0 && len(S) == 0 {
+ return NewShake256()
+ }
+ return newCShake(N, S, rate256, dsbyteCShake)
+}
+
+// ShakeSum128 writes an arbitrary-length digest of data into hash.
+func ShakeSum128(hash, data []byte) {
+ h := NewShake128()
+ h.Write(data)
+ h.Read(hash)
+}
+
+// ShakeSum256 writes an arbitrary-length digest of data into hash.
+func ShakeSum256(hash, data []byte) {
+ h := NewShake256()
+ h.Write(data)
+ h.Read(hash)
+}
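
A short caller-side sketch of the API defined above: ShakeSum256 for one-shot digests, and NewCShake128 with a customization string S for domain-separated hashing (the input strings are illustrative only):

	out := make([]byte, 64)
	sha3.ShakeSum256(out, []byte("payload"))

	c := sha3.NewCShake128(nil, []byte("example-protocol-v1"))
	c.Write([]byte("payload"))
	tag := make([]byte, 32)
	c.Read(tag) // differs from any plain-SHAKE digest of the same input
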
diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go
new file mode 100644
index 00000000000..5c0710ef98f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !gc || purego || !s390x
+// +build !gc purego !s390x
+
+package sha3
+
+// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
+// otherwise it returns nil.
+func newShake128Asm() ShakeHash {
+ return nil
+}
+
+// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
+// otherwise it returns nil.
+func newShake256Asm() ShakeHash {
+ return nil
+}
diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go
new file mode 100644
index 00000000000..59c8eb94186
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/xor.go
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (!amd64 && !386 && !ppc64le) || purego
+// +build !amd64,!386,!ppc64le purego
+
+package sha3
+
+// A storageBuf is an aligned array of maxRate bytes.
+type storageBuf [maxRate]byte
+
+func (b *storageBuf) asBytes() *[maxRate]byte {
+ return (*[maxRate]byte)(b)
+}
+
+var (
+ xorIn = xorInGeneric
+ copyOut = copyOutGeneric
+ xorInUnaligned = xorInGeneric
+ copyOutUnaligned = copyOutGeneric
+)
+
+const xorImplementationUnaligned = "generic"
diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go
new file mode 100644
index 00000000000..8d947711272
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/xor_generic.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+import "encoding/binary"
+
+// xorInGeneric xors the bytes in buf into the state; it
+// makes no non-portable assumptions about memory layout
+// or alignment.
+func xorInGeneric(d *state, buf []byte) {
+ n := len(buf) / 8
+
+ for i := 0; i < n; i++ {
+ a := binary.LittleEndian.Uint64(buf)
+ d.a[i] ^= a
+ buf = buf[8:]
+ }
+}
+
+// copyOutGeneric copies uint64s to a byte buffer.
+func copyOutGeneric(d *state, b []byte) {
+ for i := 0; len(b) >= 8; i++ {
+ binary.LittleEndian.PutUint64(b, d.a[i])
+ b = b[8:]
+ }
+}
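
For orientation: the generic state keeps the 1600-bit Keccak state as 25 little-endian uint64 lanes (d.a), so absorbing a rate-sized block touches the first rate/8 lanes. A hedged equivalence for SHAKE128 (rate 168):

	// xorInGeneric(d, buf) with len(buf) == 168 is equivalent to:
	for i := 0; i < 21; i++ { // 168/8 = 21 lanes
		d.a[i] ^= binary.LittleEndian.Uint64(buf[8*i:])
	}
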
diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go
new file mode 100644
index 00000000000..1ce606246d5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (amd64 || 386 || ppc64le) && !purego
+// +build amd64 386 ppc64le
+// +build !purego
+
+package sha3
+
+import "unsafe"
+
+// A storageBuf is an aligned array of maxRate bytes.
+type storageBuf [maxRate / 8]uint64
+
+func (b *storageBuf) asBytes() *[maxRate]byte {
+ return (*[maxRate]byte)(unsafe.Pointer(b))
+}
+
+// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a
+// XOR buf.
+func xorInUnaligned(d *state, buf []byte) {
+ n := len(buf)
+ bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8]
+ if n >= 72 {
+ d.a[0] ^= bw[0]
+ d.a[1] ^= bw[1]
+ d.a[2] ^= bw[2]
+ d.a[3] ^= bw[3]
+ d.a[4] ^= bw[4]
+ d.a[5] ^= bw[5]
+ d.a[6] ^= bw[6]
+ d.a[7] ^= bw[7]
+ d.a[8] ^= bw[8]
+ }
+ if n >= 104 {
+ d.a[9] ^= bw[9]
+ d.a[10] ^= bw[10]
+ d.a[11] ^= bw[11]
+ d.a[12] ^= bw[12]
+ }
+ if n >= 136 {
+ d.a[13] ^= bw[13]
+ d.a[14] ^= bw[14]
+ d.a[15] ^= bw[15]
+ d.a[16] ^= bw[16]
+ }
+ if n >= 144 {
+ d.a[17] ^= bw[17]
+ }
+ if n >= 168 {
+ d.a[18] ^= bw[18]
+ d.a[19] ^= bw[19]
+ d.a[20] ^= bw[20]
+ }
+}
+
+func copyOutUnaligned(d *state, buf []byte) {
+ ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
+ copy(buf, ab[:])
+}
+
+var (
+ xorIn = xorInUnaligned
+ copyOut = copyOutUnaligned
+)
+
+const xorImplementationUnaligned = "unaligned"
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go
index b909471cc06..eb6bc717923 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/client.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/client.go
@@ -8,7 +8,8 @@
// ssh-agent process using the sample server.
//
// References:
-// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00
+//
+// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00
package agent // import "golang.org/x/crypto/ssh/agent"
import (
@@ -25,7 +26,6 @@ import (
"math/big"
"sync"
- "crypto"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/ssh"
)
@@ -226,7 +226,9 @@ var ErrExtensionUnsupported = errors.New("agent: extension unsupported")
type extensionAgentMsg struct {
ExtensionType string `sshtype:"27"`
- Contents []byte
+	// NOTE: this follows OpenSSH's PROTOCOL.agent rather than the IETF draft
+	// [PROTOCOL.agent], so that it matches what OpenSSH actually implements.
+ Contents []byte `ssh:"rest"`
}
// Key represents a protocol 2 public key as defined in
@@ -771,19 +773,53 @@ func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature,
return s.agent.Sign(s.pub, data)
}
-func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) {
+func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*ssh.Signature, error) {
+ if algorithm == "" || algorithm == underlyingAlgo(s.pub.Type()) {
+ return s.Sign(rand, data)
+ }
+
var flags SignatureFlags
- if opts != nil {
- switch opts.HashFunc() {
- case crypto.SHA256:
- flags = SignatureFlagRsaSha256
- case crypto.SHA512:
- flags = SignatureFlagRsaSha512
- }
+ switch algorithm {
+ case ssh.KeyAlgoRSASHA256:
+ flags = SignatureFlagRsaSha256
+ case ssh.KeyAlgoRSASHA512:
+ flags = SignatureFlagRsaSha512
+ default:
+ return nil, fmt.Errorf("agent: unsupported algorithm %q", algorithm)
}
+
return s.agent.SignWithFlags(s.pub, data, flags)
}
+var _ ssh.AlgorithmSigner = &agentKeyringSigner{}
+
+// certKeyAlgoNames is a mapping from known certificate algorithm names to the
+// corresponding public key signature algorithm.
+//
+// This map must be kept in sync with the one in certs.go.
+var certKeyAlgoNames = map[string]string{
+ ssh.CertAlgoRSAv01: ssh.KeyAlgoRSA,
+ ssh.CertAlgoRSASHA256v01: ssh.KeyAlgoRSASHA256,
+ ssh.CertAlgoRSASHA512v01: ssh.KeyAlgoRSASHA512,
+ ssh.CertAlgoDSAv01: ssh.KeyAlgoDSA,
+ ssh.CertAlgoECDSA256v01: ssh.KeyAlgoECDSA256,
+ ssh.CertAlgoECDSA384v01: ssh.KeyAlgoECDSA384,
+ ssh.CertAlgoECDSA521v01: ssh.KeyAlgoECDSA521,
+ ssh.CertAlgoSKECDSA256v01: ssh.KeyAlgoSKECDSA256,
+ ssh.CertAlgoED25519v01: ssh.KeyAlgoED25519,
+ ssh.CertAlgoSKED25519v01: ssh.KeyAlgoSKED25519,
+}
+
+// underlyingAlgo returns the signature algorithm associated with algo (which is
+// an advertised or negotiated public key or host key algorithm). These are
+// usually the same, except for certificate algorithms.
+func underlyingAlgo(algo string) string {
+ if a, ok := certKeyAlgoNames[algo]; ok {
+ return a
+ }
+ return algo
+}
+
// Calls an extension method. It is up to the agent implementation as to whether or not
// any particular extension is supported and may always return an error. Because the
// type of the response is up to the implementation, this returns the bytes of the
diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go
index c9d97943071..21bfa870fa4 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go
@@ -113,7 +113,7 @@ func (r *keyring) Unlock(passphrase []byte) error {
// expireKeysLocked removes expired keys from the keyring. If a key was added
// with a lifetimesecs constraint and seconds >= lifetimesecs seconds have
-// ellapsed, it is removed. The caller *must* be holding the keyring mutex.
+// elapsed, it is removed. The caller *must* be holding the keyring mutex.
func (r *keyring) expireKeysLocked() {
for _, k := range r.keys {
if k.expire != nil && time.Now().After(*k.expire) {
@@ -205,9 +205,9 @@ func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureF
var algorithm string
switch flags {
case SignatureFlagRsaSha256:
- algorithm = ssh.SigAlgoRSASHA2256
+ algorithm = ssh.KeyAlgoRSASHA256
case SignatureFlagRsaSha512:
- algorithm = ssh.SigAlgoRSASHA2512
+ algorithm = ssh.KeyAlgoRSASHA512
default:
return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags)
}
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
index 916c840b698..4600c20772a 100644
--- a/vendor/golang.org/x/crypto/ssh/certs.go
+++ b/vendor/golang.org/x/crypto/ssh/certs.go
@@ -14,8 +14,10 @@ import (
"time"
)
-// These constants from [PROTOCOL.certkeys] represent the algorithm names
-// for certificate types supported by this package.
+// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear
+// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms.
+// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't
+// appear in the Signature.Format field.
const (
CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
@@ -25,6 +27,21 @@ const (
CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com"
CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com"
CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com"
+
+ // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a
+ // Certificate.Type (or PublicKey.Type), but only in
+ // ClientConfig.HostKeyAlgorithms.
+ CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com"
+ CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com"
+)
+
+const (
+ // Deprecated: use CertAlgoRSAv01.
+ CertSigAlgoRSAv01 = CertAlgoRSAv01
+ // Deprecated: use CertAlgoRSASHA256v01.
+ CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01
+ // Deprecated: use CertAlgoRSASHA512v01.
+ CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01
)
// Certificate types distinguish between host and user
@@ -423,6 +440,16 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
}
c.SignatureKey = authority.PublicKey()
+ // Default to KeyAlgoRSASHA512 for ssh-rsa signers.
+ if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA {
+ sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512)
+ if err != nil {
+ return err
+ }
+ c.Signature = sig
+ return nil
+ }
+
sig, err := authority.Sign(rand, c.bytesForSigning())
if err != nil {
return err
@@ -431,26 +458,42 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
return nil
}
-var certAlgoNames = map[string]string{
- KeyAlgoRSA: CertAlgoRSAv01,
- KeyAlgoDSA: CertAlgoDSAv01,
- KeyAlgoECDSA256: CertAlgoECDSA256v01,
- KeyAlgoECDSA384: CertAlgoECDSA384v01,
- KeyAlgoECDSA521: CertAlgoECDSA521v01,
- KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01,
- KeyAlgoED25519: CertAlgoED25519v01,
- KeyAlgoSKED25519: CertAlgoSKED25519v01,
+// certKeyAlgoNames is a mapping from known certificate algorithm names to the
+// corresponding public key signature algorithm.
+//
+// This map must be kept in sync with the one in agent/client.go.
+var certKeyAlgoNames = map[string]string{
+ CertAlgoRSAv01: KeyAlgoRSA,
+ CertAlgoRSASHA256v01: KeyAlgoRSASHA256,
+ CertAlgoRSASHA512v01: KeyAlgoRSASHA512,
+ CertAlgoDSAv01: KeyAlgoDSA,
+ CertAlgoECDSA256v01: KeyAlgoECDSA256,
+ CertAlgoECDSA384v01: KeyAlgoECDSA384,
+ CertAlgoECDSA521v01: KeyAlgoECDSA521,
+ CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256,
+ CertAlgoED25519v01: KeyAlgoED25519,
+ CertAlgoSKED25519v01: KeyAlgoSKED25519,
+}
+
+// underlyingAlgo returns the signature algorithm associated with algo (which is
+// an advertised or negotiated public key or host key algorithm). These are
+// usually the same, except for certificate algorithms.
+func underlyingAlgo(algo string) string {
+ if a, ok := certKeyAlgoNames[algo]; ok {
+ return a
+ }
+ return algo
}
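
A quick illustration of the distinction this mapping captures: certificate algorithm names appear on the wire during negotiation, while signatures are always made with the underlying key algorithm.

	underlyingAlgo(CertAlgoRSASHA512v01) // "rsa-sha2-512"
	underlyingAlgo(KeyAlgoED25519)       // "ssh-ed25519" (non-certificate names pass through)
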
-// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
-// Panics if a non-certificate algorithm is passed.
-func certToPrivAlgo(algo string) string {
- for privAlgo, pubAlgo := range certAlgoNames {
- if pubAlgo == algo {
- return privAlgo
+// certificateAlgo returns the certificate algorithm that uses the provided
+// underlying signature algorithm.
+func certificateAlgo(algo string) (certAlgo string, ok bool) {
+ for certName, algoName := range certKeyAlgoNames {
+ if algoName == algo {
+ return certName, true
}
}
- panic("unknown cert algorithm")
+ return "", false
}
func (cert *Certificate) bytesForSigning() []byte {
@@ -494,13 +537,13 @@ func (c *Certificate) Marshal() []byte {
return result
}
-// Type returns the key name. It is part of the PublicKey interface.
+// Type returns the certificate algorithm name. It is part of the PublicKey interface.
func (c *Certificate) Type() string {
- algo, ok := certAlgoNames[c.Key.Type()]
+ certName, ok := certificateAlgo(c.Key.Type())
if !ok {
- panic("unknown cert key type " + c.Key.Type())
+ panic("unknown certificate type for key type " + c.Key.Type())
}
- return algo
+ return certName
}
// Verify verifies a signature against the certificate's public
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
index 8bd6b3daff5..c3062cfd13b 100644
--- a/vendor/golang.org/x/crypto/ssh/cipher.go
+++ b/vendor/golang.org/x/crypto/ssh/cipher.go
@@ -15,10 +15,9 @@ import (
"fmt"
"hash"
"io"
- "io/ioutil"
"golang.org/x/crypto/chacha20"
- "golang.org/x/crypto/poly1305"
+ "golang.org/x/crypto/internal/poly1305"
)
const (
@@ -394,6 +393,10 @@ func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error)
}
c.incIV()
+ if len(plain) == 0 {
+ return nil, errors.New("ssh: empty packet")
+ }
+
padding := plain[0]
if padding < 4 {
// padding is a byte, so it automatically satisfies
@@ -493,7 +496,7 @@ func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error)
// data, to make distinguishing between
// failing MAC and failing length check more
// difficult.
- io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage))
+ io.CopyN(io.Discard, r, int64(c.oracleCamouflage))
}
}
return p, err
@@ -636,7 +639,7 @@ const chacha20Poly1305ID = "chacha20-poly1305@openssh.com"
// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com
// AEAD, which is described here:
//
-// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00
+// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00
//
// the methods here also implement padding, which RFC4253 Section 6
// also requires of stream ciphers.
@@ -710,6 +713,10 @@ func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([
plain := c.buf[4:contentEnd]
s.XORKeyStream(plain, plain)
+ if len(plain) == 0 {
+ return nil, errors.New("ssh: empty packet")
+ }
+
padding := plain[0]
if padding < 4 {
// padding is a byte, so it automatically satisfies
diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go
index 99f68bd32e9..bdc356cbdf1 100644
--- a/vendor/golang.org/x/crypto/ssh/client.go
+++ b/vendor/golang.org/x/crypto/ssh/client.go
@@ -113,14 +113,18 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e
return c.clientAuthenticate(config)
}
-// verifyHostKeySignature verifies the host key obtained in the key
-// exchange.
-func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
+// verifyHostKeySignature verifies the host key obtained in the key exchange.
+// algo is the negotiated algorithm, and may be a certificate type.
+func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error {
sig, rest, ok := parseSignatureBody(result.Signature)
if len(rest) > 0 || !ok {
return errors.New("ssh: signature parse error")
}
+ if a := underlyingAlgo(algo); sig.Format != a {
+ return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a)
+ }
+
return hostKey.Verify(result.H, sig)
}
@@ -224,11 +228,11 @@ type ClientConfig struct {
// be used for the connection. If empty, a reasonable default is used.
ClientVersion string
- // HostKeyAlgorithms lists the key types that the client will
- // accept from the server as host key, in order of
+ // HostKeyAlgorithms lists the public key algorithms that the client will
+ // accept from the server for host key authentication, in order of
// preference. If empty, a reasonable default is used. Any
- // string returned from PublicKey.Type method may be used, or
- // any of the CertAlgoXxxx and KeyAlgoXxxx constants.
+ // string returned from a PublicKey.Type method may be used, or
+ // any of the CertAlgo and KeyAlgo constants.
HostKeyAlgorithms []string
// Timeout is the maximum amount of time for the TCP connection to establish.
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
index c611aeb6846..409b5ea1d49 100644
--- a/vendor/golang.org/x/crypto/ssh/client_auth.go
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
+ "strings"
)
type authResult int
@@ -29,6 +30,33 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
if err != nil {
return err
}
+ // The server may choose to send a SSH_MSG_EXT_INFO at this point (if we
+ // advertised willingness to receive one, which we always do) or not. See
+ // RFC 8308, Section 2.4.
+ extensions := make(map[string][]byte)
+ if len(packet) > 0 && packet[0] == msgExtInfo {
+ var extInfo extInfoMsg
+ if err := Unmarshal(packet, &extInfo); err != nil {
+ return err
+ }
+ payload := extInfo.Payload
+ for i := uint32(0); i < extInfo.NumExtensions; i++ {
+ name, rest, ok := parseString(payload)
+ if !ok {
+ return parseError(msgExtInfo)
+ }
+ value, rest, ok := parseString(rest)
+ if !ok {
+ return parseError(msgExtInfo)
+ }
+ extensions[string(name)] = value
+ payload = rest
+ }
+ packet, err = c.transport.readPacket()
+ if err != nil {
+ return err
+ }
+ }
var serviceAccept serviceAcceptMsg
if err := Unmarshal(packet, &serviceAccept); err != nil {
return err
@@ -41,7 +69,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
sessionID := c.transport.getSessionID()
for auth := AuthMethod(new(noneAuth)); auth != nil; {
- ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand)
+ ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions)
if err != nil {
return err
}
@@ -93,7 +121,7 @@ type AuthMethod interface {
// If authentication is not successful, a []string of alternative
// method names is returned. If the slice is nil, it will be ignored
// and the previous set of possible methods will be reused.
- auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error)
+ auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error)
// method returns the RFC 4252 method name.
method() string
@@ -102,7 +130,7 @@ type AuthMethod interface {
// "none" authentication, RFC 4252 section 5.2.
type noneAuth int
-func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
+func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
if err := c.writePacket(Marshal(&userAuthRequestMsg{
User: user,
Service: serviceSSH,
@@ -122,7 +150,7 @@ func (n *noneAuth) method() string {
// a function call, e.g. by prompting the user.
type passwordCallback func() (password string, err error)
-func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
+func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
type passwordAuthMsg struct {
User string `sshtype:"50"`
Service string
@@ -189,7 +217,46 @@ func (cb publicKeyCallback) method() string {
return "publickey"
}
-func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
+func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) {
+ keyFormat := signer.PublicKey().Type()
+
+ // Like in sendKexInit, if the public key implements AlgorithmSigner we
+ // assume it supports all algorithms, otherwise only the key format one.
+ as, ok := signer.(AlgorithmSigner)
+ if !ok {
+ return algorithmSignerWrapper{signer}, keyFormat
+ }
+
+ extPayload, ok := extensions["server-sig-algs"]
+ if !ok {
+ // If there is no "server-sig-algs" extension, fall back to the key
+ // format algorithm.
+ return as, keyFormat
+ }
+
+ // The server-sig-algs extension only carries underlying signature
+ // algorithm, but we are trying to select a protocol-level public key
+ // algorithm, which might be a certificate type. Extend the list of server
+ // supported algorithms to include the corresponding certificate algorithms.
+ serverAlgos := strings.Split(string(extPayload), ",")
+ for _, algo := range serverAlgos {
+ if certAlgo, ok := certificateAlgo(algo); ok {
+ serverAlgos = append(serverAlgos, certAlgo)
+ }
+ }
+
+ keyAlgos := algorithmsForKeyFormat(keyFormat)
+ algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos)
+ if err != nil {
+ // If there is no overlap, try the key anyway with the key format
+ // algorithm, to support servers that fail to list all supported
+ // algorithms.
+ return as, keyFormat
+ }
+ return as, algo
+}
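
A hedged trace of the negotiation above, for a signer holding an ssh-rsa key that implements AlgorithmSigner (the extension payload is illustrative):

	ext := map[string][]byte{
		"server-sig-algs": []byte("rsa-sha2-256,rsa-sha2-512,ssh-ed25519"),
	}
	_, algo := pickSignatureAlgorithm(signer, ext)
	// keyAlgos is [rsa-sha2-256 rsa-sha2-512 ssh-rsa], so findCommon picks
	// algo == "rsa-sha2-256" and the client avoids legacy SHA-1 signatures.
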
+
+func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) {
// Authentication is performed by sending an enquiry to test if a key is
// acceptable to the remote. If the key is acceptable, the client will
// attempt to authenticate with the valid key. If not the client will repeat
@@ -201,7 +268,10 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
}
var methods []string
for _, signer := range signers {
- ok, err := validateKey(signer.PublicKey(), user, c)
+ pub := signer.PublicKey()
+ as, algo := pickSignatureAlgorithm(signer, extensions)
+
+ ok, err := validateKey(pub, algo, user, c)
if err != nil {
return authFailure, nil, err
}
@@ -209,13 +279,13 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
continue
}
- pub := signer.PublicKey()
pubKey := pub.Marshal()
- sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
+ data := buildDataSignedForAuth(session, userAuthRequestMsg{
User: user,
Service: serviceSSH,
Method: cb.method(),
- }, []byte(pub.Type()), pubKey))
+ }, algo, pubKey)
+ sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo))
if err != nil {
return authFailure, nil, err
}
@@ -229,7 +299,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
Service: serviceSSH,
Method: cb.method(),
HasSig: true,
- Algoname: pub.Type(),
+ Algoname: algo,
PubKey: pubKey,
Sig: sig,
}
@@ -266,26 +336,25 @@ func containsMethod(methods []string, method string) bool {
}
// validateKey validates the key provided is acceptable to the server.
-func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
+func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) {
pubKey := key.Marshal()
msg := publickeyAuthMsg{
User: user,
Service: serviceSSH,
Method: "publickey",
HasSig: false,
- Algoname: key.Type(),
+ Algoname: algo,
PubKey: pubKey,
}
if err := c.writePacket(Marshal(&msg)); err != nil {
return false, err
}
- return confirmKeyAck(key, c)
+ return confirmKeyAck(key, algo, c)
}
-func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
+func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) {
pubKey := key.Marshal()
- algoname := key.Type()
for {
packet, err := c.readPacket()
@@ -302,14 +371,14 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
if err := Unmarshal(packet, &msg); err != nil {
return false, err
}
- if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
+ if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) {
return false, nil
}
return true, nil
case msgUserAuthFailure:
return false, nil
default:
- return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+ return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0])
}
}
}
@@ -330,6 +399,7 @@ func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMet
// along with a list of remaining authentication methods to try next and
// an error if an unexpected response was received.
func handleAuthResponse(c packetConn) (authResult, []string, error) {
+ gotMsgExtInfo := false
for {
packet, err := c.readPacket()
if err != nil {
@@ -341,6 +411,12 @@ func handleAuthResponse(c packetConn) (authResult, []string, error) {
if err := handleBannerResponse(c, packet); err != nil {
return authFailure, nil, err
}
+ case msgExtInfo:
+ // Ignore post-authentication RFC 8308 extensions, once.
+ if gotMsgExtInfo {
+ return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+ }
+ gotMsgExtInfo = true
case msgUserAuthFailure:
var msg userAuthFailureMsg
if err := Unmarshal(packet, &msg); err != nil {
@@ -380,10 +456,10 @@ func handleBannerResponse(c packetConn, packet []byte) error {
// disabling echoing (e.g. for passwords), and return all the answers.
// Challenge may be called multiple times in a single session. After
// successful authentication, the server may send a challenge with no
-// questions, for which the user and instruction messages should be
+// questions, for which the name and instruction messages should be
// printed. RFC 4256 section 3.3 details how the UI should behave for
// both CLI and GUI environments.
-type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
+type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error)
// KeyboardInteractive returns an AuthMethod using a prompt/response
// sequence controlled by the server.
@@ -395,7 +471,7 @@ func (cb KeyboardInteractiveChallenge) method() string {
return "keyboard-interactive"
}
-func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
+func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
type initiateMsg struct {
User string `sshtype:"50"`
Service string
@@ -412,6 +488,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
return authFailure, nil, err
}
+ gotMsgExtInfo := false
for {
packet, err := c.readPacket()
if err != nil {
@@ -425,6 +502,13 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
return authFailure, nil, err
}
continue
+ case msgExtInfo:
+ // Ignore post-authentication RFC 8308 extensions, once.
+ if gotMsgExtInfo {
+ return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
+ }
+ gotMsgExtInfo = true
+ continue
case msgUserAuthInfoRequest:
// OK
case msgUserAuthFailure:
@@ -465,7 +549,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
}
- answers, err := cb(msg.User, msg.Instruction, prompts, echos)
+ answers, err := cb(msg.Name, msg.Instruction, prompts, echos)
if err != nil {
return authFailure, nil, err
}
@@ -497,9 +581,9 @@ type retryableAuthMethod struct {
maxTries int
}
-func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) {
+func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) {
for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
- ok, methods, err = r.authMethod.auth(session, user, c, rand)
+ ok, methods, err = r.authMethod.auth(session, user, c, rand, extensions)
if ok != authFailure || err != nil { // either success, partial success or error terminate
return ok, methods, err
}
@@ -542,7 +626,7 @@ type gssAPIWithMICCallback struct {
target string
}
-func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
+func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
m := &userAuthRequestMsg{
User: user,
Service: serviceSSH,
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
index 290382d059e..2a47a61ded9 100644
--- a/vendor/golang.org/x/crypto/ssh/common.go
+++ b/vendor/golang.org/x/crypto/ssh/common.go
@@ -44,11 +44,11 @@ var preferredCiphers = []string{
// supportedKexAlgos specifies the supported key-exchange algorithms in
// preference order.
var supportedKexAlgos = []string{
- kexAlgoCurve25519SHA256,
+ kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH,
// P384 and P521 are not constant-time yet, but since we don't
// reuse ephemeral keys, using them for ECDH should be OK.
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
- kexAlgoDH14SHA1, kexAlgoDH1SHA1,
+ kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1,
}
// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden
@@ -61,18 +61,20 @@ var serverForbiddenKexAlgos = map[string]struct{}{
// preferredKexAlgos specifies the default preference for key-exchange algorithms
// in preference order.
var preferredKexAlgos = []string{
- kexAlgoCurve25519SHA256,
+ kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH,
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
- kexAlgoDH14SHA1,
+ kexAlgoDH14SHA256, kexAlgoDH14SHA1,
}
// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
// of authenticating servers) in preference order.
var supportedHostKeyAlgos = []string{
+ CertAlgoRSASHA512v01, CertAlgoRSASHA256v01,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
+ KeyAlgoRSASHA512, KeyAlgoRSASHA256,
KeyAlgoRSA, KeyAlgoDSA,
KeyAlgoED25519,
@@ -87,19 +89,33 @@ var supportedMACs = []string{
var supportedCompressions = []string{compressionNone}
-// hashFuncs keeps the mapping of supported algorithms to their respective
-// hashes needed for signature verification.
+// hashFuncs keeps the mapping of supported signature algorithms to their
+// respective hashes needed for signing and verification.
var hashFuncs = map[string]crypto.Hash{
- KeyAlgoRSA: crypto.SHA1,
- KeyAlgoDSA: crypto.SHA1,
- KeyAlgoECDSA256: crypto.SHA256,
- KeyAlgoECDSA384: crypto.SHA384,
- KeyAlgoECDSA521: crypto.SHA512,
- CertAlgoRSAv01: crypto.SHA1,
- CertAlgoDSAv01: crypto.SHA1,
- CertAlgoECDSA256v01: crypto.SHA256,
- CertAlgoECDSA384v01: crypto.SHA384,
- CertAlgoECDSA521v01: crypto.SHA512,
+ KeyAlgoRSA: crypto.SHA1,
+ KeyAlgoRSASHA256: crypto.SHA256,
+ KeyAlgoRSASHA512: crypto.SHA512,
+ KeyAlgoDSA: crypto.SHA1,
+ KeyAlgoECDSA256: crypto.SHA256,
+ KeyAlgoECDSA384: crypto.SHA384,
+ KeyAlgoECDSA521: crypto.SHA512,
+ // KeyAlgoED25519 doesn't pre-hash.
+ KeyAlgoSKECDSA256: crypto.SHA256,
+ KeyAlgoSKED25519: crypto.SHA256,
+}
+
+// algorithmsForKeyFormat returns the supported signature algorithms for a given
+// public key format (PublicKey.Type), in order of preference. See RFC 8332,
+// Section 2. See also the note in sendKexInit on backwards compatibility.
+func algorithmsForKeyFormat(keyFormat string) []string {
+ switch keyFormat {
+ case KeyAlgoRSA:
+ return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA}
+ case CertAlgoRSAv01:
+ return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01}
+ default:
+ return []string{keyFormat}
+ }
}
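
Only the RSA key formats fan out to multiple signature algorithms today; every other format signs with itself. For example:

	algorithmsForKeyFormat(KeyAlgoRSA)     // [rsa-sha2-256 rsa-sha2-512 ssh-rsa]
	algorithmsForKeyFormat(CertAlgoRSAv01) // the matching *-cert-v01@openssh.com triple
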
// unexpectedMessageError results when the SSH message that we received didn't
@@ -146,6 +162,11 @@ func (a *directionAlgorithms) rekeyBytes() int64 {
return 1 << 30
}
+var aeadCiphers = map[string]bool{
+ gcmCipherID: true,
+ chacha20Poly1305ID: true,
+}
+
type algorithms struct {
kex string
hostKey string
@@ -181,14 +202,18 @@ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMs
return
}
- ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
- if err != nil {
- return
+ if !aeadCiphers[ctos.Cipher] {
+ ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
+ if err != nil {
+ return
+ }
}
- stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
- if err != nil {
- return
+ if !aeadCiphers[stoc.Cipher] {
+ stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
+ if err != nil {
+ return
+ }
}
ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
@@ -272,8 +297,9 @@ func (c *Config) SetDefaults() {
}
// buildDataSignedForAuth returns the data that is signed in order to prove
-// possession of a private key. See RFC 4252, section 7.
-func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
+// possession of a private key. See RFC 4252, section 7. algo is the advertised
+// algorithm, and may be a certificate type.
+func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte {
data := struct {
Session []byte
Type byte
@@ -281,7 +307,7 @@ func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubK
Service string
Method string
Sign bool
- Algo []byte
+ Algo string
PubKey []byte
}{
sessionID,
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
index 67b7322c058..f6bff60dc74 100644
--- a/vendor/golang.org/x/crypto/ssh/doc.go
+++ b/vendor/golang.org/x/crypto/ssh/doc.go
@@ -12,8 +12,9 @@ the multiplexed nature of SSH is exposed to users that wish to support
others.
References:
- [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
- [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
+
+ [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
+ [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
This package does not fall under the stability promise of the Go language itself,
so its API may be changed when pressing needs arise.
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
index 2b10b05a498..653dc4d2cfb 100644
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -455,14 +455,38 @@ func (t *handshakeTransport) sendKexInit() error {
}
io.ReadFull(rand.Reader, msg.Cookie[:])
- if len(t.hostKeys) > 0 {
+ isServer := len(t.hostKeys) > 0
+ if isServer {
for _, k := range t.hostKeys {
- msg.ServerHostKeyAlgos = append(
- msg.ServerHostKeyAlgos, k.PublicKey().Type())
+ // If k is an AlgorithmSigner, presume it supports all signature algorithms
+ // associated with the key format. (Ideally AlgorithmSigner would have a
+ // method to advertise supported algorithms, but it doesn't. This means that
+ // adding support for a new algorithm is a breaking change, as we will
+ // immediately negotiate it even if existing implementations don't support
+ // it. If that ever happens, we'll have to figure something out.)
+ // If k is not an AlgorithmSigner, we can only assume it only supports the
+ // algorithms that matches the key format. (This means that Sign can't pick
+ // a different default.)
+ keyFormat := k.PublicKey().Type()
+ if _, ok := k.(AlgorithmSigner); ok {
+ msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...)
+ } else {
+ msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat)
+ }
}
} else {
msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
+
+ // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what
+ // algorithms the server supports for public key authentication. See RFC
+ // 8308, Section 2.1.
+ if firstKeyExchange := t.sessionID == nil; firstKeyExchange {
+ msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1)
+ msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...)
+ msg.KexAlgos = append(msg.KexAlgos, "ext-info-c")
+ }
}
+
packet := Marshal(msg)
// writePacket destroys the contents, so save a copy.
@@ -582,9 +606,9 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
var result *kexResult
if len(t.hostKeys) > 0 {
- result, err = t.server(kex, t.algorithms, &magics)
+ result, err = t.server(kex, &magics)
} else {
- result, err = t.client(kex, t.algorithms, &magics)
+ result, err = t.client(kex, &magics)
}
if err != nil {
@@ -611,19 +635,52 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
return nil
}
-func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
- var hostKey Signer
- for _, k := range t.hostKeys {
- if algs.hostKey == k.PublicKey().Type() {
- hostKey = k
+// algorithmSignerWrapper is an AlgorithmSigner that only supports the default
+// key format algorithm.
+//
+// This is technically a violation of the AlgorithmSigner interface, but it
+// should be unreachable given where we use this. Anyway, at least it returns an
+// error instead of panicking or producing an incorrect signature.
+type algorithmSignerWrapper struct {
+ Signer
+}
+
+func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
+ if algorithm != underlyingAlgo(a.PublicKey().Type()) {
+ return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm")
+ }
+ return a.Sign(rand, data)
+}
+
+func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
+ for _, k := range hostKeys {
+ if algo == k.PublicKey().Type() {
+ return algorithmSignerWrapper{k}
+ }
+ k, ok := k.(AlgorithmSigner)
+ if !ok {
+ continue
+ }
+ for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) {
+ if algo == a {
+ return k
+ }
}
}
+ return nil
+}
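
A hedged illustration of the selection rule: a host key that implements AlgorithmSigner matches any algorithm its key format fans out to, while a bare Signer only matches its exact type (and is wrapped so the server kex code can treat both uniformly).

	// Assuming rsaKey is an ssh-rsa Signer that also implements AlgorithmSigner:
	pickHostKey([]Signer{rsaKey}, KeyAlgoRSASHA512) // returns rsaKey
	// Assuming bareKey is an ssh-rsa Signer only:
	pickHostKey([]Signer{bareKey}, KeyAlgoRSASHA512) // returns nil
	pickHostKey([]Signer{bareKey}, KeyAlgoRSA)       // returns algorithmSignerWrapper{bareKey}
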
+
+func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) {
+ hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey)
+ if hostKey == nil {
+ return nil, errors.New("ssh: internal error: negotiated unsupported signature type")
+ }
- r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
+ r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey)
return r, err
}
-func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
+func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) {
result, err := kex.Client(t.conn, t.config.Rand, magics)
if err != nil {
return nil, err
@@ -634,7 +691,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *
return nil, err
}
- if err := verifyHostKeySignature(hostKey, result); err != nil {
+ if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil {
return nil, err
}
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
index 766e9293975..927a90cd46f 100644
--- a/vendor/golang.org/x/crypto/ssh/kex.go
+++ b/vendor/golang.org/x/crypto/ssh/kex.go
@@ -20,12 +20,14 @@ import (
)
const (
- kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
- kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
- kexAlgoECDH256 = "ecdh-sha2-nistp256"
- kexAlgoECDH384 = "ecdh-sha2-nistp384"
- kexAlgoECDH521 = "ecdh-sha2-nistp521"
- kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
+ kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
+ kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
+ kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256"
+ kexAlgoECDH256 = "ecdh-sha2-nistp256"
+ kexAlgoECDH384 = "ecdh-sha2-nistp384"
+ kexAlgoECDH521 = "ecdh-sha2-nistp521"
+ kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org"
+ kexAlgoCurve25519SHA256 = "curve25519-sha256"
// For the following kex only the client half contains a production
// ready implementation. The server half only consists of a minimal
@@ -75,8 +77,9 @@ func (m *handshakeMagics) write(w io.Writer) {
// kexAlgorithm abstracts different key exchange algorithms.
type kexAlgorithm interface {
// Server runs server-side key agreement, signing the result
- // with a hostkey.
- Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
+ // with a hostkey. algo is the negotiated algorithm, and may
+ // be a certificate type.
+ Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error)
// Client runs the client-side key agreement. Caller is
// responsible for verifying the host key signature.
@@ -86,6 +89,7 @@ type kexAlgorithm interface {
// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
type dhGroup struct {
g, p, pMinus1 *big.Int
+ hashFunc crypto.Hash
}
func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
@@ -96,8 +100,6 @@ func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int,
}
func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
- hashFunc := crypto.SHA1
-
var x *big.Int
for {
var err error
@@ -132,7 +134,7 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha
return nil, err
}
- h := hashFunc.New()
+ h := group.hashFunc.New()
magics.write(h)
writeString(h, kexDHReply.HostKey)
writeInt(h, X)
@@ -146,12 +148,11 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha
K: K,
HostKey: kexDHReply.HostKey,
Signature: kexDHReply.Signature,
- Hash: crypto.SHA1,
+ Hash: group.hashFunc,
}, nil
}
-func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
- hashFunc := crypto.SHA1
+func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return
@@ -179,7 +180,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
hostKeyBytes := priv.PublicKey().Marshal()
- h := hashFunc.New()
+ h := group.hashFunc.New()
magics.write(h)
writeString(h, hostKeyBytes)
writeInt(h, kexDHInit.X)
@@ -193,7 +194,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
- sig, err := signAndMarshal(priv, randSource, H)
+ sig, err := signAndMarshal(priv, randSource, H, algo)
if err != nil {
return nil, err
}
@@ -211,7 +212,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
K: K,
HostKey: hostKeyBytes,
Signature: sig,
- Hash: crypto.SHA1,
+ Hash: group.hashFunc,
}, err
}
@@ -314,7 +315,7 @@ func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
return true
}
-func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
+func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return nil, err
@@ -359,7 +360,7 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
- sig, err := signAndMarshal(priv, rand, H)
+ sig, err := signAndMarshal(priv, rand, H, algo)
if err != nil {
return nil, err
}
@@ -384,39 +385,62 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p
}, nil
}
+// ecHash returns the hash to match the given elliptic curve; see RFC
+// 5656, Section 6.2.1.
+func ecHash(curve elliptic.Curve) crypto.Hash {
+ bitSize := curve.Params().BitSize
+ switch {
+ case bitSize <= 256:
+ return crypto.SHA256
+ case bitSize <= 384:
+ return crypto.SHA384
+ }
+ return crypto.SHA512
+}
+
var kexAlgoMap = map[string]kexAlgorithm{}
func init() {
- // This is the group called diffie-hellman-group1-sha1 in RFC
- // 4253 and Oakley Group 2 in RFC 2409.
+ // This is the group called diffie-hellman-group1-sha1 in
+ // RFC 4253 and Oakley Group 2 in RFC 2409.
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
- g: new(big.Int).SetInt64(2),
- p: p,
- pMinus1: new(big.Int).Sub(p, bigOne),
+ g: new(big.Int).SetInt64(2),
+ p: p,
+ pMinus1: new(big.Int).Sub(p, bigOne),
+ hashFunc: crypto.SHA1,
}
- // This is the group called diffie-hellman-group14-sha1 in RFC
- // 4253 and Oakley Group 14 in RFC 3526.
+ // This are the groups called diffie-hellman-group14-sha1 and
+ // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268,
+ // and Oakley Group 14 in RFC 3526.
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
-
- kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
+ group14 := &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
}
+ kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
+ g: group14.g, p: group14.p, pMinus1: group14.pMinus1,
+ hashFunc: crypto.SHA1,
+ }
+ kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{
+ g: group14.g, p: group14.p, pMinus1: group14.pMinus1,
+ hashFunc: crypto.SHA256,
+ }
+
kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
+ kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{}
kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1}
kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256}
}
-// curve25519sha256 implements the curve25519-sha256@libssh.org key
-// agreement protocol, as described in
-// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
+// curve25519sha256 implements the curve25519-sha256 (formerly known as
+// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731.
type curve25519sha256 struct{}
type curve25519KeyPair struct {
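With these registrations, diffie-hellman-group14-sha256 and the IANA name curve25519-sha256 (RFC 8731) become negotiable key exchange algorithms. A minimal sketch of a client opting into them; user, auth, and the host key callback are assumed to be supplied by the caller:

    import "golang.org/x/crypto/ssh"

    // newKexConfig prefers the SHA-256 key exchanges added by this patch and
    // leaves the legacy SHA-1 groups out entirely.
    func newKexConfig(user string, auth []ssh.AuthMethod, cb ssh.HostKeyCallback) *ssh.ClientConfig {
        cfg := &ssh.ClientConfig{User: user, Auth: auth, HostKeyCallback: cb}
        cfg.KeyExchanges = []string{
            "curve25519-sha256",
            "curve25519-sha256@libssh.org",
            "diffie-hellman-group14-sha256",
        }
        return cfg
    }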
@@ -486,7 +510,7 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh
}, nil
}
-func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
+func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return
@@ -527,7 +551,7 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh
H := h.Sum(nil)
- sig, err := signAndMarshal(priv, rand, H)
+ sig, err := signAndMarshal(priv, rand, H, algo)
if err != nil {
return nil, err
}
@@ -553,7 +577,6 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh
// diffie-hellman-group-exchange-sha256 key agreement protocols,
// as described in RFC 4419
type dhGEXSHA struct {
- g, p *big.Int
hashFunc crypto.Hash
}
@@ -563,14 +586,7 @@ const (
dhGroupExchangeMaximumBits = 8192
)
-func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
- if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 {
- return nil, fmt.Errorf("ssh: DH parameter out of bounds")
- }
- return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil
-}
-
-func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
+func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
// Send GexRequest
kexDHGexRequest := kexDHGexRequestMsg{
MinBits: dhGroupExchangeMinimumBits,
@@ -587,35 +603,29 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
return nil, err
}
- var kexDHGexGroup kexDHGexGroupMsg
- if err = Unmarshal(packet, &kexDHGexGroup); err != nil {
+ var msg kexDHGexGroupMsg
+ if err = Unmarshal(packet, &msg); err != nil {
return nil, err
}
// reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits
- if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits {
- return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen())
+ if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits {
+ return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen())
}
- gex.p = kexDHGexGroup.P
- gex.g = kexDHGexGroup.G
-
- // Check if g is safe by verifing that g > 1 and g < p - 1
- one := big.NewInt(1)
- var pMinusOne = &big.Int{}
- pMinusOne.Sub(gex.p, one)
- if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 {
+ // Check if g is safe by verifying that 1 < g < p-1
+ pMinusOne := new(big.Int).Sub(msg.P, bigOne)
+ if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 {
return nil, fmt.Errorf("ssh: server provided gex g is not safe")
}
// Send GexInit
- var pHalf = &big.Int{}
- pHalf.Rsh(gex.p, 1)
+ pHalf := new(big.Int).Rsh(msg.P, 1)
x, err := rand.Int(randSource, pHalf)
if err != nil {
return nil, err
}
- X := new(big.Int).Exp(gex.g, x, gex.p)
+ X := new(big.Int).Exp(msg.G, x, msg.P)
kexDHGexInit := kexDHGexInitMsg{
X: X,
}
@@ -634,13 +644,13 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
return nil, err
}
- kInt, err := gex.diffieHellman(kexDHGexReply.Y, x)
- if err != nil {
- return nil, err
+ if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 {
+ return nil, errors.New("ssh: DH parameter out of bounds")
}
+ kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P)
- // Check if k is safe by verifing that k > 1 and k < p - 1
- if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 {
+ // Check if k is safe by verifying that k > 1 and k < p - 1
+ if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 {
return nil, fmt.Errorf("ssh: derived k is not safe")
}
@@ -650,8 +660,8 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits))
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits))
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits))
- writeInt(h, gex.p)
- writeInt(h, gex.g)
+ writeInt(h, msg.P)
+ writeInt(h, msg.G)
writeInt(h, X)
writeInt(h, kexDHGexReply.Y)
K := make([]byte, intLength(kInt))
@@ -670,7 +680,7 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256.
//
// This is a minimal implementation to satisfy the automated tests.
-func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
+func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
// Receive GexRequest
packet, err := c.readPacket()
if err != nil {
@@ -681,35 +691,17 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
return
}
- // smoosh the user's preferred size into our own limits
- if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits {
- kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits
- }
- if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits {
- kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits
- }
- // fix min/max if they're inconsistent. technically, we could just pout
- // and hang up, but there's no harm in giving them the benefit of the
- // doubt and just picking a bitsize for them.
- if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits {
- kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits
- }
- if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits {
- kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits
- }
-
// Send GexGroup
// This is the group called diffie-hellman-group14-sha1 in RFC
// 4253 and Oakley Group 14 in RFC 3526.
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
- gex.p = p
- gex.g = big.NewInt(2)
+ g := big.NewInt(2)
- kexDHGexGroup := kexDHGexGroupMsg{
- P: gex.p,
- G: gex.g,
+ msg := &kexDHGexGroupMsg{
+ P: p,
+ G: g,
}
- if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil {
+ if err := c.writePacket(Marshal(msg)); err != nil {
return nil, err
}
@@ -723,19 +715,19 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
return
}
- var pHalf = &big.Int{}
- pHalf.Rsh(gex.p, 1)
+ pHalf := new(big.Int).Rsh(p, 1)
y, err := rand.Int(randSource, pHalf)
if err != nil {
return
}
+ Y := new(big.Int).Exp(g, y, p)
- Y := new(big.Int).Exp(gex.g, y, gex.p)
- kInt, err := gex.diffieHellman(kexDHGexInit.X, y)
- if err != nil {
- return nil, err
+ pMinusOne := new(big.Int).Sub(p, bigOne)
+ if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 {
+ return nil, errors.New("ssh: DH parameter out of bounds")
}
+ kInt := new(big.Int).Exp(kexDHGexInit.X, y, p)
hostKeyBytes := priv.PublicKey().Marshal()
@@ -745,8 +737,8 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits))
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits))
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits))
- writeInt(h, gex.p)
- writeInt(h, gex.g)
+ writeInt(h, p)
+ writeInt(h, g)
writeInt(h, kexDHGexInit.X)
writeInt(h, Y)
@@ -758,7 +750,7 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
- sig, err := signAndMarshal(priv, randSource, H)
+ sig, err := signAndMarshal(priv, randSource, H, algo)
if err != nil {
return nil, err
}
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index 31f26349a05..1c7de1a6dd7 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -30,8 +30,9 @@ import (
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
)
-// These constants represent the algorithm names for key types supported by this
-// package.
+// Public key algorithm names. These values can appear in PublicKey.Type,
+// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner
+// arguments.
const (
KeyAlgoRSA = "ssh-rsa"
KeyAlgoDSA = "ssh-dss"
@@ -41,16 +42,21 @@ const (
KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
KeyAlgoED25519 = "ssh-ed25519"
KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com"
+
+ // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not
+ // public key formats, so they can't appear as a PublicKey.Type. The
+ // corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2.
+ KeyAlgoRSASHA256 = "rsa-sha2-256"
+ KeyAlgoRSASHA512 = "rsa-sha2-512"
)
-// These constants represent non-default signature algorithms that are supported
-// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See
-// [PROTOCOL.agent] section 4.5.1 and
-// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10
const (
- SigAlgoRSA = "ssh-rsa"
- SigAlgoRSASHA2256 = "rsa-sha2-256"
- SigAlgoRSASHA2512 = "rsa-sha2-512"
+ // Deprecated: use KeyAlgoRSA.
+ SigAlgoRSA = KeyAlgoRSA
+ // Deprecated: use KeyAlgoRSASHA256.
+ SigAlgoRSASHA2256 = KeyAlgoRSASHA256
+ // Deprecated: use KeyAlgoRSASHA512.
+ SigAlgoRSASHA2512 = KeyAlgoRSASHA512
)
// parsePubKey parses a public key of the given algorithm.
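Because the SigAlgo* names are now deprecated aliases of the KeyAlgo* constants, migration at call sites is mechanical; a sketch, assuming an RSA-backed AlgorithmSigner obtained elsewhere:

    import (
        "io"

        "golang.org/x/crypto/ssh"
    )

    // signRSASHA256 requests an rsa-sha2-256 signature using the new constant
    // name; ssh.SigAlgoRSASHA2256 still compiles but is now a deprecated
    // alias for ssh.KeyAlgoRSASHA256.
    func signRSASHA256(signer ssh.AlgorithmSigner, rand io.Reader, data []byte) (*ssh.Signature, error) {
        return signer.SignWithAlgorithm(rand, data, ssh.KeyAlgoRSASHA256)
    }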
@@ -70,7 +76,7 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
case KeyAlgoSKED25519:
return parseSKEd25519(in)
case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
- cert, err := parseCert(in, certToPrivAlgo(algo))
+ cert, err := parseCert(in, certKeyAlgoNames[algo])
if err != nil {
return nil, nil, err
}
@@ -289,18 +295,21 @@ func MarshalAuthorizedKey(key PublicKey) []byte {
return b.Bytes()
}
-// PublicKey is an abstraction of different types of public keys.
+// PublicKey represents a public key using an unspecified algorithm.
+//
+// Some PublicKeys provided by this package also implement CryptoPublicKey.
type PublicKey interface {
- // Type returns the key's type, e.g. "ssh-rsa".
+ // Type returns the key format name, e.g. "ssh-rsa".
Type() string
- // Marshal returns the serialized key data in SSH wire format,
- // with the name prefix. To unmarshal the returned data, use
- // the ParsePublicKey function.
+ // Marshal returns the serialized key data in SSH wire format, with the name
+ // prefix. To unmarshal the returned data, use the ParsePublicKey function.
Marshal() []byte
- // Verify that sig is a signature on the given data using this
- // key. This function will hash the data appropriately first.
+ // Verify that sig is a signature on the given data using this key. This
+ // method will hash the data appropriately first. sig.Format is allowed to
+ // be any signature algorithm compatible with the key type, the caller
+ // should check if it has more stringent requirements.
Verify(data []byte, sig *Signature) error
}
@@ -311,25 +320,32 @@ type CryptoPublicKey interface {
}
// A Signer can create signatures that verify against a public key.
+//
+// Some Signers provided by this package also implement AlgorithmSigner.
type Signer interface {
- // PublicKey returns an associated PublicKey instance.
+ // PublicKey returns the associated PublicKey.
PublicKey() PublicKey
- // Sign returns raw signature for the given data. This method
- // will apply the hash specified for the keytype to the data.
+ // Sign returns a signature for the given data. This method will hash the
+ // data appropriately first. The signature algorithm is expected to match
+ // the key format returned by the PublicKey.Type method (and not to be any
+ // alternative algorithm supported by the key format).
Sign(rand io.Reader, data []byte) (*Signature, error)
}
-// A AlgorithmSigner is a Signer that also supports specifying a specific
-// algorithm to use for signing.
+// An AlgorithmSigner is a Signer that also supports specifying an algorithm to
+// use for signing.
+//
+// An AlgorithmSigner can't advertise the algorithms it supports, so it should
+// be prepared to be invoked with every algorithm supported by the public key
+// format.
type AlgorithmSigner interface {
Signer
- // SignWithAlgorithm is like Signer.Sign, but allows specification of a
- // non-default signing algorithm. See the SigAlgo* constants in this
- // package for signature algorithms supported by this package. Callers may
- // pass an empty string for the algorithm in which case the AlgorithmSigner
- // will use its default algorithm.
+ // SignWithAlgorithm is like Signer.Sign, but allows specifying a desired
+ // signing algorithm. Callers may pass an empty string for the algorithm in
+ // which case the AlgorithmSigner will use a default algorithm. This default
+ // doesn't currently control any behavior in this package.
SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error)
}
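In practice the most common AlgorithmSigner is the wrapper returned by ssh.NewSignerFromSigner (the wrappedSigner shown later in this file); a sketch of selecting rsa-sha2-512 through it, assuming the type assertion holds for RSA keys as the code below suggests:

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "log"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            log.Fatal(err)
        }
        signer, err := ssh.NewSignerFromSigner(key)
        if err != nil {
            log.Fatal(err)
        }
        // The wrapper is expected to also implement AlgorithmSigner.
        as, ok := signer.(ssh.AlgorithmSigner)
        if !ok {
            log.Fatal("signer does not support algorithm selection")
        }
        sig, err := as.SignWithAlgorithm(rand.Reader, []byte("payload"), ssh.KeyAlgoRSASHA512)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("format: %s", sig.Format) // rsa-sha2-512
    }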
@@ -381,17 +397,11 @@ func (r *rsaPublicKey) Marshal() []byte {
}
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
- var hash crypto.Hash
- switch sig.Format {
- case SigAlgoRSA:
- hash = crypto.SHA1
- case SigAlgoRSASHA2256:
- hash = crypto.SHA256
- case SigAlgoRSASHA2512:
- hash = crypto.SHA512
- default:
+ supportedAlgos := algorithmsForKeyFormat(r.Type())
+ if !contains(supportedAlgos, sig.Format) {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
+ hash := hashFuncs[sig.Format]
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
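hashFuncs and algorithmsForKeyFormat are introduced elsewhere in this patch (in common.go upstream) and are not visible here; from the call sites in this file one can infer a table roughly like the sketch below. This is an inference from the hashes the deleted switch statements used to hard-code, not a quote of the actual map:

    package ssh // presumed package-internal state, inferred from call sites

    import "crypto"

    // Maps signature algorithm names to hash functions. Ed25519 is absent
    // because it hashes internally; the security-key (sk-*) algorithms used
    // SHA-256 in the code being replaced.
    var hashFuncs = map[string]crypto.Hash{
        KeyAlgoRSA:        crypto.SHA1,
        KeyAlgoRSASHA256:  crypto.SHA256,
        KeyAlgoRSASHA512:  crypto.SHA512,
        KeyAlgoDSA:        crypto.SHA1,
        KeyAlgoECDSA256:   crypto.SHA256,
        KeyAlgoECDSA384:   crypto.SHA384,
        KeyAlgoECDSA521:   crypto.SHA512,
        KeyAlgoSKECDSA256: crypto.SHA256,
        KeyAlgoSKED25519:  crypto.SHA256,
    }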
@@ -466,7 +476,7 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
- h := crypto.SHA1.New()
+ h := hashFuncs[sig.Format].New()
h.Write(data)
digest := h.Sum(nil)
@@ -499,7 +509,7 @@ func (k *dsaPrivateKey) PublicKey() PublicKey {
}
func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
- return k.SignWithAlgorithm(rand, data, "")
+ return k.SignWithAlgorithm(rand, data, k.PublicKey().Type())
}
func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
@@ -507,7 +517,7 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}
- h := crypto.SHA1.New()
+ h := hashFuncs[k.PublicKey().Type()].New()
h.Write(data)
digest := h.Sum(nil)
r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
@@ -603,19 +613,6 @@ func supportedEllipticCurve(curve elliptic.Curve) bool {
return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521()
}
-// ecHash returns the hash to match the given elliptic curve, see RFC
-// 5656, section 6.2.1
-func ecHash(curve elliptic.Curve) crypto.Hash {
- bitSize := curve.Params().BitSize
- switch {
- case bitSize <= 256:
- return crypto.SHA256
- case bitSize <= 384:
- return crypto.SHA384
- }
- return crypto.SHA512
-}
-
// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1.
func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
@@ -671,7 +668,7 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
- h := ecHash(k.Curve).New()
+ h := hashFuncs[sig.Format].New()
h.Write(data)
digest := h.Sum(nil)
@@ -775,7 +772,7 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
- h := ecHash(k.Curve).New()
+ h := hashFuncs[sig.Format].New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@@ -874,7 +871,7 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("invalid size %d for Ed25519 public key", l)
}
- h := sha256.New()
+ h := hashFuncs[sig.Format].New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@@ -961,44 +958,20 @@ func (s *wrappedSigner) PublicKey() PublicKey {
}
func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
- return s.SignWithAlgorithm(rand, data, "")
+ return s.SignWithAlgorithm(rand, data, s.pubKey.Type())
}
func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
- var hashFunc crypto.Hash
-
- if _, ok := s.pubKey.(*rsaPublicKey); ok {
- // RSA keys support a few hash functions determined by the requested signature algorithm
- switch algorithm {
- case "", SigAlgoRSA:
- algorithm = SigAlgoRSA
- hashFunc = crypto.SHA1
- case SigAlgoRSASHA2256:
- hashFunc = crypto.SHA256
- case SigAlgoRSASHA2512:
- hashFunc = crypto.SHA512
- default:
- return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
- }
- } else {
- // The only supported algorithm for all other key types is the same as the type of the key
- if algorithm == "" {
- algorithm = s.pubKey.Type()
- } else if algorithm != s.pubKey.Type() {
- return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
- }
+ if algorithm == "" {
+ algorithm = s.pubKey.Type()
+ }
- switch key := s.pubKey.(type) {
- case *dsaPublicKey:
- hashFunc = crypto.SHA1
- case *ecdsaPublicKey:
- hashFunc = ecHash(key.Curve)
- case ed25519PublicKey:
- default:
- return nil, fmt.Errorf("ssh: unsupported key type %T", key)
- }
+ supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type())
+ if !contains(supportedAlgos, algorithm) {
+ return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
}
+ hashFunc := hashFuncs[algorithm]
var digest []byte
if hashFunc != 0 {
h := hashFunc.New()
diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go
index ac41a4168bf..19bc67c4642 100644
--- a/vendor/golang.org/x/crypto/ssh/messages.go
+++ b/vendor/golang.org/x/crypto/ssh/messages.go
@@ -141,6 +141,14 @@ type serviceAcceptMsg struct {
Service string `sshtype:"6"`
}
+// See RFC 8308, section 2.3
+const msgExtInfo = 7
+
+type extInfoMsg struct {
+ NumExtensions uint32 `sshtype:"7"`
+ Payload []byte `ssh:"rest"`
+}
+
// See RFC 4252, section 5.
const msgUserAuthRequest = 50
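extInfoMsg carries RFC 8308 extension negotiation; its Payload is a sequence of name/value string pairs. A standalone sketch of how a server-sig-algs payload might be laid out on the wire; appendString is a hand-rolled stand-in for the package's private string encoding, not a real API:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // appendString writes an SSH "string": a uint32 big-endian length prefix
    // followed by the raw bytes.
    func appendString(buf []byte, s string) []byte {
        var l [4]byte
        binary.BigEndian.PutUint32(l[:], uint32(len(s)))
        return append(append(buf, l[:]...), s...)
    }

    func main() {
        // One extension: server-sig-algs (RFC 8308, section 3.1), whose value
        // is a comma-separated list of public key algorithm names.
        payload := appendString(nil, "server-sig-algs")
        payload = appendString(payload, "rsa-sha2-256,rsa-sha2-512,ssh-rsa")
        fmt.Printf("NumExtensions=1, payload is %d bytes\n", len(payload))
    }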
@@ -180,11 +188,11 @@ const msgUserAuthInfoRequest = 60
const msgUserAuthInfoResponse = 61
type userAuthInfoRequestMsg struct {
- User string `sshtype:"60"`
- Instruction string
- DeprecatedLanguage string
- NumPrompts uint32
- Prompts []byte `ssh:"rest"`
+ Name string `sshtype:"60"`
+ Instruction string
+ Language string
+ NumPrompts uint32
+ Prompts []byte `ssh:"rest"`
}
// See RFC 4254, section 5.1.
@@ -782,6 +790,8 @@ func decode(packet []byte) (interface{}, error) {
msg = new(serviceRequestMsg)
case msgServiceAccept:
msg = new(serviceAcceptMsg)
+ case msgExtInfo:
+ msg = new(extInfoMsg)
case msgKexInit:
msg = new(kexInitMsg)
case msgKexDHInit:
@@ -843,6 +853,7 @@ var packetTypeNames = map[byte]string{
msgDisconnect: "disconnectMsg",
msgServiceRequest: "serviceRequestMsg",
msgServiceAccept: "serviceAcceptMsg",
+ msgExtInfo: "extInfoMsg",
msgKexInit: "kexInitMsg",
msgKexDHInit: "kexDHInitMsg",
msgKexDHReply: "kexDHReplyMsg",
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index b6911e8306d..70045bdfd82 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -120,7 +120,7 @@ type ServerConfig struct {
}
// AddHostKey adds a private key as a host key. If an existing host
-// key exists with the same algorithm, it is overwritten. Each server
+// key exists with the same public key format, it is replaced. Each server
// config must have at least one host key.
func (s *ServerConfig) AddHostKey(key Signer) {
for i, k := range s.hostKeys {
@@ -212,9 +212,10 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha
}
// signAndMarshal signs the data with the appropriate algorithm,
-// and serializes the result in SSH wire format.
-func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
- sig, err := k.Sign(rand, data)
+// and serializes the result in SSH wire format. algo is the negotiated
+// algorithm and may be a certificate type.
+func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) {
+ sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo))
if err != nil {
return nil, err
}
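underlyingAlgo is defined elsewhere in this patch; judging from this call site and from the certKeyAlgoNames map used in keys.go above, it plausibly unwraps a certificate algorithm name to the signature algorithm of the certified key, along these lines (an inference, not the actual code):

    // underlyingAlgo, presumed shape: certificate algorithms are signed with
    // the algorithm of the key inside the certificate; any non-certificate
    // name passes through unchanged.
    func underlyingAlgo(algo string) string {
        if a, ok := certKeyAlgoNames[algo]; ok {
            return a
        }
        return algo
    }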
@@ -284,7 +285,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)
func isAcceptableAlgo(algo string) bool {
switch algo {
- case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
+ case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
return true
}
@@ -553,6 +554,7 @@ userAuthLoop:
if !ok || len(payload) > 0 {
return nil, parseError(msgUserAuthRequest)
}
+
// Ensure the public key algo and signature algo
// are supported. Compare the private key
// algorithm name that corresponds to algo with
@@ -562,7 +564,12 @@ userAuthLoop:
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break
}
- signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData)
+ if underlyingAlgo(algo) != sig.Format {
+ authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo)
+ break
+ }
+
+ signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData)
if err := pubKey.Verify(signedData, sig); err != nil {
return nil, err
@@ -633,6 +640,30 @@ userAuthLoop:
}
authFailures++
+ if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries {
+ // If we have hit the max attempts, don't bother sending the
+ // final SSH_MSG_USERAUTH_FAILURE message, since there are
+ // no more authentication methods which can be attempted,
+ // and this message may cause the client to re-attempt
+ // authentication while we send the disconnect message.
+ // Continue, and trigger the disconnect at the start of
+ // the loop.
+ //
+ // The SSH specification is somewhat confusing about this:
+ // RFC 4252 Section 5.1 requires each authentication failure
+ // be responded to with a respective SSH_MSG_USERAUTH_FAILURE
+ // message, but Section 4 says the server should disconnect
+ // after some number of attempts, but it isn't explicit which
+ // message should take precedence (i.e. should there be a failure
+ // message than a disconnect message, or if we are going to
+ // disconnect, should we only send that message.)
+ //
+ // Either way, OpenSSH disconnects immediately after the last
+ // failed authentication attempt, and given that it is typically
+ // considered the golden implementation it seems reasonable
+ // to match that behavior.
+ continue
+ }
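The knob this block enforces is ServerConfig.MaxAuthTries; a sketch of a server opting into the strict OpenSSH-like behavior, with host keys and auth callbacks assumed to be wired up elsewhere:

    import "golang.org/x/crypto/ssh"

    // newStrictServerConfig disconnects after the third failed attempt; with
    // the change above, no trailing SSH_MSG_USERAUTH_FAILURE is sent before
    // the disconnect on the final failure.
    func newStrictServerConfig() *ssh.ServerConfig {
        return &ssh.ServerConfig{
            MaxAuthTries: 3,
        }
    }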
var failureMsg userAuthFailureMsg
if config.PasswordCallback != nil {
@@ -670,7 +701,7 @@ type sshClientKeyboardInteractive struct {
*connection
}
-func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
+func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) {
if len(questions) != len(echos) {
return nil, errors.New("ssh: echos and questions must have equal length")
}
@@ -682,6 +713,7 @@ func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, quest
}
if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
+ Name: name,
Instruction: instruction,
NumPrompts: uint32(len(questions)),
Prompts: prompts,
diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go
index d3321f6b784..acef62259fd 100644
--- a/vendor/golang.org/x/crypto/ssh/session.go
+++ b/vendor/golang.org/x/crypto/ssh/session.go
@@ -13,7 +13,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"sync"
)
@@ -85,6 +84,7 @@ const (
IXANY = 39
IXOFF = 40
IMAXBEL = 41
+ IUTF8 = 42 // RFC 8160
ISIG = 50
ICANON = 51
XCASE = 52
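The new constant slots directly into TerminalModes; a sketch of requesting a PTY that advertises a UTF-8 terminal per RFC 8160, assuming an established *ssh.Session:

    import "golang.org/x/crypto/ssh"

    // requestUTF8PTY asks for a 40-row, 80-column pty with echo enabled and
    // the IUTF8 mode set, declaring the client terminal UTF-8 capable.
    func requestUTF8PTY(session *ssh.Session) error {
        modes := ssh.TerminalModes{
            ssh.ECHO:  1,
            ssh.IUTF8: 1,
        }
        return session.RequestPty("xterm-256color", 40, 80, modes)
    }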
@@ -123,7 +123,7 @@ type Session struct {
// output and error.
//
// If either is nil, Run connects the corresponding file
- // descriptor to an instance of ioutil.Discard. There is a
+ // descriptor to an instance of io.Discard. There is a
// fixed amount of buffering that is shared for the two streams.
// If either blocks it may eventually cause the remote
// command to block.
@@ -505,7 +505,7 @@ func (s *Session) stdout() {
return
}
if s.Stdout == nil {
- s.Stdout = ioutil.Discard
+ s.Stdout = io.Discard
}
s.copyFuncs = append(s.copyFuncs, func() error {
_, err := io.Copy(s.Stdout, s.ch)
@@ -518,7 +518,7 @@ func (s *Session) stderr() {
return
}
if s.Stderr == nil {
- s.Stderr = ioutil.Discard
+ s.Stderr = io.Discard
}
s.copyFuncs = append(s.copyFuncs, func() error {
_, err := io.Copy(s.Stderr, s.ch.Stderr())
diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go
index 49ddc2e7de4..acf5a21bbb0 100644
--- a/vendor/golang.org/x/crypto/ssh/transport.go
+++ b/vendor/golang.org/x/crypto/ssh/transport.go
@@ -238,15 +238,19 @@ var (
// (to setup server->client keys) or clientKeys (for client->server keys).
func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
cipherMode := cipherModes[algs.Cipher]
- macMode := macModes[algs.MAC]
iv := make([]byte, cipherMode.ivSize)
key := make([]byte, cipherMode.keySize)
- macKey := make([]byte, macMode.keySize)
generateKeyMaterial(iv, d.ivTag, kex)
generateKeyMaterial(key, d.keyTag, kex)
- generateKeyMaterial(macKey, d.macKeyTag, kex)
+
+ var macKey []byte
+ if !aeadCiphers[algs.Cipher] {
+ macMode := macModes[algs.MAC]
+ macKey = make([]byte, macMode.keySize)
+ generateKeyMaterial(macKey, d.macKeyTag, kex)
+ }
return cipherModes[algs.Cipher].create(key, iv, macKey, algs)
}
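aeadCiphers is presumably a set of cipher names (the GCM and ChaCha20-Poly1305 modes) maintained next to cipherModes; for AEAD modes the authentication tag replaces the MAC, so deriving a MAC key would only produce unused key material. Choosing such a cipher from a config is just a name selection; a sketch:

    import "golang.org/x/crypto/ssh"

    // aeadOnly restricts a client to AEAD ciphers; the negotiated MAC
    // algorithm is then irrelevant and no MAC key is derived.
    func aeadOnly(cfg *ssh.ClientConfig) *ssh.ClientConfig {
        cfg.Ciphers = []string{
            "aes128-gcm@openssh.com",
            "chacha20-poly1305@openssh.com",
        }
        return cfg
    }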
diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
deleted file mode 100644
index 15167cd746c..00000000000
--- a/vendor/golang.org/x/net/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
deleted file mode 100644
index 1c4577e9680..00000000000
--- a/vendor/golang.org/x/net/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
index a3c021d3f88..cf66309c4a8 100644
--- a/vendor/golang.org/x/net/context/context.go
+++ b/vendor/golang.org/x/net/context/context.go
@@ -21,9 +21,9 @@
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
-// func DoSomething(ctx context.Context, arg Arg) error {
-// // ... use ctx ...
-// }
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
index 344bd143345..2cb9c408f2e 100644
--- a/vendor/golang.org/x/net/context/go17.go
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -32,7 +32,7 @@ var DeadlineExceeded = context.DeadlineExceeded
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
- return ctx, CancelFunc(f)
+ return ctx, f
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
@@ -46,7 +46,7 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
- return ctx, CancelFunc(f)
+ return ctx, f
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
@@ -54,11 +54,11 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
-// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
-// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
-// defer cancel() // releases resources if slowOperation completes before timeout elapses
-// return slowOperation(ctx)
-// }
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
index 5270db5db7d..7b6b685114a 100644
--- a/vendor/golang.org/x/net/context/pre_go17.go
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -264,11 +264,11 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) {
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
-// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
-// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
-// defer cancel() // releases resources if slowOperation completes before timeout elapses
-// return slowOperation(ctx)
-// }
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
index f91466f7cd7..038941d7085 100644
--- a/vendor/golang.org/x/net/html/parse.go
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -663,6 +663,24 @@ func inHeadIM(p *parser) bool {
// Ignore the token.
return true
case a.Template:
+ // TODO: remove this divergence from the HTML5 spec.
+ //
+ // We don't handle all of the corner cases when mixing foreign
+ // content (i.e.