diff --git a/go.mod b/go.mod index 9b741f04be..49c9e59e21 100644 --- a/go.mod +++ b/go.mod @@ -12,11 +12,11 @@ require ( github.com/aws/aws-sdk-go v1.38.49 github.com/davecgh/go-spew v1.1.1 github.com/ghodss/yaml v1.0.0 - github.com/go-logr/logr v1.2.0 + github.com/go-logr/logr v1.2.3 github.com/go-logr/zapr v1.2.0 - github.com/google/go-cmp v0.5.6 + github.com/google/go-cmp v0.5.7 github.com/kevinburke/go-bindata v3.11.0+incompatible - github.com/openshift/api v0.0.0-20220315184754-d7c10d0b647e + github.com/openshift/api v0.0.0-20220411210816-c3bb724c282a github.com/openshift/library-go v0.0.0-20220401140922-0716d0ba0657 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.0 @@ -29,11 +29,11 @@ require ( google.golang.org/grpc v1.40.0 gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.23.0 - k8s.io/apimachinery v0.23.0 + k8s.io/api v0.23.5 + k8s.io/apimachinery v0.23.5 k8s.io/apiserver v0.23.0 k8s.io/client-go v0.23.0 - k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b + k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 sigs.k8s.io/controller-runtime v0.11.0 sigs.k8s.io/controller-tools v0.4.1 ) @@ -63,7 +63,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.1.2 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.5.5 // indirect @@ -94,10 +94,10 @@ require ( go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect golang.org/x/mod v0.4.2 // indirect - golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect + golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect - golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect - golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect @@ -112,11 +112,11 @@ require ( gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect k8s.io/apiextensions-apiserver v0.23.0 // indirect k8s.io/component-base v0.23.0 // indirect - k8s.io/klog/v2 v2.30.0 // indirect + k8s.io/klog/v2 v2.60.1 // indirect k8s.io/kube-aggregator v0.23.0 // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect - sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect + sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/kube-storage-version-migrator v0.0.4 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 8317482777..d3793549b8 100644 --- a/go.sum +++ b/go.sum @@ -230,8 +230,9 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= 
-github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= @@ -389,12 +390,14 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -607,8 +610,9 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/openshift/api v0.0.0-20211209135129-c58d9f695577/go.mod h1:DoslCwtqUpr3d/gsbq4ZlkaMEdYqKxuypsDjorcHhME= -github.com/openshift/api v0.0.0-20220315184754-d7c10d0b647e h1:Rsk+kdRKrkhoHnUi3qBm11TZsdFX/Fovcbm3cK4yfzQ= github.com/openshift/api v0.0.0-20220315184754-d7c10d0b647e/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= +github.com/openshift/api v0.0.0-20220411210816-c3bb724c282a h1:vHa6OYOwLWMod4uwR59Ias5q/KVT4MI5kSEpHlXx5hs= +github.com/openshift/api v0.0.0-20220411210816-c3bb724c282a/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3/go.mod h1:cwhyki5lqBmrT0m8Im+9I7PGFaraOzcYPtEz93RcsGY= @@ -919,8 +923,10 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1028,11 +1034,13 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1317,8 +1325,9 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= k8s.io/api v0.18.0-beta.2/go.mod h1:2oeNnWEqcSmaM/ibSh3t7xcIqbkGXhzZdn4ezV9T4m0= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= +k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA= +k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= k8s.io/apiextensions-apiserver v0.18.0-beta.2/go.mod 
h1:Hnrg5jx8/PbxRbUoqDGxtQkULjwx8FDW4WYJaKNK+fk= k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= @@ -1327,8 +1336,9 @@ k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG4 k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.18.0-beta.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.23.0 h1:mIfWRMjBuMdolAWJ3Fd+aPTMv3X9z+waiARMpvvb0HQ= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= +k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0= +k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= k8s.io/apiserver v0.18.0-beta.2/go.mod h1:bnblMkMoCFnIfVnVftd0SXJPzyvrk3RtaqSbblphF/A= k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= @@ -1358,8 +1368,9 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= +k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.18.0-beta.2/go.mod h1:O3Td9mheraINbLHH4pzoFP2gRzG0Wk1COqzdSL4rBPk= k8s.io/kube-aggregator v0.23.0 h1:IjY8CfGHH9WUvJXIaAsAxTzHDsaLVeaEqjkvo6MLMD0= k8s.io/kube-aggregator v0.23.0/go.mod h1:b1vpoaTWKZjCzvbe1KXFw3vPbISrghJsg7/RI8oZUME= @@ -1371,8 +1382,10 @@ k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= @@ -1388,8 +1401,9 @@ sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eM sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= sigs.k8s.io/controller-tools v0.4.1 h1:VkuV0MxlRPmRu5iTgBZU4UxUX2LiR99n3sdQGRxZF4w= sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 
h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= +sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/kube-storage-version-migrator v0.0.4 h1:qsCecgZHgdismlTt8xCmS/3numvpxrj58RWJeIg76wc= sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= @@ -1399,8 +1413,9 @@ sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0= sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/manifests/00-custom-resource-definition.yaml b/manifests/00-custom-resource-definition.yaml index acb2c40587..2532678006 100644 --- a/manifests/00-custom-resource-definition.yaml +++ b/manifests/00-custom-resource-definition.yaml @@ -144,6 +144,28 @@ spec: description: hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. properties: + httpPort: + default: 80 + description: httpPort is the port on the host which should + be used to listen for HTTP requests. This field should be + set when port 80 is already in use. The value should not + coincide with the NodePort range of the cluster. When not + specified the value defaults to 80. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + httpsPort: + default: 443 + description: httpsPort is the port on the host which should + be used to listen for HTTPS requests. This field should + be set when port 443 is already in use. The value should + not coincide with the NodePort range of the cluster. When + not specified the value defaults to 443. + format: int32 + maximum: 65535 + minimum: 1 + type: integer protocol: description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether @@ -167,6 +189,29 @@ spec: - TCP - PROXY type: string + statsPort: + default: 1936 + description: statsPort is the port on the host where the stats + from the router are published. The value should not coincide + with the NodePort range of the cluster. If an external load + balancer is configured to forward connections to this IngressController, + the load balancer should use this port for health checks. 
+ The load balancer can send HTTP probes on this port on a + given node, with the path /healthz/ready to determine if + the ingress controller is ready to receive traffic on the + node. For proper operation the load balancer must not forward + traffic to a node until the health check reports ready. + The load balancer should also stop forwarding requests within + a maximum of 45 seconds after /healthz/ready starts reporting + not-ready. Probing every 5 to 10 seconds, with a 5-second + timeout and with a threshold of two successful or failed + requests to become healthy or unhealthy respectively, are + well-tested values. When not specified the value defaults + to 1936. + format: int32 + maximum: 65535 + minimum: 1 + type: integer type: object loadBalancer: description: loadBalancer holds parameters for the load balancer. @@ -237,12 +282,13 @@ spec: type: description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", - "BareMetal", "GCP", "OpenStack", and "VSphere". + "BareMetal", "GCP", "Nutanix", "OpenStack", and "VSphere". enum: - AWS - Azure - BareMetal - GCP + - Nutanix - OpenStack - VSphere - IBM @@ -806,9 +852,14 @@ spec: properties: nodeSelector: description: "nodeSelector is the node selector applied to ingress - controller deployments. \n If unset, the default is: \n kubernetes.io/os: - linux node-role.kubernetes.io/worker: '' \n If set, the specified - selector is used and replaces the default." + controller deployments. \n If set, the specified selector is + used and replaces the default. \n If unset, the default depends + on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses + status. \n When defaultPlacement is Workers, the default is: + \n kubernetes.io/os: linux node-role.kubernetes.io/worker: + '' \n When defaultPlacement is ControlPlane, the default is: + \n kubernetes.io/os: linux node-role.kubernetes.io/master: + '' \n These defaults are subject to change." properties: matchExpressions: description: matchExpressions is a list of label selector @@ -896,8 +947,16 @@ spec: type: array type: object replicas: - description: replicas is the desired number of ingress controller - replicas. If unset, defaults to 2. + description: "replicas is the desired number of ingress controller + replicas. If unset, the default depends on the value of the defaultPlacement + field in the cluster config.openshift.io/v1/ingresses status. \n + The value of replicas is set based on the value of a chosen field + in the Infrastructure CR. If defaultPlacement is set to ControlPlane, + the chosen field will be controlPlaneTopology. If it is set to Workers + the chosen field will be infrastructureTopology. Replicas will then + be set to 1 or 2 based whether the chosen field's value is SingleReplica + or HighlyAvailable, respectively. \n These defaults are subject + to change." format: int32 type: integer routeAdmission: @@ -1126,6 +1185,24 @@ spec: format: int32 minimum: 4096 type: integer + healthCheckInterval: + description: "healthCheckInterval defines how long the router + waits between two consecutive health checks on its configured + backends. This value is applied globally as a default for all + routes, but may be overridden per-route by the route annotation + \"router.openshift.io/haproxy.health.check.interval\". \n Setting + this to less than 5s can cause excess traffic due to too frequent + TCP health checks and accompanying SYN packet storms. 
Alternatively, + setting this too high can result in increased latency, due to + backend servers that are no longer available, but haven't yet + been detected as such. \n An empty or zero healthCheckInterval + means no opinion and IngressController chooses a default, which + is subject to change over time. Currently the default healthCheckInterval + value is 5s. \n Currently the minimum allowed value is 1s and + the maximum allowed value is 2147483647ms (24.85 days). Both + are subject to change over time." + format: duration + type: string serverFinTimeout: description: "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to @@ -1233,6 +1310,28 @@ spec: description: hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. properties: + httpPort: + default: 80 + description: httpPort is the port on the host which should + be used to listen for HTTP requests. This field should be + set when port 80 is already in use. The value should not + coincide with the NodePort range of the cluster. When not + specified the value defaults to 80. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + httpsPort: + default: 443 + description: httpsPort is the port on the host which should + be used to listen for HTTPS requests. This field should + be set when port 443 is already in use. The value should + not coincide with the NodePort range of the cluster. When + not specified the value defaults to 443. + format: int32 + maximum: 65535 + minimum: 1 + type: integer protocol: description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether @@ -1256,6 +1355,29 @@ spec: - TCP - PROXY type: string + statsPort: + default: 1936 + description: statsPort is the port on the host where the stats + from the router are published. The value should not coincide + with the NodePort range of the cluster. If an external load + balancer is configured to forward connections to this IngressController, + the load balancer should use this port for health checks. + The load balancer can send HTTP probes on this port on a + given node, with the path /healthz/ready to determine if + the ingress controller is ready to receive traffic on the + node. For proper operation the load balancer must not forward + traffic to a node until the health check reports ready. + The load balancer should also stop forwarding requests within + a maximum of 45 seconds after /healthz/ready starts reporting + not-ready. Probing every 5 to 10 seconds, with a 5-second + timeout and with a threshold of two successful or failed + requests to become healthy or unhealthy respectively, are + well-tested values. When not specified the value defaults + to 1936. + format: int32 + maximum: 65535 + minimum: 1 + type: integer type: object loadBalancer: description: loadBalancer holds parameters for the load balancer. @@ -1326,12 +1448,13 @@ spec: type: description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", - "BareMetal", "GCP", "OpenStack", and "VSphere". + "BareMetal", "GCP", "Nutanix", "OpenStack", and "VSphere". enum: - AWS - Azure - BareMetal - GCP + - Nutanix - OpenStack - VSphere - IBM @@ -1418,10 +1541,99 @@ spec: required: - type type: object + namespaceSelector: + description: namespaceSelector is the actual namespaceSelector in + use. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object observedGeneration: description: observedGeneration is the most recent generation observed. format: int64 type: integer + routeSelector: + description: routeSelector is the actual routeSelector in use. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object selector: description: selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. 
The number diff --git a/pkg/manifests/bindata.go b/pkg/manifests/bindata.go index 170e13a103..4ac9e05f0c 100644 --- a/pkg/manifests/bindata.go +++ b/pkg/manifests/bindata.go @@ -17,7 +17,7 @@ // assets/router/service-internal.yaml (429B) // manifests/00-cluster-role.yaml (3.009kB) // manifests/00-custom-resource-definition-internal.yaml (6.75kB) -// manifests/00-custom-resource-definition.yaml (91.138kB) +// manifests/00-custom-resource-definition.yaml (104.048kB) // manifests/00-ingress-credentials-request.yaml (4.279kB) // manifests/00-namespace.yaml (508B) // manifests/0000_90_ingress-operator_00_prometheusrole.yaml (446B) @@ -442,7 +442,7 @@ func manifests00CustomResourceDefinitionInternalYaml() (*asset, error) { return a, nil } -var _manifests00CustomResourceDefinitionYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\x6d\x7b\x1b\x37\x92\x00\xf8\x7d\x7e\x05\x8e\xd9\x9c\x25\x47\x24\x2d\xdb\x71\x12\x65\x67\xe7\x64\xd9\x49\xfc\xac\x63\xeb\x2c\x4d\x92\x1d\x33\xe3\x03\xbb\x41\x12\xa3\x26\xc0\x01\xd0\x92\x98\xcd\xde\x6f\xbf\x07\x55\x40\x37\xba\xd9\x40\x37\x65\x7b\x67\x76\xcf\xfc\x90\xc8\x24\xba\x1a\xa8\x2a\x54\x15\x0a\xf5\x42\x37\xfc\x27\xa6\x34\x97\xe2\x84\xd0\x0d\x67\xb7\x86\x09\xfb\x2f\x3d\xb9\xfa\x5a\x4f\xb8\x9c\x5e\x1f\xff\xe1\x8a\x8b\xfc\x84\x9c\x95\xda\xc8\xf5\x1b\xa6\x65\xa9\x32\xf6\x8c\x2d\xb8\xe0\x86\x4b\xf1\x87\x35\x33\x34\xa7\x86\x9e\xfc\x81\x10\x2a\x84\x34\xd4\x7e\xad\xed\x3f\x89\x85\x39\xa6\x9b\x8d\x92\xd7\x2c\x9f\xc8\x0d\x13\x7a\xc5\x17\x66\xc2\xe5\x09\x59\x19\xb3\xd1\x27\xd3\xe9\x92\x9b\x55\x39\x9f\x64\x72\x3d\xad\x06\x4c\xe9\x86\x4f\x37\x65\x51\x4c\x9f\x1c\x3f\x01\x40\x5c\x64\x45\x99\xb3\x89\x62\x05\xa3\x9a\x35\x60\x4d\xf9\x7c\x3d\xce\x0a\x59\xe6\xe3\x35\x15\x74\xc9\xf2\x13\x32\x32\xaa\x64\xa3\xfe\x47\x35\x2b\x16\xfe\xa9\xf1\x8a\x2f\x57\x63\x7a\x4d\x79\x41\xe7\xbc\xe0\x66\xbb\x07\x1c\x2e\x96\x05\x1b\x0b\x99\xb3\x71\xce\xae\x59\x21\x37\x4c\x05\x8f\x0b\xba\x66\x27\x84\x8b\xa5\x62\x5a\x67\x52\x18\x25\x8b\x82\x29\x6d\xa1\x28\x6a\xa4\x6a\x80\xfb\x83\xde\xb0\xcc\x62\x70\xa9\x64\xb9\x39\x21\xdd\x83\x10\xaa\xc3\x34\x52\xe9\x05\xbe\xe0\xac\x7a\x01\xfc\x56\x70\x6d\xfe\xbd\xfb\xf7\x97\x5c\x1b\x18\xb3\x29\x4a\x45\x8b\xae\x29\xc2\xcf\x76\x7d\x65\x41\x55\xc7\x80\x3f\x10\xa2\x33\xb9\x61\x27\xe4\x95\x9d\xce\x86\x66\x2c\xff\x03\x21\xd7\xc8\x57\x30\xbd\xb1\x5b\xff\xf5\x31\x02\xcb\x56\x6c\x4d\x71\xde\xc4\x2e\x4e\x9c\x9e\xbf\xf8\xe9\xd1\x45\xe3\x6b\x42\x72\xa6\x33\xc5\x37\x06\xb8\x73\xb4\x33\x75\xf7\xfb\x9c\x69\x42\x89\x23\xa1\x9f\x1d\xa9\xa7\x47\x16\x52\x55\x20\x09\x31\x2b\x46\xb2\xa2\xd4\x86\xa9\x09\xb9\xb4\xff\xa8\x47\x66\x54\x10\xcd\xd4\x35\xcf\x18\x79\xbd\x61\xe2\xc2\x62\x9a\xbc\x91\xa5\x61\x84\x8a\x9c\xfc\x7b\x39\x67\x4a\x30\xc3\xb4\x47\x64\x00\x58\xb9\xad\xa1\x27\x64\x26\xc8\xcf\x2b\x26\x08\x15\xbb\x08\x27\x5c\x93\x4c\x31\x6a\x58\x7e\x44\x28\x11\xec\xa6\x63\xce\x01\xd8\x9c\x6d\x0a\xb9\x5d\x33\x61\x82\x27\x89\x91\x84\x16\x85\xbc\x21\x76\xc7\x2a\x41\x0b\x62\x14\x5d\x2c\x78\x66\x7f\x51\x8c\x66\x2b\x58\xa8\x5b\x8c\x26\x66\x45\x4d\x00\x94\xdd\x6e\xa4\x66\x7e\x72\x44\x2a\xb7\xc8\x60\x0d\x7f\xde\xe4\xd4\x70\xb1\x24\x66\xc5\x75\xf5\x03\x59\xd3\x2d\x29\x18\x85\x19\xe4\x5c\xab\x12\xc8\x13\x80\x5e\x48\x45\x36\xe5\xbc\xe0\x19\x59\xd0\xcc\x02\x10\xcc\xdc\x48\x75\x65\x97\x27\x58\x06\xc2\x81\x50\x1d\x5d\x3b\x51\xec\x9a\xeb\x26\x50\xfb\xd6\x39\x23\x30\x20\x27\xb2\x34\x80\x63\x2f\x41\xae\x2a\xb2\xd8\x9d\x98\xcb\x4c\x4f\x33\x29\x32\xb6\x31\x7a\xea\x31\x30\x76\xb3\xe0\x62\x39\x75\xef\x1c\xb7\x79\x1c\x3f\x8e\x78\xec\x9a\x29\xb2\x91\x5a\xf3\x79\xc1\x8e\x88\xb6\x72\x71\x5e\x30\x92\xb3\x05\x2d\x0b
\xa3\x61\xa1\x16\xc7\x9b\x82\x9a\x85\x54\x6b\x42\x15\x23\xa5\x66\xf9\x84\x5c\x30\x46\x2c\x0d\x42\xb4\x70\x56\xe4\xf0\xcc\x5a\x2a\x0b\xc5\x50\x5e\x20\xab\x9c\xc9\xf5\x86\x1a\x8e\x12\x87\x14\x56\x78\x90\xe3\x13\x72\x61\xa8\x7d\xdf\x0d\x37\x2b\x2e\x80\xc3\xff\xd6\x60\x64\x27\x86\x00\x26\x25\x6b\x2e\xf8\xba\x5c\x13\xb9\x20\xc7\x0f\xc9\x5a\x0a\xb3\x02\xba\x3e\xb2\xbf\x48\xe5\x47\x6b\x72\x70\xb3\xe2\xd9\x0a\x56\xc7\x35\x29\xa4\x58\x32\x75\x38\x19\x55\x80\x37\xca\x0a\x1b\xc3\xbd\x5c\xc1\x4f\xa0\x27\x82\x6f\x5b\x5b\xf4\x9e\xdd\xc5\x38\xca\xa2\x89\x0b\x60\x3c\xe6\x25\x01\xcb\xdd\xc6\xb7\x93\x74\x5c\xb5\x51\x4c\x33\x81\x2a\xa3\x01\x98\xd8\x41\x54\x10\x39\xff\x1b\xcb\x8c\x45\xa9\xb2\x60\x88\x5e\xc9\xb2\xc8\x2d\xbb\x5c\x33\x65\x88\x62\x99\x5c\x0a\xfe\x5b\x05\x5b\x5b\xc6\xb4\x2f\x2d\xa8\x61\xda\xb4\x60\x72\xe1\x76\xcb\x35\x2d\x4a\x76\x04\x9b\xda\xf2\x96\x62\xf6\x2d\xa4\x14\x01\x3c\x18\xa2\x27\xe4\x47\x4b\x2f\x2e\x16\x4d\x95\xe5\xf5\x63\x26\xd7\xeb\x52\x70\xb3\x9d\x02\x37\xf1\x79\x69\xa4\xd2\x53\x50\x01\x53\xcd\x97\x63\xaa\xb2\x15\x37\x2c\x33\xa5\x62\x56\xad\x8d\x61\xea\x02\xb6\xc1\x64\x9d\x7f\x56\x6d\xb9\x7b\x8d\xb9\x9a\xad\x15\xa8\xda\x28\x2e\x96\xc1\x0f\x20\xe5\x13\x14\xb0\x52\xde\xd2\x95\xba\x47\x71\x15\x35\xa2\x71\x43\x33\xf2\xe6\xf9\xc5\x65\xbd\xa9\x2d\x31\xda\xd8\x07\xbc\xd7\x0f\xea\x9a\x04\x16\x61\x5c\x2c\x98\x42\x22\x2e\x94\x5c\x03\x4c\x26\xf2\x8d\xe4\xc2\x38\x11\xcb\x99\x68\xa3\x5f\x97\xf3\x35\x37\x96\xee\x7f\x2f\x99\x36\x96\x56\x13\x72\x06\x46\x83\xdd\xde\xa5\x15\x39\x76\xff\xbc\x10\xe4\x8c\xae\x59\x71\x66\xd5\xec\xc7\x26\x80\xc5\xb4\x1e\x5b\xc4\x0e\x23\x41\x68\xef\xb4\x07\x23\xd6\x82\x1f\xbc\x1e\x8f\xd0\xcb\xfe\x6c\xc9\x05\xb2\x7a\xc3\x32\xbe\xe0\x19\xec\x05\xdc\x22\x56\x4e\x68\xae\x58\x4e\xe6\x6c\x45\xaf\xb9\x54\xee\xfb\x16\x5e\x77\x94\xcc\xa4\x31\xa0\x7b\x4b\xdb\x0f\x52\xe9\xf2\xe5\x45\xfb\x87\xd6\x3c\xab\x71\x7e\x96\x4c\x13\xcd\x8c\x65\x27\x14\x84\x8e\xa2\x96\xbd\xec\x9e\xba\x66\x8a\x2f\xb6\x4d\xbc\x35\xdf\x49\x32\x3b\x23\x58\x2e\xd3\x47\x04\x84\x12\xe8\xdf\x39\x0a\x51\xbb\x8d\x99\x00\x09\xb8\x2e\x4d\x49\x0b\x62\xdf\xde\x54\xe5\xfe\xc3\xf2\x25\x1b\x1b\xa6\xd6\x5c\x80\x72\xb4\x33\x50\x8c\x89\x4c\x6d\x37\x86\x28\xab\xd9\xf4\x64\xe7\xb9\x38\x56\xec\x07\xb4\x2b\xcb\x2f\x4a\xa0\xe8\x39\x35\x56\x70\x74\x8e\x6c\xa1\xaa\xfb\xc1\x00\x6f\x14\xcc\x31\x4b\x48\xc5\xc0\xa4\xea\x84\x09\x3a\xda\x12\x15\xd4\xa5\x55\xdf\x5e\xf4\xcd\xad\x12\x36\xd9\xca\x2e\x74\x49\xb9\xd0\xb8\xe1\x72\x0e\xe8\x2f\xb9\x5e\x81\xf9\xd5\xf5\xb1\x46\x18\x91\x56\x99\x5c\xd3\x82\xe7\x1d\xa4\xb0\x68\x5f\xf0\xc2\xb0\x8a\xa4\x7a\x42\xac\x99\x14\x81\xe8\x96\xd0\x98\xec\xba\xd4\xc6\xd2\x90\x9c\x9f\xbd\x79\x4e\xf4\x56\x18\x7a\x3b\x21\xe4\x85\x13\xfa\xb0\x7a\xae\x09\x5b\x6f\xcc\xf6\x28\x36\x53\x3f\x0d\xcb\x51\x5c\x93\x0d\x53\x56\xc7\x5a\xe1\x80\x80\x58\x05\x47\x48\x81\xa0\xec\xb7\x82\xd0\xb6\xd0\xf1\x1f\xab\xff\x0c\x91\x82\x91\x0d\x12\x05\x27\x0a\xb8\x24\xb4\x03\x17\xf7\xf4\x1e\x48\x55\x84\x15\x9a\xc1\xc4\x3a\x4d\x1a\xcb\x0c\xb8\xcf\x83\x37\x44\x20\x5a\xfe\xcd\x99\xe0\x4e\x7d\xd6\x76\xd3\x2e\x13\xdb\x0f\x37\x6c\x1d\xe1\xcc\xa8\x10\x6b\x0f\xa0\x4a\xd1\x6d\xe7\xef\xb7\xe3\xda\xbc\x1a\x5b\x9c\x8f\xdd\x13\x46\xae\x79\x16\xdd\xdf\x67\xa7\x03\xf6\x8a\x1f\xda\xd8\x1d\x99\x14\x0b\xbe\x5c\xd3\x0d\x20\x90\x72\xe1\x75\xd6\xf9\xf3\x1f\xc7\x4c\x64\x32\x8f\x92\xe2\xec\xb4\xc1\xcb\xf3\x52\xe4\x05\x6b\x6f\x1e\x2f\x5d\x50\x46\x55\x94\xbf\xd7\xd6\x80\xd5\x7a\x6a\x88\xb8\x15\x08\xcd\xad\xb5\xa5\x0d\x1c\xc9\x90\x8b\xd0\x2e\x47\x06\xaf\x17\xc0\xdb\x36\x4d\x85\xf4\x15\x23\xd5\x51\x6e\x8c\x0f\xe0\x61\xce\x9e\x9e\xba\xc9\x9c\x96\x57\xc4\x71\x62\xec\xb7\x16\xe6\x8
1\x69\x9d\xe2\xf1\x0a\x6d\x82\x9c\x8c\xbb\x4b\xb1\x05\x53\x4c\x64\x2c\x77\xeb\x89\xc2\xb5\xd6\xf9\x26\xfa\x6b\x2f\xff\x59\x31\x63\x55\x5c\xf7\xc4\xf1\xe0\x98\x60\xdc\x1d\x8d\xeb\x3f\x8e\xb7\x6a\xe2\x9d\xcb\x82\x67\xdb\x01\x5c\x39\x8a\x3c\x1a\x70\xe9\xcd\x8a\x99\x15\x53\xe1\x76\x8f\x71\x4f\x28\x04\x60\xa5\xda\x4d\x0d\x2c\xd5\x8d\x92\xd7\x3c\x6f\x08\x05\x14\xb8\xd6\xb0\xb2\x47\x86\x98\x88\xc8\xe0\x78\x83\xe6\x35\x18\xaa\x64\x36\x7a\xe3\x50\x39\x1b\x59\x79\x34\x1b\xbd\x86\x05\xd1\x62\x36\x82\x73\xc6\x2b\x69\x58\xfb\x20\xd8\x40\x68\xb7\xec\x92\xa2\xd8\x92\x6c\xc5\xb2\x2b\xdd\xa5\xbb\x41\xfd\xb7\x34\x70\x42\xae\xd5\x7a\xd9\xea\x73\xd4\xcd\xdf\x12\x6e\xac\xee\xb7\x66\x20\xbc\x68\xf7\x05\x59\xc1\xa8\x32\xec\x36\x36\xf7\x1f\x2e\x2f\xcf\xed\xa2\x37\x54\x6b\xb3\x52\xb2\x5c\xae\x82\x17\x04\xc7\x9b\xf0\xc3\x44\xb9\x8e\xb1\xdd\xa8\xfb\x91\x31\xf1\x48\x8e\xfc\xec\x71\x9e\x60\xd9\xc8\x66\x88\x6f\x84\x71\x25\x28\xe3\x3f\xb5\x99\x75\x67\x64\x62\xbb\xb8\x23\x6d\x00\xa3\xc7\x1c\x1c\xed\x3e\x81\x87\x8e\x4a\x6a\x80\x83\x82\x68\x96\x29\x66\x02\x39\xde\x81\x15\xb4\x72\x01\x5c\x43\x78\xdb\x83\xbb\x35\x7c\xb7\x11\xbe\x9c\xa0\x8b\x05\xfc\x16\x5d\x9b\x2f\x97\xe2\x9e\x71\x5b\x16\x60\x70\x45\xe4\x8d\x08\xdf\x71\xd4\xb1\x72\xbb\x0e\x38\xc4\x77\x80\x9c\x09\x10\xff\x6e\x55\x28\xf7\x71\x69\x30\xc7\x85\xb4\xa6\x9f\xd5\x56\x57\x6c\xab\x51\x8b\xdb\x73\x82\x7d\x8e\x10\x53\xe8\x49\xa6\x4c\x17\xbf\x85\xcb\x5e\xf0\x02\x5d\x52\x20\x21\xf0\xb1\x2b\xb6\x3d\xb1\x30\x5b\x3f\xce\x84\x35\x85\x4a\xa1\x99\xe9\x32\xa3\x28\xb9\xe1\x45\x9e\x51\x95\x37\x5e\x60\xe9\x54\x1a\xb9\xa6\x86\x67\xb4\x28\xb6\x64\xc9\x04\x53\x95\xdd\x8c\x0e\x8c\x6e\x83\xaf\x05\x06\x6d\x48\xef\x06\xe9\x90\x1c\xb9\x5c\x5b\xdc\x1c\x58\xb8\xba\x9c\xe3\x3f\xf5\x61\xd7\x5c\x45\x0e\x40\xea\xb9\x34\xed\xb0\xb3\x53\xbb\x98\xc2\xaa\xef\xe6\xe4\xed\x81\x7e\xa9\x22\x32\xe7\x86\x9b\x55\xe8\xef\xbb\xa7\x89\x51\x96\x6a\xda\x48\xc5\x26\x0e\x81\x51\x3c\x75\x40\x04\xdb\x01\x96\xb3\xa2\x0a\xb9\x73\x5d\x16\x86\x6f\x0a\x06\x22\x68\xfa\xd0\x1d\x5a\x72\x27\x79\x9c\xbb\x85\xf0\xf5\xa6\xe0\x9d\x7c\x7a\xfa\xf2\xfc\xd5\x21\x9a\xae\x5e\x2f\x1c\xf0\x09\x9b\x1c\x11\x21\x0d\x9d\x17\x5b\x32\x57\xf2\x46\x33\xa5\x0f\xc1\xc3\x44\x0d\x29\xf8\x9c\x29\xb3\x45\x57\x5f\xa9\xbb\x48\x65\x8d\x8b\xd0\xe1\x36\x41\x95\xb2\x66\x54\xe8\xc0\xd8\xa5\x02\x21\xa0\xd9\xe5\x06\x13\x23\x3b\x49\x24\x41\xe5\xa9\xca\x1b\x0a\x76\x15\x18\xf4\x57\xac\x80\xd9\x2c\x28\x2f\xdc\x9b\xdc\x81\xb5\x04\x2f\x13\x92\xb5\x28\xba\x2c\xcc\x2b\x61\x77\x25\xd5\xe1\x04\x32\x49\x0b\xa6\x33\x2e\x96\x13\xbf\xe9\xb8\x18\xdb\x79\x86\x14\x3a\x70\x5a\xb8\x03\x68\xcd\x47\x52\x59\xa2\xa9\xb1\xd7\xdd\xf9\x61\x2f\x27\x01\xdf\x74\x00\xad\x3c\xc2\xf7\x34\x99\x97\xbc\x30\x63\x2e\xc8\xeb\xd3\xd2\xac\x50\x58\xa9\x0e\x35\x93\x36\xda\xe2\x06\x5b\xd3\xab\xf3\x6a\xd7\x30\x33\x9d\x7e\x91\x94\x27\x54\x5e\x33\x75\xcd\xd9\xcd\xd4\xb9\x41\xc7\x76\x99\x63\x54\x08\x7a\x0a\xc6\xe7\xf4\x33\xf8\x5f\x44\xc7\x5e\xbe\x7e\xf6\xfa\x84\x9c\xe6\x39\x41\x4e\x28\x35\x5b\x94\x05\x9a\x29\x7a\x12\xb8\x08\x8f\xc0\x4d\x75\x44\x4a\x9e\xff\xe9\xde\xfe\x8a\x30\xa5\xa9\x40\x84\xf4\x6a\x27\x94\x3b\xa0\x91\x9e\xbd\xba\x40\x63\xd7\x39\x82\x13\x1a\x25\x22\x97\x9c\x42\xb0\xfc\x8d\x96\x70\xa9\x58\xbd\xeb\x17\x8c\x9a\x52\x31\x0d\x52\xfe\x3e\xf9\xce\x49\xc3\x97\x92\xe6\x4f\x69\x41\x45\xc6\xd4\x05\xbe\xb9\xcb\x71\xe1\x3d\x66\xe0\x30\xd7\x2b\xab\x3b\xe0\x58\xc1\x96\xdb\x23\x52\x2f\x83\x74\xcc\xe0\xd9\xab\x8b\x0e\x88\x8a\x65\x52\xe5\xda\xb9\xa0\x1d\xf8\xf3\x0a\xfa\x85\x03\x3e\xc1\xd9\x82\x06\x2d\x35\x78\x6d\xea\x3d\xd3\xa5\x4c\x77\x15\xf4\x51\xfb\x30\x5b\xed\x2b\xd4\x0b\x78\x13\x80\x6b\x88\x61\xd6\xe8\x
40\x2f\xe0\x9c\x77\x35\xb2\x9b\xeb\xa5\xb7\x74\xc1\x1d\x80\x2b\xea\x9c\xab\x91\x84\x8b\x9c\x5f\xf3\xbc\xa4\x85\xbb\xd7\xd0\x86\x9a\x52\x33\x4d\xb4\x44\xb1\xc5\x44\x6e\xe5\x89\xd2\x4e\x02\x59\x8b\x5e\x81\xd5\x62\xa8\x5a\xb2\x2e\x33\xb3\xba\x69\xb1\x2c\x55\xe1\x79\x26\x3c\x9d\xc0\x1e\xb0\xe7\x4b\xc1\xff\x5e\x32\x42\xd7\xd2\xa2\xb5\x28\x76\xbd\x73\xba\x53\x5d\x8b\xdc\xdb\xc0\xa1\x2b\x14\x75\x94\x73\x70\x54\x77\x0f\xb0\x46\x80\x3a\x41\x8e\x68\x5e\x46\x3a\x7d\xd7\xf1\x96\x89\x95\x83\x13\x9c\x71\x87\xb8\x4a\xec\xcb\x38\x33\xf5\x6d\xc7\xf8\x93\xe1\xde\x72\x24\xed\x39\x52\x05\xd6\x85\x07\x0b\xd8\x40\x89\xe4\xee\x78\xf4\x91\x77\x1d\x16\x92\xe6\x64\xee\xf6\x61\x25\xe2\xad\x42\xec\x22\x01\x33\xd9\xa4\x61\x55\x35\x8c\x53\xab\xd4\xa8\x9d\xac\x14\x56\xea\x2a\xaa\x8d\x2a\xc1\xab\xbc\x37\x0d\x80\x1b\x27\xfe\xe2\xc8\x19\x88\xa7\x3f\x5f\x9c\xd4\x83\x3a\x04\x08\x39\x00\x83\xe6\xb9\xe3\xc3\x0e\xc8\x70\x0d\x7b\x68\x61\xfd\x56\x2a\x76\x32\x1c\x96\x7b\xb2\xcb\xe6\x25\xe4\xfb\xb3\xf3\xbd\x26\x56\x4f\xe3\xc5\xd3\x1f\xcf\x0a\x59\x76\x9e\xed\x67\xfb\x02\x3b\x2d\xf8\x9c\xce\x29\x02\x1c\x28\x60\x63\xb0\x5e\xf2\xf9\x35\x57\xc6\x2d\xeb\x07\xa9\xcd\x2b\x77\x49\x39\x13\xe4\x54\x6c\x1d\x3f\x79\x0a\x75\x49\x99\xed\xc6\x5a\x77\x18\x13\x60\x05\xe8\x2b\x29\xd8\x61\xc5\x2c\x46\x86\x50\x81\xab\xe2\xdb\xa0\x8b\xcf\x77\x24\xc1\xbe\xb6\xc5\xaa\x7e\xfd\x00\x13\x23\x18\x4d\x56\xb2\xc8\x35\xd9\x50\x45\xd7\xcc\x58\x11\xe9\x4d\xfc\x60\x45\x31\x9f\x75\x42\xa1\x4d\xc8\x39\x5e\x25\xa1\x47\x81\x2f\x00\x87\x76\x5b\x85\x98\xba\xa3\xeb\x6b\xa3\xa4\x91\x99\x2c\x06\xba\xbf\x46\x7e\x7c\xc4\xa7\xd3\x1d\x46\x11\x59\xf3\xed\x06\x7c\xbb\x5c\x64\x72\x6d\x17\x1c\xde\x73\x1b\x09\x9e\xf0\x4d\x61\x35\xc4\xe5\x19\xf8\x28\xe2\x66\xab\xff\x74\x4e\xa1\x7a\xd1\xf9\x9b\xd7\xbf\xfc\x47\xb5\x62\xe0\xad\xe6\x57\x09\xc8\xe1\x0d\x0b\xec\x8d\x86\x84\xf4\x17\x0d\xe5\x66\x23\x95\x21\xdc\xa0\xc9\x01\x17\x6c\x09\x7f\xb5\x9f\xb2\xbb\x4d\xa4\x79\x6e\x67\xce\xb4\x35\x59\xfd\x49\x23\xc0\xc9\x8d\xb5\x3a\x16\x52\xdd\x50\x95\xc7\xbc\x82\x1e\xa8\xd4\xac\x8d\xcf\x4e\xe4\x4c\x08\xf9\x33\xd8\x31\x80\x89\x04\xc8\x8a\xf0\xa8\x21\x74\x04\xd7\x70\xb4\x02\x24\xe0\x1c\x70\x69\x09\xb8\xf5\xa2\xb9\xd0\x86\xd1\x1c\x2f\x78\x2c\x08\xef\xbf\x6e\xa0\xfa\x9e\xf6\x8f\x24\x80\x72\x81\x8e\xad\x15\xa3\xb9\xa5\x8e\xb5\x11\x0a\xb9\xd4\x13\x52\x3b\xf3\x70\x21\xd5\xd2\x87\x30\x81\x14\x91\x55\x83\x05\x97\xd1\x72\x17\xe9\xf6\x64\x97\x9a\xe8\x82\x6c\x65\x09\xe7\x53\x2b\xb6\xbc\x49\xd9\x54\xbf\x30\x5b\xb0\xc2\x06\xcf\xd4\xbe\x19\x19\x65\x28\x17\x58\x1b\xd2\x9e\x86\x4e\xa6\xd3\x9b\x9b\x9b\xc9\x8a\x6e\x94\xbc\xdd\x4e\xa4\x5a\x4e\x73\x79\x23\xec\x8c\xa6\x0f\x27\x0f\xed\xd9\x68\x0a\x3f\x8d\xab\x8d\x64\xa2\xde\x45\xe2\xac\x59\x7b\xd8\x52\x6b\xbc\xa0\xa5\x73\x59\x9a\xae\xbd\x78\xd9\x70\x05\xa1\x73\x36\xc5\x38\x8a\x35\x3c\x29\xde\xf1\x7b\x52\x9b\xbd\x60\xfd\x39\xab\x6c\x42\xee\x93\xd9\xe8\xf2\xec\x7c\x36\xea\x96\x95\xf8\xb1\x83\x60\x6e\xce\xf7\xdb\x06\x13\xc8\xbd\xc0\xc6\x89\xdd\xc6\xd9\x8f\xf3\x63\x68\x90\x62\x56\x74\x74\x2d\x3f\x60\xcb\x00\x6c\x8a\x71\xe0\x04\x00\xa1\x06\x56\xd4\xac\xa8\x58\xb2\x88\xb7\x96\x24\x3d\xb6\x24\xe5\xb5\xc5\x1f\x2f\xcf\xce\x13\xbf\xa6\x05\xc7\xc0\x6b\xb4\xe8\x6d\x44\x11\x98\x2b\x03\xd4\x71\x38\x3c\xae\x8f\x1b\xdb\x2b\xc6\x0d\x31\xa5\xdb\x61\x40\xbd\x87\xf2\xbd\xe6\x39\x53\xe7\xd5\x14\xf7\x50\xc3\xad\x27\xdd\x6a\x7d\xdc\x43\x63\x89\x49\x49\x59\xef\x4c\x1f\x48\xe1\xc5\x44\x29\x72\xa6\x8a\x2d\xdc\x24\x37\xec\xf6\x6a\xe2\xa9\x9d\xd4\x7d\x00\xab\x7c\x48\x9b\x4d\xc1\x7d\xc8\x57\xf5\xe2\xdd\x65\xa5\x04\x0b\xf8\x4f\xf0\xb4\x8c\x51\x61\x4e\xb6\xa0\xcf\xda\xbf\x32\xb1\x2b\xfa\x29\x64\x3f\xf4\x26\
xf9\x73\x9b\x34\xf4\x46\xfb\x75\xe8\xca\xed\xe0\x10\xec\x23\x3f\x12\x97\x49\xd5\x5b\x15\x6b\x10\xe4\xf4\xe7\x8b\x96\xcd\x11\x39\xe3\xf6\xc0\x4d\x13\xc0\x4e\x7e\x17\xaf\x7d\x53\xdd\x0f\xeb\x64\x30\xe6\x09\x5c\x47\x52\xad\x79\xf6\xb2\x57\x0e\x84\x9f\xd6\x65\xf9\x0e\x84\x9d\x6d\xd3\xa0\x53\x2f\x78\xd2\x16\x2a\x54\x00\x7d\xdc\x9b\x5a\xf2\xc5\x4b\x92\x01\x60\xdb\xb2\xe6\x0c\x01\xa6\xb6\x19\x7e\x7a\xa4\x68\xf8\x71\x67\xfd\xf7\x40\x68\x07\x84\x8f\x84\x50\x1f\x01\xfb\xc1\x10\xfa\xea\xe5\xd3\x0f\x8a\x4c\x18\xba\x1f\xf6\x46\x7e\x2e\x56\xc4\xc2\xdf\x72\xb1\xbb\xbb\x07\xac\x0d\x7c\x59\xda\x50\x61\x38\x5c\x80\x21\xda\x76\xa2\xcc\x41\x4c\xfc\x64\x0d\xa5\x01\x30\xdd\x95\x38\x55\xcc\x99\x51\xb3\x91\xe3\xc1\xd9\xe8\x84\x9c\x7a\x86\x04\x25\x48\x9e\xee\x31\x57\x6b\xd9\xac\xe9\x15\xd3\x70\x0f\x62\x55\x4a\xce\x32\x8e\x71\x4f\xd6\x16\xe7\x70\x84\xc4\xe3\x90\x51\x54\x68\x7b\x00\x18\x00\xb8\xa0\x5b\xa6\xc8\xc1\xe5\xd9\xf9\xf4\xe2\xe2\xe5\x21\x71\x1a\x1e\xa4\x9b\x8b\x08\x74\x43\xe0\x9a\xc9\xfe\xe7\xe2\xb0\x9f\x05\xd0\x1e\x26\xad\xeb\x49\x40\x71\x9e\x73\xbc\xa9\xf6\xd2\xd1\xb9\xa1\x48\x75\x99\x90\xcb\x4c\x4f\xe8\x8d\x9e\xd0\x35\xfd\x4d\x0a\xc8\xd0\x38\x85\x3f\x9f\x9f\x5d\x4c\x31\xbe\x76\x5a\xe5\x3a\x2c\x4b\x9e\xb3\xa9\x25\xfe\xd8\x13\x1f\x22\x85\xf4\x64\x65\xd6\xc5\x67\x59\x31\x1f\x30\x5b\x47\xab\x57\x2f\x9f\x22\x9d\xbc\x4b\xa2\x41\xa7\x80\x0a\x03\x40\x76\xd2\xc9\xe2\x83\xd4\x04\x6a\xa3\x7f\x28\x62\xff\xf1\x68\x15\xc5\x3c\xad\xa1\x48\xaf\xd5\xec\x3f\x63\xbf\x2b\x06\x8c\x7c\xf5\xf2\xe9\x40\xf1\x93\x30\x99\xfd\x27\x1d\x01\x54\xbf\xd4\x02\x4c\x0e\x19\x28\xf0\x96\xd9\x66\x1f\x43\x68\x99\x6d\x3e\x86\x21\xf4\xfd\xd9\xf9\x7f\x83\x21\x64\x27\xff\xcf\x66\x08\x71\x26\xcc\x69\x96\x31\xdd\x3b\xb6\x3b\x30\x0b\x9f\x0d\xb2\x7f\x56\xf2\xc6\xfb\x99\x28\xfc\x36\x60\xfb\x62\x4e\x8b\x51\x3c\x33\x2c\x77\x87\x7b\xe7\x25\xee\xa0\xca\x5d\xf4\x8e\x15\x64\xdf\x17\x72\x4e\x0b\x2b\xcb\x2e\x30\x10\x05\x03\xa4\xab\x77\x0d\xd1\x0c\x0d\x1f\x0a\xf8\xec\x10\x6a\x73\xc9\x3e\x64\x79\xc8\xda\x7d\x9c\x01\x44\xee\x53\xb1\x25\x8a\x2d\x2d\x3b\xbb\x54\x13\x2b\xd5\x7e\x3a\x3f\x1b\xee\xfb\xf3\x9f\x2a\xde\x02\xe6\x4c\x02\x4b\xa7\x25\x02\x21\x57\x6f\xb2\x94\x72\x59\x30\x90\x7f\x41\x88\x29\x13\x4b\x2e\x18\xde\x5d\xaf\xe4\xcd\xd8\xc8\xa9\xc7\xd6\x38\x10\x83\x5c\x2c\x3f\x5b\x02\x1e\xde\x0d\xa6\xb9\xd3\x2e\x2f\x65\x96\xa2\xc9\xde\x06\x0c\x2c\x1b\x80\xee\x90\x04\xe3\x2d\x30\x82\xce\x21\x1d\x91\x3c\xc8\xd6\x60\x44\xd3\x35\xf3\xd4\x81\x30\x9a\x9f\xce\xcf\x0e\x09\x45\xa3\x6b\x47\x82\x0c\x41\x01\xba\x80\x03\xba\xb6\xa8\x56\xd3\xec\x95\x1c\x44\x74\xe7\xec\xe1\xda\xdb\x82\xfe\x0a\xc4\xa7\x2c\xf4\x53\xbf\x49\x56\x24\xbd\xa7\xc6\x67\x88\x37\x47\xe4\x0f\xa7\xeb\x70\x0f\x0d\x18\x08\x84\xfd\x70\xca\x6e\xa0\x8e\xea\x37\xc8\x1b\x92\x31\xb4\xc5\xfb\xdd\x1d\x3d\xab\xe9\x76\x2f\x91\x53\x4c\x6c\x08\x64\x1c\x19\x9d\xfe\x7c\x31\x3a\x22\x23\xb8\x6e\x1c\xa5\xbc\x87\xf6\x33\x7a\x4a\x15\xfb\x91\x19\x5a\xd8\x67\xbe\x3f\x3b\xb7\xff\x83\xa0\x1a\x43\xb3\xab\x11\x66\x63\x8d\x7e\xba\xd8\xac\x98\x62\x49\xef\xe6\x10\x32\x8f\xed\x61\xa4\x6f\x84\x9d\x77\xcf\x98\x6a\xd2\x3d\xe3\xbe\x4f\x78\x17\x89\x0b\x0d\x75\x4b\xed\x19\xe7\x30\xd0\x33\xea\xc5\xd3\x1f\x07\x30\x5a\x0f\x47\xf6\x9b\x5e\x3d\x66\xd7\x00\x76\xc6\xdc\xe0\x61\x0e\x41\x18\x0b\xe1\x1b\x18\xfa\x0b\x32\x10\xbe\xa3\xc6\x79\xa0\x77\x38\x33\xed\x5d\xc6\x4c\xd7\x7c\x42\xce\x5d\x2a\x67\x83\x7f\xfd\xcd\xf0\x08\x79\xef\x85\x13\x3a\x09\xe6\xeb\xf3\x40\xbf\xe8\xd3\xec\xe3\xd4\x05\x3e\x7e\xde\x3b\x66\x1e\x50\x76\x17\x37\xb5\x90\x39\x3b\x97\xdd\x51\xaa\x6d\xef\x89\x1b\x1a\x77\x4f\xbf\x72\x23\xe2\xd7\xf3\xe4\xce\x57\xc6\x2d\xd8\x9f\xae\x8d\xc9\xa7\x6b\xe3
\x00\xe8\xa7\x6b\xe3\x4f\xd7\xc6\x9f\xae\x8d\x3f\x5d\x1b\xbb\xcf\xff\xd8\x6b\xe3\x8d\xe2\xd7\x9d\xe9\x38\xa4\xad\x9a\xdc\xc8\xb8\x36\x3e\x77\x03\xbc\xbe\x8d\x69\xc4\x7d\xb4\xb0\x83\xd9\xcd\x51\x3d\x4b\x8b\x9f\x70\xe2\xd7\x0c\x1d\x93\x73\xaa\x72\x82\x2e\x9a\xf4\xbe\xa9\xaf\x03\xba\xe2\x0e\xad\x3e\x74\x61\xc5\x7a\x78\x84\x38\x81\xe0\x6c\x94\x5b\x41\x79\x98\xf0\x05\xc4\x1b\x29\xe0\xdf\x13\x61\x92\xa8\x73\x24\xc6\x36\x50\x2c\xaf\xa6\x2e\x09\x03\x72\xd1\x25\x21\xd5\x01\xb0\x76\xc3\x47\x40\x9e\x76\x4e\xad\x55\x5a\x26\x8c\xc5\xad\x5f\x16\x03\x39\x13\x56\x7c\x0e\x4b\x42\xe8\x2a\xc7\xe2\xbe\x9b\x7e\x66\xa5\x6c\x8f\x55\x8f\x1e\xd2\x3a\x32\x1e\xa2\x74\x83\x0c\x9e\x3a\x36\xbb\x72\x8c\xfa\xca\x40\x9d\x59\x2d\xf6\x83\xe6\xa7\x93\x3d\x5d\xd8\xb9\xa7\xeb\xe0\x6f\x41\xd7\x6c\x12\x86\x80\xc7\x79\xad\x7a\x33\x6e\x1b\x01\x4f\xfd\x26\x05\xd3\xae\x3a\x0a\xe4\x22\xe4\x62\xdf\x18\x6e\x52\xc5\x71\x63\x9d\x9d\xbf\x48\x81\x09\x39\xee\x4b\xdc\x94\xf6\x5b\xac\x44\x14\xe2\x06\xa7\xb4\x8e\xdf\x3a\x5a\x46\x28\x95\x62\xc2\x14\x5b\x6f\xd1\xf9\x25\x38\x03\xe0\xf4\xe7\x8b\x23\x3c\x3a\xe3\x91\xfd\xfb\xb3\xf3\x2a\x52\xb6\xa3\xce\x42\x45\xb8\xfb\xed\x40\xdb\xbe\xed\x66\x5f\x68\x8f\x19\x51\xba\x29\xa3\x7d\xe0\x7e\x37\x00\xae\x1d\xff\xfa\x50\xfa\x8f\xb0\xfb\x56\x52\x9b\x60\xe3\x1d\x91\xb9\x2c\x45\xcc\x59\x6c\x24\xac\xc8\xcd\xfd\xeb\x07\x80\xc0\xc7\x8f\x1f\x61\x29\xaa\x52\xe3\x9c\x15\xd3\x1b\xe9\xea\x0d\x41\xf6\xab\x9b\x6e\xdc\xec\xa5\xa2\x66\xd1\x96\xf9\xd4\x19\x5a\xdf\x2f\xd4\xae\x39\x85\x07\xea\xe9\xba\x44\x0c\xaf\x48\x66\x82\x3c\x93\x4c\x83\xe5\x36\x2c\x78\x9f\x34\x02\xf8\x13\x04\xd9\x6f\xa6\x43\xc4\xe1\x91\x4f\xe5\xb1\xb3\x65\xb7\x9b\x82\x67\xdc\x74\xe6\xa3\x91\x5a\x09\xba\x4c\x48\x24\x8b\x2b\x16\x21\x4a\xc8\x16\x4b\xa3\x34\xb9\x09\x5a\x47\xd7\x41\x1b\x01\x34\x4c\x8c\xf4\xa1\xde\xf1\xc0\x07\xe8\x9c\x8e\x3a\x32\xfe\x73\x57\x9d\x43\x4e\xab\x09\x44\x20\xef\xa5\x6f\x30\xdb\xd6\x5f\x65\xf5\x4a\x02\x2b\x72\xf3\xad\xa0\x6b\x97\xd1\x47\x8b\x42\x66\xf0\x8a\xf9\xb6\x4e\xde\xfb\x96\xac\xe4\x0d\xbb\x66\xea\xc8\xbe\xd9\x49\xb8\x98\xeb\xca\x50\xc3\x33\x00\xef\xa1\x41\xee\x08\x72\x04\x9a\x9d\xd5\xb1\xa2\xda\x28\xc9\xc4\x7d\x97\xc5\xe7\x55\x43\x9b\x5e\xa8\xb5\xa0\xe6\x92\xba\xee\x8c\xfa\x27\x3d\x89\xeb\xc3\x12\x23\xec\xc8\xbe\x70\xfe\xb1\xdf\xea\x91\x5f\x87\xf8\x77\xee\x9c\xf1\xde\xe9\x75\x4c\x98\x94\xd6\xf8\x38\x93\x6b\x5f\x85\xa6\x27\x27\xa9\x35\xba\xaa\x56\x46\xc9\x06\x4b\x3d\x58\xa9\x0b\x67\x68\x5f\x56\x2f\xab\x47\x77\xed\xee\xa7\xdb\xea\x30\x64\xe9\xab\x18\xca\x1a\x84\x91\x7c\x36\xed\xa4\x5a\xf3\x35\xbb\xdc\x6e\x62\x1e\xac\xa6\xa9\x5c\x0d\xc6\xd4\x47\x5f\x66\xe8\xc7\x17\x3f\x3e\x77\xf9\x2a\x61\x29\x94\x15\xbd\x8e\xed\xa7\x60\xc2\xf5\xbd\xf2\x65\x55\xbc\xc7\x79\x83\xdc\x3d\x35\x17\x55\xf5\x26\x2c\x80\xb3\x87\x50\xf1\xfa\xc3\xbe\x64\xdb\xc0\x93\x2f\x58\x71\x62\xff\x1b\x93\x7c\x45\x11\x2e\x6e\xce\x04\x5b\x70\x83\xd7\x9a\x01\xac\x23\x32\x2f\x0d\xf9\xe1\xf4\xdc\x9e\xdb\x71\x83\x69\xc3\x8b\x98\x03\xa1\xd4\x41\x1d\x44\xd8\xdc\x6a\xeb\x1c\x5a\x00\xd1\x1e\x80\xb8\xc0\x5b\x15\x10\x5f\x13\x42\xbe\x4f\x24\x38\x13\x2c\x03\x46\x51\x0d\x19\x76\x6b\xc8\xc1\xca\xac\x8b\x23\x92\x69\x7d\x44\xfe\xa6\x8f\x20\xf1\xec\x90\xa0\x93\x20\xb1\x8e\x08\x70\xbb\x3a\xff\x2c\x10\x18\xd2\xc4\x0b\xc5\x68\x5e\xe3\x94\xe5\xe4\x80\xaf\xe9\xd2\xda\x6c\x65\xce\xe5\x11\xb9\xe6\x39\x93\x31\x98\x38\x23\x3f\x93\x82\x1b\x53\x58\xc5\x44\xd8\x2d\x4a\xbd\xea\x58\x69\xf8\x1a\x8d\xcf\x6c\x53\xda\x65\x46\x0d\x4b\x48\xeb\x76\x6b\xb1\xa7\xa5\x25\xe5\x62\x52\xf9\x5d\xec\xb9\xe1\x6f\x92\xad\xa4\x30\x52\x4c\xd6\x2c\xe7\xe5\x1a\xee\x06\xcd\x8a\x8d\x97\xbf\xf1\xcd\x78\xc3\x04\x2d\xcc\x76\x9c\x3
f\x3a\x9e\xe7\x4f\xbe\xf9\x6a\x71\x4c\x1f\x76\x4b\xc7\x64\x3d\xa4\xe6\x8e\x09\x64\x80\x65\x24\xbb\x79\x1a\x95\x0b\x11\xad\x50\x8a\x90\x60\x85\xd7\xe8\x51\xbf\x62\xc4\x09\x79\x3e\x59\x4e\xc8\x6c\x64\x49\x3d\xcd\xb4\xfe\xd6\x6a\x0a\xa5\x99\xf9\x63\x69\x16\xe3\xaf\x67\xa3\x23\xff\xa3\x65\x83\x59\xe2\xde\xcc\x0d\xbb\x8f\x8f\x00\xf9\xa6\xfa\x7a\xf9\xc5\x2d\x3c\x46\x66\xa3\x20\x3e\x6c\x2a\x33\xc3\xcc\x58\x1b\xc5\xe8\x3a\x0d\xf4\x97\x71\x06\xc5\x7c\xa7\xf8\x3f\x5d\xce\x2d\x34\x9f\xfd\x78\x59\xaf\xdb\x49\x0a\x74\x4b\x25\xb6\x36\x21\x67\x58\x94\x62\x5c\x63\x10\xc2\xa0\x2c\xcb\xbc\xf9\xee\x8c\x1c\x3f\x7a\x7c\x7c\xd2\x1c\x74\xf2\xc7\xf4\xcd\xd2\x6c\x34\x9d\x8d\x88\x2e\xe7\xe0\x00\xb8\xff\x76\x36\xfa\x76\x36\xaa\x1d\x1b\xbf\x92\x31\xcc\x14\xdd\x03\xa2\x01\x3b\x0a\xd3\x09\x2e\x7b\x5e\x92\x8b\x13\x28\x6b\x59\xe1\xaf\xda\x17\x6e\x97\xac\x99\xd6\xf8\x07\xa4\x7a\x53\x95\x70\x73\x59\x2a\xf9\x0d\x45\xa0\x1c\x27\x62\xd6\xa9\x2c\xab\xcf\x33\x96\xa3\x21\x62\xd1\x3f\xc3\xfb\x26\xc4\x6b\xb4\x5c\x0e\xb1\xc3\x29\x31\xf2\x8a\x41\x81\x84\xea\xd0\xc8\x0a\x79\x33\xf1\xeb\x87\x5f\xc3\x8a\x8f\x96\x59\xe3\xbe\xb3\xba\x0a\x9a\x65\x4a\x9a\x19\x6b\x08\xd9\xc9\x40\xf1\x9d\xba\xe0\xd6\xcd\x8a\x1b\x06\x92\x8b\x66\xec\xc8\x0b\xed\x38\x66\x3d\x30\x8d\x08\x10\x5b\x6f\xe9\xd4\xbf\x10\x17\x5b\x62\xc0\xa4\xa3\x05\x9c\xde\xe3\xde\x48\xb7\xbe\x70\xb0\x9f\xa0\x6e\x43\x3e\x38\xfc\xd7\x7f\xfb\xbf\x8e\xbe\x3d\x99\xcd\x66\xa3\xe9\xdb\x5f\xff\x34\xf9\x63\x0f\x58\xcf\x57\x2d\xce\x01\x44\x16\x5a\x7a\xb4\x7b\x2c\xcb\x54\x55\x1f\x12\xc6\xfd\x4e\x75\xe0\xc5\x35\xc1\x9b\xc0\x3a\x75\x14\xa4\xfa\x04\xce\x82\x57\x2c\x1e\x08\x32\x1b\xfd\x71\x36\x22\x07\x48\xdf\x29\xf9\x7b\x29\x0d\xcb\xc7\x48\xe3\x43\x37\xad\xc6\x97\x47\x01\x8f\x44\xa1\xba\xfd\xf8\xf5\xc3\x87\x47\xe8\x44\x55\xca\x9e\x56\x9d\x2b\x42\x96\xf6\xc4\x09\x50\x75\x9d\x92\xee\xb1\x1e\x05\x8a\xbc\x02\x9c\x42\x36\x45\xa9\x81\xfc\x15\x75\xc8\xf3\x5f\xce\x9e\x9f\x5f\x92\xd9\xcc\x4a\x2c\x64\xb6\xb3\x37\x13\x42\xc8\x8b\x38\x9b\xda\xd7\x02\x21\x7c\x81\x1d\x0b\x12\x25\x30\x39\xbd\x38\x7b\xf1\x22\x80\x4f\xb1\x8c\xad\xfd\x3f\x8f\x43\xe4\x9a\x30\x9d\xd1\x8d\xdb\x85\xb3\xa8\xbb\xd8\x55\x07\x3c\x21\x7f\x3d\xf8\x13\x3f\x3c\xb8\x1d\xbf\xfd\xeb\xaf\x6f\xc9\xc1\xe1\x6c\x86\x3c\x36\x9a\xfe\x69\xf2\xc7\xd9\xed\x83\x07\xe3\xd9\xed\xf1\x77\xb3\xdb\xaf\xbe\xfb\xf5\x8b\xdf\x03\x41\xf2\x3b\xc8\x91\xdf\x41\x8c\xfc\xee\xa4\xc8\xef\x95\x10\xf9\xdd\xca\x8a\xdf\x41\x54\x1c\x4e\x2d\xe4\xe8\x84\xd3\x6f\x3c\xf8\x96\xdc\x1f\x30\xb1\x3f\x1e\xbc\xc7\x3b\x7e\x1f\x1d\xcc\x66\x6f\xdd\x97\x5f\x7d\xf7\xeb\xef\x6f\xff\x3a\xbb\x7d\xf0\x6c\x34\x9b\xfd\x7a\x78\x7f\x74\x78\x78\xff\x5f\x62\x5e\x8e\x8f\x55\x74\x50\x77\x54\x66\xe8\x39\x14\x3c\xb7\x56\xea\x1b\x57\xcb\x32\x56\x03\xce\xd9\xee\x27\xe4\x0d\x78\x5e\x76\xf7\x4f\xd3\x6c\x88\xc0\x6d\x85\x24\x3a\xdb\xbf\xba\x7f\xea\x58\x69\x5d\x95\x70\x45\x45\x5e\xb0\x1c\xbc\xeb\x8d\x9a\x8f\x60\x61\x69\x22\x4b\x43\xe6\x6c\x21\xad\x69\xe7\x4b\x73\x76\xd7\x28\xb2\xba\x86\xdb\xb3\x63\x3b\x62\xa7\x79\x51\x04\x02\x69\x36\x72\x0b\x76\x1a\x69\x36\x7a\xb1\x14\x52\xb1\xee\x5b\xa2\x99\x2f\xbd\x89\x00\xd0\xfd\x6a\x4d\xe3\x00\x4c\xcc\x77\x43\x34\x13\x9d\xce\x52\xea\x2e\x2b\x1f\x3f\x78\x60\x35\xc7\xe3\x07\x5f\x7b\xf7\x17\x3b\x82\x5b\xcb\x36\x3e\x0e\xf8\xc2\x07\xd8\x15\x72\xb9\xec\x66\x31\xbb\xdb\xb1\x10\xd3\x21\x4a\x9c\x4c\x96\xc2\xec\xc0\x72\x1a\x09\x3a\x14\x6c\x14\xe4\x1f\xac\x99\x51\x3c\xeb\xf4\x63\x26\xd6\xef\xd1\x16\x5d\x7e\x56\x48\xdd\x79\x31\xd1\x9a\x91\xbf\x22\xb3\xe8\xc2\x5b\x85\x06\x36\x96\x5e\xad\xd4\x8f\x74\x99\x25\x70\xe9\x98\x29\x70\xf4\xda\x27\xfc\xa2\xb0\x7e\xa5\x8f\xd0\xab\xca\xa9\x04\xf4\x43\x13\x
70\xbb\x41\x1f\x4a\x17\x6c\xb3\x62\xad\x8b\xde\x4c\xae\x19\x9e\x56\x9a\x61\x01\xf7\xc8\x8a\xd1\xc2\xac\xec\x19\xd7\x6e\x0a\xa9\xc8\xcf\xac\x2b\x0d\xc0\x57\xb8\xba\x07\xde\x9e\xb2\xa0\x86\x5f\x37\x5f\x71\x30\x1b\x59\x23\x0a\xbf\x99\x8d\x0e\x2b\xdd\x34\x67\x44\xd3\x05\xeb\x3c\x79\x71\x20\x4a\x3e\x21\xe4\x87\xca\xe5\x03\x93\xaf\xea\x4f\xaf\xe9\x16\xd5\xcc\x9c\xe1\x05\x36\x28\x07\x11\x75\x8a\x30\xa5\xa4\x35\x71\xa0\x04\x98\xf4\xf1\xdf\xe1\xae\x6a\x70\x03\x96\xca\x5e\x6f\x58\x0e\x15\xdf\x71\x31\x9d\x7b\x20\x27\x39\xa7\x4b\x21\x35\x87\xf0\x07\x8b\xb0\x82\xad\x2d\xc1\x5e\x88\x2a\xa9\xa0\x73\xfa\xd5\xcc\xbb\xf0\xba\x45\x87\x94\xce\xa8\xd0\xed\x93\xba\xe7\x26\xbc\xdf\x6d\xa2\x84\xe7\x76\xb0\x9b\x72\xf7\x06\x13\x46\x95\xe8\x20\x30\xc6\x82\xe8\x8a\x10\xef\xf6\x53\x8d\xa3\x52\x76\x4c\x10\x73\x11\x19\xdf\xa9\x50\x40\x16\x5b\xb2\x9c\xc9\x9c\x9d\xd3\x65\x97\xab\x64\xc7\xf7\xd3\x7c\x20\x52\x7e\x16\x02\x5d\xd0\x90\x8f\xb1\x02\xd9\xd8\xe7\x27\xfb\x56\x85\x8d\x1c\xa5\x12\x15\x61\xd1\xfb\x52\xc3\x08\x7c\x38\x58\x6d\x30\x0a\xd4\x1d\xe5\x46\x30\xdf\xb1\x9d\xef\xf8\x5f\x71\xee\x99\xcc\xd9\xbf\x4d\x2c\x3a\x46\x47\xee\x1a\x25\xfc\x05\x6c\xe1\x2e\x6e\x05\x71\x5d\x0f\x9c\x40\xd1\x2b\x76\x4b\xd7\x9b\x82\x1d\x35\x5e\xf4\xe5\x83\x47\x08\xbf\x76\xb0\x09\x7c\xb2\xeb\xae\x9b\x3a\xb7\x02\xc0\xff\xf2\xc1\xa3\x4a\xf2\xe9\x09\x39\xab\xee\xa4\xe0\x26\x2a\x40\x3e\x3c\xf2\xe5\x83\x47\x91\x6d\xf5\xf8\xc1\xe3\x1a\x8e\x97\x18\x48\x54\xfe\x9b\x95\x0d\xcf\x69\xb6\xf2\x92\x50\x78\xd1\x8a\x58\x4e\x2a\x6c\xd0\x03\x65\x51\x04\xf2\xb9\x2e\x06\x13\x86\xdf\x4c\xc8\xf3\xe5\xb8\x72\x72\x28\x7a\x33\xc1\x8e\x37\xa5\x66\xca\xd5\x72\x6c\x35\xbf\x81\x12\x7b\x6a\xba\xa0\x39\x9d\x3f\xfe\xf2\xab\xc7\x5f\xd1\x6f\xe6\x8f\x1e\x64\xd9\xa3\xc5\x03\xfa\x78\xfe\xf8\x98\xe6\x0f\xbf\xfe\xea\x78\xf1\xcd\x97\x5f\x3f\xfc\x8a\x7e\xf3\x68\x0a\xa6\xa6\xf6\x8f\xb9\xd8\x95\xa9\x5d\xc6\xb4\x83\x18\x1d\xab\xf2\xa5\xb9\x2b\xbd\x56\x17\xd4\xee\xbe\x8d\x68\xc6\x7f\xf4\xec\x8e\x3d\x5d\x9e\x03\x8b\xe2\x7d\xb0\x0a\xc6\xb1\xea\xc5\x77\x76\x5d\x77\x56\x2c\xee\xb1\x52\x7f\x40\x5e\xe9\x2b\xa5\x15\x0c\xad\xb6\x54\xdb\x63\x5d\xb1\x1d\xde\x95\x77\xf4\x76\x20\x55\x3f\x92\x26\xa9\x1b\x76\x81\xae\xfb\x99\xec\x5b\x7a\xc8\xc5\x5e\xb1\x1c\xa7\x3a\xbc\xf0\x72\xe7\x83\xcd\x58\x4b\x01\xbb\x7a\x95\x74\x47\xed\x46\xa8\x69\xe6\x6c\xbf\xef\xfc\x1b\x8e\xc8\x2f\xe3\xea\x1f\xf6\xaf\xe6\x17\x3f\x48\x1d\xf5\xf7\x84\xe3\xce\xa5\x32\xcd\x27\xcf\x95\x34\x12\x2d\x84\x9d\xaf\xc7\xae\x40\x62\x04\x70\x93\x7c\x41\xc5\x3b\xa7\xe7\xd1\x6f\xd5\xcc\x3a\xac\xf2\x5b\x4f\x37\x1b\x06\x06\x78\x04\x38\x6a\xfe\x30\x7e\xcb\x45\x3c\xec\x62\x8b\x02\x28\xed\xb2\x24\xdd\x84\x62\x70\xdd\x5d\x15\x58\x12\xb7\x58\x2e\xbf\xc1\x82\xf7\xc1\xba\xdc\x14\x34\x03\xf3\xb8\x35\x8d\xe8\xd5\x7b\x74\x72\x15\x29\xeb\x99\x11\x05\xf0\x31\x8f\x27\xe6\x82\xaf\x26\x57\xd1\xc4\xda\xa3\x21\x89\xee\xef\xcc\xfb\xc5\xe2\x95\x14\xec\x83\xe0\x74\x77\xda\xb1\x6b\x12\x20\xf0\xb6\x0a\x8a\xf4\x8e\x7c\xcd\x4c\x35\xaf\x57\xd6\x98\xfd\x20\xd8\xc4\x36\x47\x5d\x38\xad\xe9\x1a\x0d\x39\xd8\x76\x53\xbc\x75\x17\xe6\x05\x14\x9c\x33\x3c\x97\xde\xe1\x62\x13\x1f\x8d\xfc\xe8\x38\x2c\xf2\x2b\xd2\x31\x76\x8d\x69\x51\xb0\xbf\x06\xf0\xa8\x7a\x45\xd7\xec\x8c\x6a\x76\x9a\xff\xad\xd4\xc6\x1e\xb8\x06\xdd\xd5\x45\x1f\x0e\xd8\x09\xcc\x74\x5a\xff\x94\x22\xaf\xb3\x6c\xdc\x6d\x1d\xd4\x9f\xab\xa5\x09\x1a\x92\x13\x82\xc6\x4e\x0d\xb1\xdb\x81\x40\x5c\xdf\x1a\x28\x97\x0b\x9d\xba\xc4\x0e\xb0\x3a\x6b\xcc\x57\x6a\xc9\xe8\x86\x1b\x5a\xf0\xdf\x68\xbc\x53\x85\x3d\x42\x37\x8c\x45\x5d\x67\xe0\xcd\x46\x2d\x81\x3c\x1b\x35\x12\x41\x92\x65\xeb\x67\xa3\xdb\x71\xa5\x3c\xec\x5f\xb3\x51\x63\xca\xb5\xd9\
x86\x8b\x47\x0c\x25\x6e\x40\x83\x86\x3c\xbb\x6b\x73\xb7\x24\x4d\xe2\xc0\x86\xb5\xc6\x69\x6c\xbb\xd4\x94\xa9\x8a\xd9\x1f\xb5\x0b\xe7\x1f\xb9\x1a\xf9\xe3\x66\xf3\x9a\x98\xfc\xb1\x83\xc1\x1e\xbe\xa9\x0b\xb9\x42\x7d\x80\x63\x98\xe3\x77\x75\x63\x9e\x7a\x5f\x9b\xf6\xc4\x63\xb0\x55\xcd\x4d\xf0\x0e\x68\xf3\x83\x45\xad\x81\xe3\xe0\xec\x61\xf1\xe4\x83\xa5\xd1\xfe\x6c\x46\xb1\xad\x8e\xc7\xf8\xaa\xb1\x65\xe6\x3f\x1a\x55\xc6\x9b\x9e\xf8\xd6\x91\x13\xe2\x66\x8e\x76\x75\x62\xea\xe1\x24\x63\x84\x84\x46\x79\xc8\x0c\xc1\x79\xa2\xb6\x8d\x76\x2c\x21\x21\x3d\xd2\x22\x20\xab\xb0\x7a\x55\xb3\x53\x44\x9e\x0d\xbf\x8a\xdc\x11\xcd\x76\xc6\x3f\x44\x84\x44\x7c\xdf\x22\xe7\x7a\x33\xb8\xb5\x71\x0f\x16\x8d\x83\x5a\x6b\xbf\x8d\xba\xca\x7f\xba\x65\x88\xd4\x56\x6f\x58\x2a\xae\x08\xae\xeb\x30\x14\x85\xb8\x23\x4f\x82\xfb\x2d\x77\x47\xf1\xf0\xc9\xf1\x13\xa2\x9d\x5b\xec\xf1\xe4\x61\x4c\xa4\xac\xe9\xed\x4b\x26\x96\x66\x75\x42\x8e\x1f\x3c\x7c\x1c\x1b\xc5\x85\x1f\xf5\xa0\xd7\xf1\xff\x2f\xbf\xff\xf5\xed\xf8\xff\xf8\xec\x5f\x3e\xff\x3f\xef\xdd\xff\x62\xf2\x60\xfc\xcd\xe9\xf8\x2f\x7f\x7d\xf7\xff\xd0\xf1\x6f\xbf\xff\xbf\xbf\x7e\x71\x67\xaf\xb7\x28\x8b\x82\xce\x0b\x76\x42\xa2\xfb\x20\xed\x18\xc7\xd2\xc2\x2f\x22\x79\x5f\x4d\x05\xe3\xc7\x06\x4e\xe9\x66\xf1\x82\x45\x78\x51\x69\xe9\x91\x64\xf7\xda\x88\xe8\xf2\xed\xa2\x78\xe5\x02\x22\xf2\xb9\x80\x92\x2e\x98\xd6\x94\x32\x75\xeb\x0e\x53\xb5\xc3\x11\xf7\xa4\x7b\x67\x10\xb3\xd6\x2b\xb1\xa9\xe3\x40\x5f\x95\xde\x95\x61\x76\xc1\x59\xe1\xfb\x1c\xc3\xba\x77\x64\x9d\x3e\x0e\xfb\xf1\x09\x4f\xf3\x6d\x78\x4f\xac\x9d\x73\xb5\x28\x73\xe4\xd5\xda\x17\xed\x52\x5e\x32\x5e\x70\x13\x4f\x7b\x32\x0a\x4d\xd5\xa0\x48\x75\x0b\x1b\x71\xe1\x14\x01\x29\x24\xd1\x65\xb6\x0a\xd0\x86\xa4\x80\x09\x1a\x59\x83\xee\x96\x52\xfd\x79\x72\xe8\x44\x1a\x98\x25\x77\xcf\x47\x0f\x34\x72\x45\xdc\x97\x75\xbb\x08\x9c\x60\x14\x64\x43\x46\xdc\xbb\xa7\x91\xbc\x93\xa0\x35\x0e\x59\x51\x88\xb1\x62\x8b\x05\x36\x6f\x2c\x2c\x1d\xa2\xfd\x82\xf0\xc3\x6b\x03\x2b\x77\x8a\x06\xf8\x03\xa3\x83\xc6\x58\xe1\x79\x70\xe8\x29\x40\xb4\xe2\x74\xed\x9b\x58\x3a\x06\x76\xab\xad\x3c\x25\x90\xc8\x8f\xad\xd0\x5c\xfa\x7e\xdf\xc2\x0b\xb9\x74\x50\xea\x96\x67\xbd\x87\xf6\xf4\xc2\x47\x9f\xff\xe7\x17\xbf\xfc\x97\x9c\xcd\xc8\xe7\x19\x3f\xf9\x3c\xdb\xbc\xfb\x7c\xc1\x4f\x3e\x5f\x6c\xde\x7d\x7e\xa9\xdf\x7d\xae\xcc\xc9\xe7\x1b\x9e\x8f\xbe\x25\xda\xd5\xb6\xc9\xa4\x42\x9d\xd9\x93\x70\xe7\x63\xab\x72\x99\x95\x15\x26\x4e\x7c\x42\x55\x36\x97\xc2\x30\xe7\xf5\x02\xa3\x00\x0d\x86\x71\x9e\x49\x71\x3d\x7d\x38\x79\x30\x6d\x48\x26\xac\x6c\xf3\xf5\xe4\xe1\xe4\x51\x77\xf7\x00\x32\x58\xea\x0f\x92\xfb\x8d\x2b\xdf\xcf\x0f\x3e\xff\xfd\x60\xf6\x9f\x6f\xc7\x5f\xfc\xfa\xa7\xb7\xff\xf7\x2f\xcf\x7f\x3d\x38\xaa\xff\x3e\xbc\x3f\xfb\xaf\xc3\x3f\x1d\xbc\x3d\x1d\xff\x85\x8e\x7f\xfb\xf5\x8b\xdf\x67\x6f\xdf\x3a\xf5\xf0\x0e\xbe\x38\x98\x1d\xbc\xfd\xeb\xe1\xaf\x5f\xcc\x0e\x0f\xff\x74\x70\x14\xff\xed\xf0\xfe\xec\xd7\xc3\xc3\xdf\xdf\xfe\xf5\xf3\xb7\x27\x99\x30\xaa\x38\xf9\xf5\xd7\xf8\x75\xea\xa0\x64\xee\x7d\x9b\x7f\x35\x77\x67\xe8\x38\x6b\x9b\x0c\x09\xc2\xd7\xc6\x04\x8a\xdb\x31\xcf\x47\x87\x03\x54\x45\x8a\x4f\x03\x25\xd2\x92\x8b\x18\x9a\x64\xf7\x80\x17\x07\xb5\xbd\x91\x80\x88\x69\x72\x77\xb1\x38\xec\xc6\x4b\x00\xee\xda\x92\x42\x76\x48\xe1\x78\x90\xcb\x47\x62\xe3\xbb\x19\x30\xef\x9b\xaf\x96\xf8\xd1\x5d\x36\xf5\xf8\x37\xfd\x95\x54\xe5\xda\x6c\xa6\xb1\xdd\x34\x1b\xf9\xd9\xd1\xdd\x7d\x7f\x56\x4c\xb1\x49\xdc\xad\x8d\xdd\xd4\xb1\x4a\x17\xa8\x6d\x6b\xc9\xfb\xbe\x3d\xf3\xb2\xcb\xee\x0f\x95\x3c\x44\xf2\x70\x0d\xc3\xf7\x6e\x2b\x9a\xa8\x7b\xd4\xaa\xbf\xd9\x55\xe5\xc8\x54\x9d\x75\xeb\x0b\xbb\xe4\x8e\xaa\xd0\x94\x3c\xf2\x34
\xaf\xd3\x21\x6b\xc6\xaf\xef\xae\x16\x43\x0e\xdd\x60\x51\x11\x0c\x93\x4a\xc1\x13\x76\x06\x78\x21\x15\xe2\x7d\x29\xe3\xfb\x68\x58\xfd\xa9\x2a\x69\x61\x8f\xd2\x2a\x75\xa2\x43\x34\xb5\xf2\xcc\x0f\x49\x42\x25\x01\x7b\x57\x2b\x8d\xe7\x56\x56\x40\xd3\x85\x50\x06\xd6\x93\xd1\x5b\x5d\xc8\x8e\xed\x17\x5d\x36\x3e\xd0\xbd\x66\xea\x7f\xf5\xb9\xa4\x13\x32\xb0\x96\x65\x7b\x91\x17\x00\x27\xbd\x42\x29\xd8\xeb\x45\x5f\xad\x97\xe1\xf5\xc7\xaa\x9c\xfb\xfe\x81\x95\xdd\x4b\xf8\xe6\x3a\x2e\x9b\xff\xbb\x66\xf0\x24\x39\x7c\xf8\xfb\x07\xbf\xbd\xd9\xd7\x18\x9f\xf2\x57\x6e\x2f\xce\xab\x6f\x9c\xd9\x80\x3c\x31\x60\x49\x41\x9f\x70\x6a\x7c\xb8\x12\xec\x71\x1f\x74\x1b\xcd\x22\xac\x3f\x83\x0b\x2e\x11\x7f\x22\x8b\xdc\x40\x45\x17\x3c\xf2\xcf\xb5\x4c\x25\xc7\xfb\xfe\xd7\x01\x0b\x96\x8b\xe6\xe2\x52\xa2\x38\x7d\x32\x68\x2f\x09\x9d\xea\x85\xcc\x68\x71\x1c\x75\xaa\x87\x9f\xa1\xe5\xb1\xae\x98\xea\xaf\x51\x36\x86\x3c\xa8\x01\xc3\xd6\xe9\x32\x10\x7e\x58\x4e\xd9\x7a\x40\x95\xdd\x31\xa1\x65\x67\x83\xb6\xf6\xb0\x81\x1c\x39\x26\xc5\x66\xc8\x2a\x04\x1b\x50\x5d\x6f\x4c\xca\x32\x8b\xb7\xc3\xad\x87\x65\x6a\xf0\x52\x1f\x0e\x18\xb7\xe8\xbc\xd8\x6f\x8f\x12\x83\x46\xd1\x32\x4f\x04\xd2\x06\xe3\x0a\x36\xa0\xb8\x2d\x2e\x75\xc8\x1a\x80\x8f\xe3\xe6\x6d\x6b\xe0\xf1\xd0\x81\x83\x5f\xdd\x15\x43\xd2\x39\x30\xad\x0b\x82\x81\x5f\x0e\x1d\x98\x96\xed\xc1\xc0\xaf\x3e\xa4\x64\xac\x4f\x1f\x03\x44\xa3\x8b\x90\x4d\x1e\x53\xea\xd1\x8d\xbc\x34\xff\x9a\x2a\x5e\x83\xde\xf2\x75\xb9\x26\x05\x7e\x8b\x0a\x64\x80\xd8\x73\xa2\xd7\x09\xd2\x1e\x31\x5a\xaf\x6e\x00\xe4\x30\x96\xd3\xae\x70\x90\x30\xad\x74\xb3\x30\x8f\xfa\x99\xcc\x2d\xfa\x84\x3c\x7e\xf0\x4d\x3f\xb9\xd7\x5c\xb8\xd1\x5f\xf7\xef\x08\x24\x39\xb4\x37\xeb\x95\xc7\x9b\x68\x79\xaf\xf0\xd3\xac\x2d\x02\x55\x9d\x90\x74\x7f\x7e\x76\x8e\xff\x16\xe5\x7a\xce\xd4\x3f\x46\xf9\xdf\x11\xf1\x4f\xbe\xfc\xf2\x51\xff\x8e\xac\x30\xdf\x2f\x60\x86\xe3\x7d\x68\x8d\xe3\xfe\xe2\x4e\x60\x6b\xf6\x15\x15\xff\x38\x55\x26\x3b\x4b\xbe\x87\xc7\x36\x7b\x38\x80\x42\x53\x3d\x98\x9b\x91\x17\xa6\x72\xdd\x24\xa2\x5a\xaa\x33\x10\x99\xf5\xe9\xca\xcb\xc0\xdd\x84\xe7\xfb\xa0\x6e\x82\xb7\xdc\x78\xce\x32\xaa\xea\x23\x5d\x0f\x4c\x41\xd7\x2c\x07\xfb\x6a\xa9\x67\x23\x5f\xfe\xa2\xc3\xa9\xb5\x91\xb9\x0b\x32\xf7\xef\xeb\x81\x1c\x81\x63\x24\xb9\x51\xdc\xb0\xea\x32\x21\x98\x75\x3f\x4e\x2f\x57\xac\x76\x50\x40\x43\x61\x7a\x4d\x39\xdc\x38\xb9\xfe\xba\x0e\x99\xae\x16\x58\x77\x93\xe7\xf0\x83\xa5\xe3\xaa\x23\x79\xe5\xd7\x6b\x86\xba\x06\x58\xa6\xf1\x78\xd9\xf0\xe3\x0f\xc2\x5a\x16\x25\xe6\x38\xa0\x30\xa0\xb9\x3b\xea\x43\x30\x39\x08\xf6\xc1\xab\xaf\x4b\x48\xe1\x3d\x78\x73\xb9\xae\x12\x2f\xfc\x8e\xff\x8e\x64\x9e\xd4\x9f\x39\x23\xb9\x92\x9b\x4d\x9d\x94\xa1\xa8\x61\xce\x98\xd7\x84\xdd\x66\x8c\xe5\x55\x46\xc1\x20\x86\x52\xa5\x30\x7c\xcd\xee\x69\x5f\xdf\xdf\xdd\xbe\xb5\xf1\x71\x4f\x93\x8c\x6e\x68\xc6\xcd\xb6\x77\xe1\x76\x9f\xe0\x31\xda\xfe\xfd\xd2\xd3\x1f\x7c\x0a\xd8\x84\x7d\xe7\xac\xde\x4f\xf8\x8e\x58\x66\xdf\x49\x9d\x8a\x96\x0c\xc7\x26\xd2\x20\xc7\x7b\xa0\x36\xd5\xb7\x77\xe8\x0e\x62\xb2\x1e\xc8\x2b\xda\xb8\x29\xac\xae\x35\xdd\x1b\xb1\xbf\x44\x96\x2a\x11\x46\x06\x16\x95\x1d\xe6\xeb\x19\x3b\x92\x0c\x10\xd2\xff\x04\x15\x5a\xa1\x1a\x02\xdd\x98\x52\xb1\x33\x29\xaf\x92\x2e\x84\xdd\x32\x0a\x8d\x07\x83\xb3\xb2\x4b\x7f\xc2\xaf\x7b\x0a\xe4\xd7\x4e\xdd\x0c\xc1\xb5\x6f\x56\xe3\xde\xdc\x04\x54\x21\xab\xf7\xdb\x2d\xe1\x41\xc7\xb7\x54\x32\x5c\x63\x67\xf5\x3b\x21\x1b\x0e\x15\x76\xe1\x88\x8e\xda\x7b\x9b\x64\x04\x1f\xa6\x81\x93\x6d\x14\x69\x98\x0f\x99\xf6\x70\xe7\xcf\x9a\x9a\x6c\x75\x39\xa0\xd9\x4b\x63\xa1\xd5\x53\x2d\x4f\x88\x37\x01\xe0\xf7\x78\x7d\xad\xfa\x33\x67\x64\xc3\x94\xb5\xde\xb0\x8d\x2
e\xca\x4e\x58\x34\xd6\xd5\xea\x28\x32\xdd\x0b\x74\xf4\xfc\x96\x66\x66\xe4\x1b\xc7\x30\xfb\x2f\x9f\x0b\x8d\x33\x83\xda\xbe\xe7\x8a\x2d\xf8\x2d\x0c\xeb\xf7\xcf\xf9\xe7\x37\xf0\x10\x82\x41\x16\xf4\x6f\x0b\xef\x91\x8f\x92\xd7\x4f\xf8\x81\x2b\x27\x6f\xf2\xd4\x91\x65\x2e\xb2\x06\x7e\x05\xc6\x76\x6f\x71\xb3\xed\x05\xcb\xab\x26\x5d\x76\x16\xd5\x7c\x53\xef\x41\xd0\xfd\x36\xb6\x9b\x4d\x2c\x5e\xaf\x66\x8a\xc1\x93\x85\x34\xf3\x6a\x06\x64\xb4\x90\x72\xe4\x8b\x87\x02\x8f\x43\xfa\x4d\xc5\x0e\x39\x8e\xe8\x05\x2b\x15\x0c\x9c\x53\x35\x82\x5a\x18\x42\x62\x82\xbb\x83\x61\xbf\xaf\xee\x0e\x15\x14\x71\x32\xd9\xaa\xff\x70\x4c\xfc\x5c\xb8\x1e\xb4\x07\xc9\x40\xf7\xda\x98\x00\x0b\xf5\x8e\x1a\x44\xa7\xc1\x87\xfd\xc1\x47\xfd\xd6\xce\xf7\x27\xf7\x30\x45\x6a\xe7\xf8\xde\x7f\x50\xb2\x76\x2c\x6e\x29\x90\x6e\xbe\x36\x1f\xde\x4d\xf9\x38\x66\x17\x55\xd3\x17\x1e\x81\x9f\x80\x51\x8e\xfc\x3f\x40\x6a\x1c\xb9\xb0\x48\x36\xae\x53\xc4\x73\x56\xf0\x35\x37\x50\xea\x35\x79\xa5\x5b\x4f\x17\x6d\x26\xa3\xb6\x81\xa5\xc7\xb5\x5b\x33\x3a\x18\xf0\x0a\xfa\x26\x5e\x4a\xa6\xfe\xcc\x19\x31\xaa\x14\x58\x8e\xca\x6d\xc6\xc0\x16\xda\x29\x81\x9a\x2e\xa2\x16\x62\xa1\x3a\x3c\xb8\x5c\x43\xa9\xed\x36\xd2\x6c\x43\xc1\x5c\x85\x62\x74\x5e\xd6\x1a\xd9\x57\xc7\x9e\x40\x89\x08\xef\x95\x69\x54\x11\xb6\x0a\xb9\x0a\xa1\xea\x77\x2c\xb9\x33\x74\xbf\xb7\x68\xf0\x79\x7b\xf8\x69\x3b\x1d\x0b\x81\x9f\x54\x44\x04\x6d\x69\xa5\x17\x46\xf7\xc6\xd7\xb8\xc5\x34\xe3\x1f\x1b\x7a\xbd\x3b\xea\xa0\x17\xe4\x93\x87\x4f\xbe\x0c\xa2\x12\x8e\x07\xe0\x7e\x50\x48\x81\x1b\x3d\x24\xb0\x00\x3f\x75\x78\x41\x3c\xb6\x20\x11\xc3\x82\x9f\xc1\x02\xab\x56\x14\x7b\x53\xd2\xe9\x97\x08\x3d\x9d\x92\x44\xb2\xf6\x62\xbf\x33\xac\x35\x45\xd6\x7e\x95\x8d\xc1\x26\xff\xbf\x24\xeb\x10\x37\xd8\xb8\x36\x2d\x7a\x46\xf5\xbb\x77\x07\xf9\xc1\xd6\xf4\xf6\x05\x18\xfe\x09\x09\x34\x20\x74\x97\x0c\xa8\x6b\xd1\x38\x6b\x45\x73\xfe\xfc\x67\x37\xf7\xaf\xf9\x64\x15\x27\xd3\x90\xd3\xff\xa8\xa3\x56\x18\x16\x5f\xd9\x4b\xbe\x8c\x5b\xd0\x26\x08\x6b\xe9\x60\x28\x00\xc6\xee\xa7\x76\x61\x98\x2e\xb1\x53\x51\x03\x34\xbd\x91\x76\x1b\x59\xeb\x71\x40\xc1\x0d\xff\xc1\x70\x22\xdf\x4f\xa4\xcb\x15\xe7\x73\x32\x34\x71\xb9\x18\x50\x7b\xa2\x07\xb9\x5c\xef\xa4\x74\x10\x48\x65\xf0\xf9\x1c\xc1\xec\x0e\x27\x84\xfc\x90\x4c\x05\x23\x58\x08\xc7\xda\xb3\x21\xb9\xec\xdc\x2f\x5f\x5e\x90\x0d\xd5\xda\xac\x94\x2c\x97\xab\x10\xee\x7b\x37\x0a\x76\x3a\x7e\x1f\xc7\xb0\xcf\x32\x09\x53\x35\x2d\x72\xc3\x00\xbe\x9e\xbc\x37\xff\xb1\x04\xc7\x95\x0e\xcc\xd2\x18\x08\xb7\xc1\x96\xe9\x43\x45\xaf\x1b\x60\x07\x01\x29\x57\x00\xd2\x78\xa0\x2b\x80\x04\xee\x80\x30\xf0\x7e\x3f\x77\x00\xd9\x2b\x1e\x64\xaf\x7b\xc0\x3b\x1c\x10\x06\xc0\xac\x9b\x4e\xb9\x45\xbb\x60\x6f\x4b\x7e\xda\xf8\xce\x5b\xe3\x83\x80\x26\x2c\xf6\x2e\x8b\x7c\x10\xcc\x01\x56\xfb\xb0\xb8\xf1\xfa\x33\xd4\x72\xdf\x07\x9f\xef\x63\xbd\x93\xfd\x6e\xc1\xf6\xbb\x7f\x1c\x66\x9d\x93\x5e\x0b\x3d\x08\xe5\x1d\x68\xca\x91\xb4\x39\xd7\x1d\x1b\x3c\x08\xea\x9e\x19\x4b\xe1\x67\x88\xf5\x95\x08\xd8\xad\x3f\x7b\x5c\xfb\x0f\xbb\x8c\x1c\x6a\x63\x91\x78\xd1\x83\xae\x19\x0e\xe8\x47\x3d\xd0\xd8\x22\x83\x0c\x2e\x02\x0b\xc6\xc4\xbf\xfd\x54\x9a\x4b\x3f\x8c\xe8\xb4\x66\x72\xe2\x07\x54\x6a\x7b\x01\xfe\xa4\xd5\xe2\x9f\x4f\x5a\xed\x93\x56\x6b\xcf\xe9\x93\x56\xfb\xa4\xd5\xfe\x77\x68\xb5\x81\xf7\xba\x2f\xe5\xf2\xbb\x7d\x92\x37\x47\x8d\x87\xba\x73\x38\x5d\xb4\x4e\xfa\x7e\xdb\x07\xe9\xb9\x6b\xb9\x66\x02\xee\xde\x19\xae\xa4\x29\xa3\x34\x54\x69\x07\xe1\xd4\xc8\xbe\xbc\xa7\xab\x7c\xc8\x76\x12\x65\x02\x30\x16\xa5\x70\x49\x8c\x71\x10\x47\x55\x5e\xa4\x1b\x9a\xba\x28\xfe\xd0\x99\x90\xa9\xe9\xef\xb8\x58\x3c\x9d\x02\x17\xcb\xae\x23\x25\xe5\x74\x18\xec\x62\x69\x
3a\x52\x52\x9e\x91\xbd\x5c\x2c\x5d\x8e\x94\x04\xf0\x94\x8b\xe5\x85\x09\xda\x0c\x60\xa6\xb0\xbf\x7c\x41\x34\xf5\x78\x89\xee\xe6\x69\x19\x20\xb7\x0a\xb9\x6c\xd4\x90\x4d\x6d\x50\x17\x7b\xfb\x32\xb1\xe3\xda\x89\x74\x0d\xd8\xc1\x3e\x86\x66\xf9\x01\x01\x93\x51\xe8\x48\xd9\xc0\xd7\x02\xcd\x89\xb0\xbe\xec\x4e\x26\xde\x84\xa4\x0b\x87\x56\xb8\x81\x32\x1e\xad\xb2\x93\x03\x6b\x88\xa6\x99\xe0\x67\x36\x1f\x52\x4b\x34\x28\x25\x3a\x3a\x4c\xcd\xb4\xbb\x60\x66\x77\x11\xce\x52\x40\x79\x0c\x2b\xe1\x83\x82\xa3\xbd\x68\x18\x56\x8a\xb4\x2a\x3c\x9a\xec\x1c\x3f\xac\xb8\x27\x96\x35\x58\x94\x05\xb0\xb7\x2f\x3b\xda\xd3\x50\x95\xb9\x19\xec\x53\x90\x94\xcc\x53\x12\xf2\xae\x85\x49\xd3\xe4\xaa\x4a\x96\x76\x15\x28\xdd\x09\x2f\xf1\x15\x91\x53\x22\xa0\xaa\x95\x3c\x7a\x29\x97\xbe\x4f\x34\x54\x29\x1d\xc5\xca\xe9\xc2\xc8\xbb\xb7\x90\x4e\xed\xf2\x68\x85\x54\xff\x79\xef\xde\xd1\x41\x3c\x6f\xe7\x88\xbb\x66\x00\x57\xb5\x4c\x2f\x58\xc1\x32\x23\x3b\x72\x30\x9b\x86\xc8\xce\x03\x50\xd5\x43\x63\x21\x90\x05\x2f\x8c\x6b\xba\xac\x19\x58\x24\xd5\xf0\xce\x3a\xdb\xd8\xa2\x08\xb6\x54\xa4\x65\x17\x16\x96\xc0\x57\xf8\xcd\x51\x19\x17\xdd\xc8\xd4\x2b\xaa\xf2\x2a\x9d\xab\x14\xd0\x8c\x30\xac\xc9\x80\x3d\x80\x70\xae\x5c\x2c\xf7\x2e\xa4\x08\x17\x61\xcf\x6f\x7d\x8b\x92\x21\x79\xc3\xed\x47\x9a\xad\x80\x0a\x3a\x67\x05\xd1\x1e\xa1\x8e\x17\xa0\x96\x52\x8c\x5f\x2f\x57\xac\x31\x0e\xf6\xc2\xe9\xab\x67\xb1\xb3\xfa\xf0\x92\x47\xa7\x89\xe9\xb8\x8e\x16\xfe\x97\xe4\xb5\x4b\xd5\x14\x02\x77\xf5\x11\xa1\xe4\x8a\x6d\x31\x0c\x84\x8a\x3a\x1a\xdc\x05\x19\x17\x55\x67\xf9\x2b\x16\x17\x50\xf6\x61\x84\x17\x43\xcc\x10\x57\xc4\x15\x4b\x66\x1d\x36\xd0\x71\xc5\xb6\x3e\xb6\x1e\xf1\x62\xbf\xa8\x0e\xe1\x15\x2a\xfa\xaf\xcc\x08\x31\x89\x4c\xe9\x81\x67\x2a\x8f\xb5\xc1\xd3\xaf\xd0\xac\xd8\x06\x73\x81\x35\x12\xe2\x9e\x46\xa4\x5b\x6e\x5c\xf1\x4d\x5f\x9c\x20\xf5\x1b\xda\x61\xdf\x35\x76\xf5\xe0\x91\xff\x5e\x88\x23\x6b\xf3\xda\xff\x3d\xbf\xe5\xba\xe7\x98\x6c\x69\xf9\x4c\x32\xfd\x4a\x1a\x18\xfd\xde\xc8\xc1\xa9\x0d\x46\x8d\xd3\x36\x50\x4f\x19\x4f\x75\x76\x7d\x2e\x1e\xca\x2f\xf3\x45\x7f\x12\x54\x85\x62\xae\xad\x16\x96\xca\xe3\xa0\x72\x02\x69\x07\xde\xbb\x02\x84\x14\x63\x50\xa3\xe9\x43\xbb\x2b\x2f\x13\xc2\x47\xb4\xda\x77\x84\x98\x0b\x5f\x95\x46\x79\x63\x1a\x38\x05\x94\xb0\xf8\x0b\xd8\x91\x50\xb2\x31\x27\x39\x34\xb6\xc4\xa0\x4b\x6a\xd8\x92\x67\x49\xd0\x6b\xa6\x96\x8c\x6c\x20\x24\x33\x65\x0c\xf4\xbb\x3b\x07\x3a\x17\xfa\x4f\xe3\x7d\xae\x87\x71\x42\xd4\x8c\x2b\xb4\x47\x06\xf4\x1e\xf4\xd3\xf3\x03\x85\xf0\xd2\x0a\x94\x08\x36\xbc\x31\x47\x8b\xf3\x5e\x89\xd6\x8b\xb1\x5d\x5d\x84\xaf\x46\x79\xbe\xa6\x1b\xcb\xf9\xff\x69\xc5\x33\x30\xd1\x7f\x91\x0d\xe5\xd6\xaa\x3c\x4d\xb7\xd7\x0a\x9f\x70\xd1\x73\x21\x70\xa8\xaa\xae\x89\xa5\xc2\x35\x2d\x7c\xe6\x83\x20\xac\x48\xb5\xba\xf5\x91\xcb\x81\xb6\x3c\x22\x37\xd0\xb7\xdf\x8a\xde\xca\x31\x31\xba\x62\x5b\xd7\xd8\xa1\x87\x54\x76\xf0\x0b\xe1\x7a\xdc\xec\x6c\xca\x4a\x4f\xc1\xb9\x7c\x04\xbf\x8d\x26\x3b\x0a\x36\x02\xbb\x47\xed\xde\xd9\x2c\x93\x39\x3b\xb7\xfb\xd0\xbe\xbb\xd7\x24\x0b\x07\xbb\x8a\x29\xba\x6a\xa7\xea\xad\x29\x22\xaf\xbd\x69\x96\xad\x58\x5e\x16\xdd\xcc\xe2\xbc\x48\x5d\xa6\x58\xc3\xa0\x72\xc6\x54\x50\x8f\x1a\xba\xc6\xbd\x0a\xe7\xd2\x01\xde\xda\x6f\x6b\x09\x4d\x97\x0c\xe5\x45\xd7\x61\xbd\xa7\x0e\xb9\xcc\x13\xb6\x6a\x37\x72\x42\x53\xb5\xea\x09\xda\x52\xdd\x60\xbf\x0e\x6d\x92\x1b\xb4\x43\x4d\x1b\x9a\x90\x31\x47\x48\xb3\xf7\xb6\x8c\x6e\xe4\x82\x8b\xf2\xd6\x2d\x72\xac\x64\xc1\x26\xcd\x27\xed\xa1\x93\xa9\x13\x72\xef\x9e\x7b\x69\xf5\xca\x2a\xa6\x3c\x02\x59\xb7\xad\x75\xac\x68\x0a\xb2\xbe\x51\x35\xfe\xce\xa5\x65\x86\x59\xc5\x3b\x04\xda\xc7\x32\x4e\xde\x25\x06\x36\xf3\xde\
xd6\x31\xd9\x3b\xcb\x64\xb8\x95\x9c\xd6\x75\x90\xaa\x75\x07\x4b\xb9\x07\x2a\xca\xca\x7e\x6b\x99\xec\x71\x79\xd7\x63\x35\xef\xa0\x68\x98\xe5\xdc\x7f\x43\x59\x79\x4a\xfb\xae\x38\x06\xdf\x4a\x0c\xb1\xa2\x77\x96\x33\xd4\x92\xee\x5d\x10\xa6\xff\xed\x63\x4d\xf7\x82\x74\x66\xe1\x3e\x16\x35\xd9\x07\x61\xfd\x96\x35\xb9\xa3\x75\xdd\x8f\xad\x96\x05\xdc\x6f\x61\xf7\x82\x6c\x58\xe0\x7b\x58\xd9\x83\xe6\xda\x69\xf0\x27\x2d\xed\x5e\xb0\xbb\x96\xf8\x50\x6b\x9b\x0c\x0d\x30\xd8\xeb\x4a\x6f\x68\x74\xc7\x90\xa0\xe2\xd4\x61\x7f\x80\x15\x4e\x86\x5e\xf5\xf5\xcf\xb9\xd7\x22\x27\x7b\x5a\xe5\x64\x28\x56\xef\x6c\x9d\x27\x5e\xec\x9a\x32\xee\x6b\xa1\xa7\x96\x12\xda\xee\x7b\x5a\xe9\x3d\x4e\xec\x70\xef\x0d\xb5\xd4\x13\x20\xa3\x36\xfc\x10\x33\x60\xe0\xb9\x2e\x3a\xc0\xc8\xc2\x95\x31\x1c\x54\x4f\x30\x18\xde\xb4\x7c\xc2\x1f\x6a\x0b\x35\x76\xc2\xe9\x68\xe2\xdd\xb2\x50\x2f\x9b\x0e\x50\x4b\x4a\x70\xe4\x17\x09\x3d\x31\x13\x8d\x4e\xd0\x4d\x5b\x34\x97\x99\x9e\x66\x52\x64\x6c\x63\x74\xf3\x82\x74\x6a\xc9\x64\xc6\x54\xe4\xe3\x7a\x19\xd3\xf7\xad\xc5\x7e\x09\x4d\x2b\x72\xbc\x4b\xbd\xac\xe0\xc2\x6a\x8c\xa1\xf6\x50\x03\xc5\x19\xf0\x97\x84\x89\x44\xc5\x96\xc0\x0c\xd1\x1a\x01\x3e\xf6\xb9\xb3\x8a\x6f\x0a\x46\xfe\xb5\xda\x36\x47\x58\xc6\xf8\xdf\xb0\x7c\x41\x9c\x6b\xfc\xc6\x82\x56\xbf\x9e\x9d\xff\xd5\xff\xf5\x6f\xef\xe3\xad\xc4\x19\x0c\xb6\x4c\x9f\xe3\x6d\x6a\xd8\xa4\x81\xb9\xe5\xba\x92\xcc\x46\xba\xd4\xd9\xa4\x9c\x87\x9b\x4a\x57\xa0\xc1\x25\xec\x16\x45\x03\x90\x9e\x90\x9f\x57\x4c\x34\x32\x6e\x7b\x5a\x27\x93\xca\x98\x80\xbd\xf8\x4a\x5e\xe0\x59\x94\x1d\x41\x62\x25\x53\xf5\x37\x20\x03\x5e\xc9\xe7\xb7\x2c\x2b\x0d\x7b\x6f\xe7\xe0\x3e\x3e\xdf\x7f\xaf\x2d\x57\x5c\x6f\xc3\x72\xad\x59\x7a\xa8\xd7\x37\x89\xcb\x2b\xb6\xad\xfc\x8c\xde\x66\xee\x0d\xba\x08\xbc\x8e\xde\xc6\x40\xa3\xe5\x5b\xdf\xd6\x6e\x3d\xf7\xe5\x5f\x5c\x8d\x0d\x47\xf4\xb4\x7f\xb0\x28\x2a\xf2\xd8\xc3\x47\x51\xe0\xf4\xfe\xdb\xdd\xd6\xaf\x3f\x90\xdb\xba\xd2\x21\xdd\x26\x76\x60\x37\x3f\xff\x7b\x49\x8b\x09\x79\xe6\xdc\x1a\x3d\x24\xf5\xc3\x1d\x80\x1d\x2f\xd7\x0d\x2f\xf2\x8c\x2a\xcc\x0a\x41\x41\x92\x84\xa8\x25\xf2\x17\x05\x09\x97\x51\x51\x89\xb1\x9a\x53\x34\x36\xbc\xdf\x50\x65\x78\x56\x16\x34\x7d\x74\xb2\x7b\x7f\x29\x55\xd2\xc5\x3c\x88\x76\x35\xbb\x5f\xb0\x4c\x8a\x7c\xf8\x19\xf9\xb2\xfd\x64\x48\x4d\x68\x41\xc4\x14\x97\x79\x5f\xa6\xb2\xe1\x6b\xd6\xde\x78\x07\x78\x31\x5d\x15\x2b\x5a\x78\xd9\x56\x09\x8c\x9e\xdd\x63\x56\x4c\xdd\x70\xcd\x5a\xd1\x4e\xae\x37\xe9\x61\xad\x45\x6a\x29\x90\x16\x97\x61\x7b\x25\xee\xee\x18\x0d\xfa\x67\xdc\x5c\xdd\x36\x74\x64\xad\xc0\x26\xa1\x2e\xa4\x82\x56\x50\x07\xb9\x04\x78\xec\x9a\x67\xe6\x70\x42\xfe\xc2\x94\xc4\xc4\x7d\xb6\xc4\x50\x0a\xb7\x6d\x7b\xd3\x9e\x21\x14\x95\x41\x20\x2a\xd5\xe4\x01\x39\x00\x90\x84\xaf\xd7\x2c\xe7\xd4\xb0\x62\x7b\xe8\xaf\x63\xf5\x56\x1b\xb6\x4e\x2d\x3b\xa8\xf8\xf5\x24\x95\xfb\x38\x2c\x66\x13\x96\x30\x98\xbb\x7e\xf2\xf7\xfa\xb5\x98\xf6\xad\x26\x1a\xac\xe2\xd4\x7b\xaf\x8c\x8e\x9e\x03\xc3\x68\xde\x3a\xb8\x66\x88\x88\xae\x98\xec\x6f\x96\x4f\x29\x51\x6c\x69\xf7\xad\xdb\x71\xef\xb9\x33\xdf\xeb\x02\x22\xf1\xb0\x3d\x96\xf2\x8c\xf6\x35\x3d\xf4\xc3\x3c\x05\x7c\x17\x9a\xba\x00\xdd\xa0\x00\x65\x0f\x66\xd2\xe5\x5a\x36\x92\x74\xc4\xb5\xa6\xcb\xcc\xa5\x58\x0d\x5a\x21\x9d\xe6\x6b\x0e\x07\x96\x3e\xb7\x7a\x73\x74\xdd\x2b\x35\x6c\xed\x08\xfd\xc1\xad\xcd\x27\xd8\x0d\x82\xef\x58\x63\x56\x50\xbe\xd6\xad\x3e\x3f\xd8\x01\x49\xde\x10\x69\x6d\x75\xb1\xf5\xa3\x68\xa6\xa4\xeb\x97\x81\x71\x13\x87\x9d\x3d\xaf\xc1\xfb\xeb\xf2\x13\x2a\x7c\xf9\x58\x73\x77\x5e\x40\x8f\xbc\xb3\xce\xb2\xd6\xea\xbb\x7c\xf3\x56\x0a\x62\xf8\x8d\xf3\xce\x13\x3a\x97\x25\x58\x3d\x5c\x55\xaf\xd9\xdf\x65\xef
\xd7\xf2\xfa\x46\x30\x65\xd5\xf6\x20\xc7\xfd\xce\x53\xad\xaa\xeb\x2b\xa9\x0d\x46\x50\x23\xea\x62\x37\x34\x6d\x84\xee\xb6\x77\x87\x03\xd2\x4f\x8d\x68\x6d\x2c\x7f\x17\x13\x46\x33\x41\xc6\xe4\xc2\x28\x9e\x99\x13\xf2\x4c\xba\x8e\x80\x96\x9a\xae\x15\x17\x17\x24\xe7\x0b\xe8\x74\x6a\xc2\x57\x47\xcd\x15\x58\x42\xdd\x93\xc4\xae\x6d\x82\xaf\x79\x21\x0c\x36\x9b\x02\x10\x2e\x54\xea\x04\x63\xa6\xdc\xeb\x62\x12\x42\x3a\xb0\xf5\x54\x36\xd4\xac\xea\xd2\xd0\xf6\x4d\x24\xc0\xe3\x0e\xaa\x12\x07\xc3\x9a\xfd\x5a\xa1\x35\x88\x96\x3b\x74\x12\xec\x5c\x68\x64\x2c\xbe\x24\x21\xf6\xa2\xa2\xd3\x5b\x66\xc3\xdb\x9f\x36\x9f\x68\x31\xa1\xa3\x37\xf4\xdd\xf3\x03\x63\x67\x3d\xfb\x3c\xef\xe2\xbf\xa0\xfd\xce\xee\xad\xdb\xcf\x8d\xd7\xa7\xaf\xa6\x30\x00\x5b\x2e\xfc\xb4\xde\x1e\xff\x4a\xd8\xed\x46\xea\x54\x8c\x55\x04\x24\xf6\xdc\x71\x29\x21\x00\xef\x9e\xae\xed\x5a\x94\x81\xc0\xa2\xf6\x25\xde\x45\xe0\x62\xa8\x9b\x8d\x93\xe9\x86\x4f\xe7\x85\x9c\x4f\xd7\x54\x1b\xa6\xb0\x23\xf2\xf4\xfa\x78\x6a\x49\xa5\x27\xcb\xd8\x9e\x70\x11\xd4\x27\xe4\xcf\x9b\x9c\x42\xf4\x60\x13\x17\x18\x98\xea\xbf\xd3\x3e\x8a\xd0\xc8\xfa\xbb\x67\x5c\xa7\x8f\xa4\xae\xce\x51\xa9\xb1\xb2\x9d\xb1\x76\x51\x48\x53\xda\x5e\xb2\xc5\xee\x45\x39\xcf\xe5\x9a\x46\xa3\x3e\x8d\x24\xda\xc8\x0d\xb9\x91\xea\xca\x2a\x7a\xd7\x96\xd0\xc1\xf5\x22\xa6\xb4\xab\xc2\xf9\xd2\x41\xbc\x03\xef\x7e\x65\x25\x93\x81\x78\x54\xc5\xaa\x29\x27\x02\xe8\xa2\x35\x3a\x5b\xc8\xd4\xe5\x66\x23\x95\xd1\xbb\x08\xb5\x76\xe6\x70\x8c\x7a\xdf\x7e\xb7\x8a\x82\x52\xbe\x1d\xc0\xee\xd4\x7b\xb4\x3d\xd3\xbe\x61\x3d\x73\x4f\x4a\x8e\x94\xc5\x64\xe9\x3a\x34\x7e\xb2\x31\x38\x1d\x3b\xf9\x06\xf9\xc5\x47\x49\x76\xcc\xf9\xee\x71\x93\x3e\x4a\x32\x6a\x5b\x7c\x8a\x9b\x84\xcf\xa7\xb8\xc9\xe0\xf3\x29\x6e\xf2\x53\xdc\xe4\xa7\xb8\xc9\x4f\x71\x93\x9f\xe2\x26\x3f\xc5\x4d\xfe\x73\xc6\x4d\x9a\x42\x5f\xb0\xac\x54\xdc\x6c\xcf\x95\x5c\xf0\xa2\xc3\x9f\xd8\xba\x80\xdd\x79\x22\xc8\xca\xd3\xcc\x58\x63\x49\x57\x29\x87\xe9\x32\x56\x60\x64\xa1\x2d\x56\x9b\x62\x3d\x99\x28\xe1\x31\xab\x03\x24\xdd\x70\x6b\xff\x59\x30\x78\xc5\xda\x6c\xe4\x9d\x15\xa5\x3d\x52\x11\xc5\xb4\x2c\x55\xc6\x5a\x95\xbd\x6e\x56\xac\xcb\xfa\xc7\x52\xec\x76\x22\xaf\x8b\xfc\x08\x0f\xde\xce\xfd\x8b\xc4\xfe\x51\xe6\x4c\x09\x6b\x13\x00\x42\xe0\x9c\x86\x33\x47\x37\x7b\x77\x51\x71\x3f\xbc\xd9\xd0\x98\x6b\xa2\x4b\xa0\x15\x38\x24\x56\x54\x2c\x19\x99\x33\x73\xc3\x98\xb0\xaa\x94\x51\x68\xfd\xfd\x5d\x27\x1b\x56\xee\xb2\x25\xbf\x66\xc2\x0a\x55\xe7\xcd\x42\xd8\x46\x56\x19\xc7\xe1\x22\xaa\x99\xe0\x25\x78\xa7\xe9\x2c\xab\x97\x93\x5f\x26\xff\x31\xf9\x8b\x5d\x38\x29\x37\x4b\x45\x73\x38\x5f\x35\x7e\xfc\xe2\x18\xf2\xdc\xdc\x39\x91\x08\x76\x33\x78\xf9\x78\x54\x0b\x62\x4c\xbb\x0d\xf6\x23\x4b\xc2\xb2\x88\xa4\x34\x61\x41\x05\x59\x14\xb2\xec\xf2\xac\xa4\x6d\x37\x2c\xba\x3e\xc4\xd3\xe1\xca\xb3\x83\x70\x2b\x35\x53\x63\x5f\x13\xd3\xb2\xbe\x76\xbb\xc4\xaf\x73\x42\x9e\xc6\xe4\x1b\xbb\x35\x76\xf7\x17\x16\x63\x0a\x8e\x1f\xc8\x71\x55\x01\x78\x8f\x2a\xaa\x09\x17\x58\x16\xa1\x81\xb5\x68\x04\x2e\x15\x98\xc8\x68\xa8\x36\x4a\x6e\x56\x3c\x9b\x90\x53\xe1\xd9\xa4\x0d\xbe\x90\xf2\x4a\x93\x82\x5f\xe1\x35\x4f\xc2\x9b\x47\x48\xc6\x37\x2b\xa6\xf4\x09\x7c\x33\x26\xcf\xcf\x9e\xfd\xf0\x7c\xfc\xfc\xec\xd9\xc5\xe9\xf8\xec\x87\xd3\xb3\x1f\x4e\x1f\x3e\x18\x9f\xbf\x7e\xf9\x1f\xc7\x8f\x1e\x7c\xd9\x18\xf3\xa6\x6b\x44\xec\x4d\xed\x07\x4f\x9f\x5f\x1c\x3f\xfc\x7a\xfc\xfd\xd9\x8f\xe3\x8b\x1f\x4e\x1f\x7e\xf9\xa4\xe3\xf5\x3b\x63\xe2\xc0\xd7\x5c\x5c\xbe\xbc\xf8\x89\x29\x8d\x17\x70\x2f\x2f\xae\x8f\x27\xc7\xdd\x07\xeb\x01\x85\x0f\xfa\xcf\x04\x1e\x6b\x51\x7b\xa3\xc5\x60\x38\x3c\x3c\xf6\xfa\x06\x06\x50\x74\x1c\x7e\x26\xb4\x58\x4a\xc5\xcd\x2a\xe
a\xd2\x25\x3e\x72\xd6\x6a\x11\xc1\x96\xd2\x70\xf0\xa4\x38\xd3\xcb\x82\xb2\x1c\xbb\xa2\x22\xd7\x2b\x7a\xc5\x26\xa4\xba\x50\x4e\x81\xb4\x5b\x5c\xb1\xb5\xbc\x66\x50\x62\xd8\x95\x57\xe0\x0a\xf5\xa4\xc8\x35\x71\xb7\x70\xce\x55\xd2\x2a\xc3\x9d\x9a\x2c\xca\xa9\x67\xcf\x2f\xc6\x67\x4f\xcf\x1e\x59\x3a\x12\x72\xb0\xa5\xeb\xe2\xf0\xa4\x93\xfd\xc2\x91\xf1\x74\xf6\x5e\xa3\x70\xd8\x05\x6f\x7f\x68\x5c\x83\xad\x06\xd2\xba\xf1\x50\x8c\xe2\x50\xf9\x25\x59\xfc\xf8\xda\x01\x70\x3e\x6b\xc8\xfb\x57\xd2\xc8\x4c\x16\x55\x53\xf9\x4e\x16\x48\xc0\x6c\x31\xc7\x77\xad\x7b\x19\x4b\x2a\x3b\xc4\xbd\x5a\x93\xe3\xc9\xf1\x11\x39\x9e\xa4\x7a\x1a\x59\xad\x79\x3c\x79\xd4\x24\x6a\xf7\x76\x04\x15\xfd\xfa\xf2\xf9\x09\xc9\x4a\xa5\x98\x30\xc9\x60\x36\xa8\x53\xc4\x97\x2b\xa6\x4d\x13\x9e\x8f\xb5\xb1\x08\x70\x5f\x5d\xbe\xbc\x38\x7e\x18\x67\x97\xbe\x0c\xe7\x00\x4a\xbc\x3a\x6d\x63\x58\xbc\x5e\x4f\x63\x58\x1c\x71\x8d\x61\xf1\x6a\x1a\xef\xd7\x00\x99\xc0\x75\xa0\x37\x0e\x86\x28\xc2\x70\x3c\xaa\xc3\x2e\x0d\x58\xd9\x6e\x09\xc5\xe2\x1d\xe3\x37\xfc\x8a\x4f\xd6\xf2\x37\x5e\x14\x74\x22\xd5\x72\xea\x8d\xce\xe9\x05\x98\x77\xef\x2e\x78\xce\xde\x5d\xbe\xbc\xf8\x2c\x34\x64\xde\x65\x72\xbd\xa1\x86\xcf\xa1\xad\xe4\xbb\xc9\xc3\xaf\x15\xcb\xe4\x7a\xcd\x44\xce\xf2\xc9\xc3\x6f\xe2\xef\xb5\xfc\xd8\x52\x7f\x49\x81\x73\xf9\xf2\xe2\xdd\xe9\xf3\x8b\x77\xc7\x0f\xbf\x7e\xf7\xfd\xd9\x8f\xef\xfa\x34\x4d\xf8\xcc\xc3\x2f\x9f\xf8\x67\x1e\x7d\xfd\x38\x00\xe8\xd5\xe2\x3b\xaf\x16\x07\x81\x4d\xea\x3e\x42\xfa\x34\xe8\x5e\xc0\x1f\x7e\xf9\xc4\x3f\x5a\x4f\xbd\x01\xbc\x39\x62\x38\xf0\x0f\x6e\x34\xf4\x99\x0c\x77\x98\x73\xa7\x7c\x8a\x08\x90\x01\xe6\x42\xcf\x26\x5c\xc3\xa1\x62\xc8\xf6\xc3\x91\x43\x36\xde\xdd\xb6\x18\x1e\x6f\x9a\x9b\xeb\x7f\xdd\x56\xea\x24\xee\xa3\x5a\xf9\x9c\x79\xe5\x63\x0f\xa6\x68\xd0\x44\x8b\x15\xbe\x3f\xf1\x65\x11\x71\x27\x35\x29\x2f\xd1\x47\xf1\x71\xc8\xfe\xba\xc8\xdf\xcd\x69\x76\x75\x43\x55\xfe\xbf\x9c\xf8\x9f\xe4\x68\x14\xf8\x47\x90\xa3\xe1\x63\xe9\xf9\x36\xa8\x31\x1c\xcf\x6f\xda\x0f\x25\x81\xee\x0f\xb1\x1b\x9c\x5d\xfe\x70\x72\xbd\x69\x3f\x94\x04\xba\x3f\xc4\x5d\xf2\xb4\xd1\x11\x81\xb9\x0b\xaa\x46\x60\x8c\x09\xf6\x24\x7e\x17\x6d\x12\xaf\xab\x56\x13\x81\xba\xbb\xe8\xc6\xc1\xb1\x5b\xb4\x3f\xf8\x58\xa2\x3b\xde\x8e\xb4\xbb\x0d\xa9\xeb\x1c\xda\xe1\x5a\x74\x6e\x45\xa9\xc8\x19\x38\x6a\x62\x17\x1e\x67\x95\x1b\xe7\x9a\xbb\xc6\x43\x84\xa2\xa4\x0e\x0f\x90\x5c\xe4\xfc\x9a\xe7\x25\x2d\x1a\xea\x22\x7e\xc1\x09\x4a\x64\x43\x15\x5d\x33\x03\x8e\xd9\x9d\x39\x86\xde\x4f\xaa\xd8\x1e\x70\xf5\xfb\x6a\xa7\x37\xb5\x65\xff\x6e\x90\x33\xcc\x25\xf5\x54\xef\xb7\xf3\xb5\x07\x17\x61\x70\x2a\x47\x18\x55\xce\xb6\xe8\xbb\x44\xc7\x2b\x16\x01\xe0\xd1\xfa\x99\x54\x43\x28\xa3\x77\xd4\x50\xc8\xd4\xbf\x66\x85\xdc\xb8\x40\x10\x76\xcb\x35\x04\x10\x84\x43\x16\xb2\x14\xb1\x78\x10\x74\x80\x72\x81\xa5\x0a\x27\x84\x3c\x63\x1b\x26\x72\x48\x94\x11\x64\xa3\x58\xc6\x35\x2b\xb6\xbe\xd4\x58\x00\xb5\xea\x93\x1a\x87\x4c\xed\xf2\x33\xa6\x9d\x77\x1a\x02\x03\x5c\xb5\x33\xc5\xf2\x32\xeb\xe8\x75\xc1\x1c\x7d\x7b\xd8\x84\xeb\xfa\x78\x1e\x3a\x7d\x58\x4e\xe6\x0c\x1d\xc1\x75\x98\xf7\x96\x19\x72\xc3\xa2\x91\xd7\x34\x97\x1b\x17\xa5\x63\x69\x2c\x05\xd1\x72\x61\x6e\xec\x1a\x0b\x3e\x57\x54\x71\x16\xab\x5a\x98\x0a\x7f\x79\x5d\xc4\x22\x5e\x42\x96\x8e\x0c\x49\x20\x61\xec\x36\x60\x42\x58\xec\x1f\x24\x63\x4a\xc1\xc5\xf2\xf5\x26\x92\x19\xd7\x12\x25\xe1\xe0\x2a\xf2\xb6\xde\xb9\x58\xbe\x34\xff\x5b\x89\xac\xe8\x72\x09\x20\x30\x58\x64\x5d\x2b\xee\x0c\x47\x26\x1b\x99\xeb\x09\x39\x2d\x0a\x1f\xfa\x6a\x29\x82\x8d\x50\x68\x01\xdc\x0e\xe1\x61\xa5\xee\x02\x89\xae\x41\xc5\xac\x34\x82\x08\xfc\x2a\xc6\x89\x2f\x7c\xec\x7f\x2b\x02\xb7\xb1\xac\xae\x
fb\xa3\x3a\x00\xb7\x51\x23\x03\x13\xf1\xe0\x2a\xca\x8f\xb9\xe1\x66\xc5\x45\x2f\x44\xae\xc9\x92\x09\xa6\x68\xe1\xb8\x38\xf4\x1f\xec\x96\xc9\x73\x4d\x6a\x4b\x6e\xa0\x3d\x71\x77\x53\xc6\xb5\xd4\xa6\xe5\xae\xdf\xff\x4e\xa2\xe0\x4c\x98\xef\xb8\xb8\xe4\x6b\x26\xcb\x48\x76\x5b\xcb\x79\xdc\x7a\xa6\xe2\x8b\x95\xbc\x21\x85\xc4\xfb\x85\xea\x7e\x2e\x95\x09\x31\x67\x64\xc5\x0a\x88\xc2\x80\x12\x87\x05\x23\x37\x94\x23\x7a\x7d\x0b\x60\x78\x59\x5d\xf7\xdd\x77\x7d\x06\xa1\x3d\xb5\xa7\x08\x16\x95\x78\x59\x21\xab\x0b\xb6\x7a\x42\x89\x6b\x40\xe3\x16\x14\x2d\x79\x78\xac\xbb\xa5\x83\x0f\x85\xcf\x1d\x21\xf6\xdf\xb1\x9e\x14\x7b\xd3\xe1\x63\x13\x81\xb6\x49\xd0\xc4\x5f\x4c\x25\x74\x62\x95\x3c\x7a\xf0\x11\x11\x88\xb5\xbc\x9f\x96\x8b\x05\x53\x4f\xb7\x26\x76\x59\xd2\xaa\xe7\xdc\x7e\xa8\x15\x57\xbc\x2e\x21\x65\x68\x2d\xd5\xd6\xc5\x0b\xc7\xd1\xa8\x18\xb0\x65\x4e\x0e\xb8\x20\x73\x0b\xec\x10\x50\xb8\xd3\x28\x20\xa4\x8c\x76\x31\x02\x31\x1b\xac\x55\xbb\xb8\x55\xcb\xdc\x90\x82\x51\x6d\xc8\xf1\x13\x6b\xe8\x73\xac\xff\x3e\x7d\x08\x61\x0b\x50\x7f\x28\x36\x5d\xbf\xbf\x76\xe7\x76\xe0\x4d\x26\x23\x65\xa1\x27\x9c\x99\x05\x18\x4c\x2b\xb3\x2e\xa6\x6a\x91\x7d\xf5\xe5\xe3\x07\x9d\xb9\x10\x84\x44\x5b\x63\x45\x5e\xe5\xa5\x3a\xa1\x9e\x59\x22\x60\x71\xd5\x72\x41\x1e\x3d\xfc\xea\xc9\xd7\x88\xdb\x86\x34\x6e\xbe\xb4\x21\x6a\x23\x20\x03\x01\x6c\x8d\xad\x5d\x3e\x70\x82\xb8\xba\xdf\x32\x52\x12\xbd\xa6\x45\x91\x68\x8e\x3e\x57\x8c\x5e\x45\x16\x6b\xd5\x58\xf2\x2d\x31\xa3\xc5\xbd\xbb\xa0\x6a\x69\xe5\x58\x59\xe4\xee\x02\xbc\xfb\x3d\xee\xe2\x44\xf3\xa5\x80\x0b\xfa\xc4\xbd\x06\x28\x37\xc7\xdc\x66\x45\x05\x11\xcc\xda\x70\x54\x6d\x23\x56\x50\x3a\xe5\x87\x34\x5a\x08\x3c\x89\x9d\xd6\xfa\x92\xd0\x42\x24\xfd\x48\x6f\xdf\x30\x68\xfa\x7f\xa7\x1d\xdd\x7a\x3c\xbe\xb7\x23\x08\xaa\x33\x04\x3a\xf7\xb6\x92\xeb\x0e\x92\xda\xad\x95\xa8\x20\xee\x5a\x0e\x28\x98\x17\xdc\xc6\x8b\x9c\xd0\x8d\xb7\xc5\xfb\x04\x46\x6c\xa6\x5e\x8c\x04\x02\x83\x8b\x4c\xae\x2d\xcc\xb0\xba\x7c\x9d\x9d\x84\x3d\x5f\x13\xd5\x0c\x0e\x76\xd7\x36\x4e\x51\xe7\x10\x11\x73\x04\x29\x96\xf1\x9b\xce\x5d\xa8\x5e\x9c\x2d\x21\x21\x52\x21\x2f\x26\x5e\xf4\x8f\x92\x3e\x5f\x1f\x7f\xf3\xf0\x63\x0b\x9f\x36\xcb\x76\x88\xa1\x18\x0b\x78\xe1\xb4\x97\x18\x6a\xbd\x2f\x85\x82\x96\x30\x1c\x28\x90\xe2\x87\xc6\x1d\x31\xf5\x51\x05\xd2\xe3\x07\xdf\x74\xfb\xf8\xfa\xe4\x11\xda\x9a\xfb\xd9\xc8\xed\x67\x3e\xaa\x8d\xdc\x34\x86\x43\x5b\x39\x61\x9d\x39\xab\x6e\x5f\x1b\x39\x06\xb1\xb6\xf1\x3e\xa6\x8d\x8c\x2b\xdd\x9b\x0e\x1f\xdf\x46\x8e\x90\x20\x91\xf5\xd3\x77\x02\xf9\xb8\xb6\xb2\x59\x29\x46\xf3\x33\x59\x76\x55\xb5\xdc\x45\x63\x30\xbc\x42\x22\x14\x70\xac\x92\x8c\x71\x84\x26\x19\xe6\xb4\xc7\x9c\x3a\xac\x6a\x24\xe2\x1d\x46\x13\x72\x66\x1f\x81\x9e\xf9\x76\xf7\x7b\x40\x10\x55\xa1\x09\xa3\xd0\xf9\x7c\x60\x39\x48\x28\x4b\x23\x5d\x3a\x1f\xc2\x0b\x22\x45\x8f\x88\x73\x3c\x65\x12\xf3\x56\xd6\xf1\x52\xf1\x98\x73\x5f\xc5\x73\x6a\x32\x67\x76\x8a\x58\x60\xd3\x2f\xa1\x76\x4c\x55\x49\x5b\x65\x22\x13\xe1\xc9\x63\xbf\xba\xc9\x1d\x54\x55\x04\xa8\x97\xbb\x0d\xff\x81\x2b\xbd\xef\xe6\x17\x86\xb9\x56\x53\x38\x22\xf3\x32\xba\x9b\xed\xcc\x02\x67\x25\x17\x64\x51\x9a\x52\xb1\x20\x4e\x34\xa6\xff\x22\x20\x13\xde\x8f\x17\xc2\x32\x4d\x25\x85\x6a\x9e\x4a\xf7\x91\x69\x31\x4a\xc4\xad\x04\x7a\xc6\xf0\x82\xff\xe6\xf8\xe1\xec\xfc\xcf\x29\xbf\x6b\x29\xac\x71\x56\x48\x9a\x1f\x91\x8d\x34\x4c\x18\x0e\xb3\xd6\x86\xaa\x6b\xf0\x95\x9a\x95\x07\xcd\xa1\xee\x27\xe8\xc2\x15\x5f\x46\xd3\x06\xde\xb0\xbc\xcc\x76\x57\xe7\xe7\x5f\x07\xb4\xee\x95\x16\x6a\xa4\x77\xb5\x91\x8d\x94\xaa\x78\x0f\x0d\xe9\xbb\xd1\x47\x4a\x47\xf4\x74\x05\xeb\x53\x9f\xa6\xd0\x2f\x04\x38\xe7\x9e\xb1\x82\x0e\xca\xf4\x6d\
x3d\xb2\x2b\xb7\xab\x54\x58\x45\x32\x1a\x93\xb0\x2b\x59\xe4\x24\xa7\x86\x62\x56\x9f\xb5\xb2\xeb\xda\x50\xf0\xf4\x2e\x17\xc3\x21\x6f\x25\x55\x6c\x67\x64\x54\x04\xc4\x72\x53\xb0\xe0\xad\xe9\x65\xa5\xbf\x77\x50\xf9\x2d\x97\x31\x65\x30\x4e\x3a\xc6\x71\x50\x8d\x20\xd5\x14\xd9\xa5\xab\xb2\x6b\xd0\x3d\x10\x7d\x3d\x67\xc6\xbe\xd8\xaf\x26\x36\xd9\xfa\xdd\xce\x4c\x9b\xfb\x12\xc1\xf1\x48\x78\xc4\x7b\x04\x62\x0e\xe4\xe0\x9a\x7c\xf9\x31\x75\x53\x29\x04\x2b\xf6\x50\xf2\x8d\x07\xba\x94\x3c\x0e\xe8\x3f\x44\x1d\x70\x91\x15\x25\x1c\xc3\x6e\xd8\x5c\xcb\xec\x8a\x19\x7d\x58\x1d\x98\xda\x26\x00\x14\x38\x41\xd0\x51\xb9\xc7\xf3\x82\x0d\xf2\x38\x92\xe3\xd5\xc7\x42\x69\xe2\x36\x20\x08\x39\x39\x03\x4f\xf2\xeb\x6b\xa6\x14\xcf\xbb\x8e\xda\x0d\xa4\xc7\x1f\xf4\x02\xd9\xdd\x0e\x82\xd6\xac\x07\x77\x39\x3e\x1b\x71\xfa\xe8\xf6\xd7\xd8\xde\x0f\xb4\x1b\xd7\x8d\xc8\x98\x1d\x08\x3d\x37\xab\xc9\x5b\xd5\xdb\x71\x5d\x4c\x6f\xbc\x71\x87\xfc\x71\x29\xae\x84\xbc\x11\x63\xf4\xf1\x77\xc0\x8d\xc0\xd4\x86\x9a\xb2\x85\xb9\x06\xd6\x70\x80\xcf\xc7\x04\x07\xbe\x62\x19\x9a\x10\x72\xee\x3c\x0c\x6e\x90\x0b\xb4\xdd\x31\x06\x9a\x08\x88\xbb\xf7\xab\x5b\xbb\x37\xc3\x0a\xc9\xec\x8c\x87\x4b\xb5\x4a\x5d\x55\xd3\xab\xc6\x55\x35\x63\x3a\x68\x4a\xb3\x4c\x2a\xd8\x48\xd1\x4c\x8b\xa0\x32\xe2\x87\xac\x2d\x93\x49\x81\x89\x67\xbd\x97\x5b\xf5\xc8\x66\x36\x73\xf0\xbd\x4b\xb8\xe2\xca\x11\x25\x92\xa6\x7d\x5a\xa1\xc4\x15\x97\xea\x5b\x30\xbc\x30\x71\xab\x6a\xdf\x8b\xe9\xe6\x95\xae\x82\xef\x3c\xd0\xda\x2c\x3d\xe0\x13\x76\x44\x26\x6e\x7a\x3b\x34\xec\x80\xcd\xfe\x5e\xd2\x42\x93\x89\xdd\x9f\x13\x4f\xc2\x43\x77\x8b\xad\x18\xde\xf8\x56\xc9\x7b\x21\x36\xe0\x66\xb8\xeb\x72\xcb\x95\x38\xc4\xc3\x68\xc5\xbd\x68\x29\x75\xe0\x61\xc1\xa8\x35\x26\x11\xbd\x19\xdd\x60\x48\x01\xef\xf6\xeb\x40\x70\xd7\x7d\xf2\x52\xd2\xfc\xa9\xeb\x7a\xf6\x23\x15\x74\xc9\x72\x0c\xcb\x52\x25\xb3\x86\x18\xf6\x5f\xb4\x92\x07\xee\xc4\xab\x49\x77\x21\x17\x7c\x0c\x06\x23\xc5\xee\x83\xa1\xcc\x44\xbe\x91\x5c\x18\xb2\x29\xe7\x05\xd7\x60\x21\xb8\xb4\xd1\xad\xcf\xa2\xd3\xee\x98\xc7\x3b\x2f\x36\x1b\x6d\xd9\x26\x30\xb5\xef\x68\xa1\x61\x6e\x54\x6c\x71\x37\x4b\xcd\x1a\xdc\xa5\x98\x95\x6c\xd4\x70\xbd\xe0\xdd\x19\x78\x1d\xab\x7f\xc3\x68\xbe\x1d\xb4\x76\xfb\x82\xae\x7b\xc3\x7a\xed\xa7\xcd\x79\x13\x30\xfb\x01\xb9\x93\x00\x3b\xed\x31\x1d\x30\xad\x29\xbb\xbd\xe3\xb2\xbb\xcf\xc8\x33\xfb\xf6\x67\xaf\x2e\xf6\x21\x76\x8b\xb4\x1d\x50\x7b\x89\x6d\x59\x72\x53\x50\x03\x86\xb5\x53\x3a\x76\x1a\x93\x24\xc8\xae\xcd\x0e\x85\x49\x20\xeb\x8e\x19\x8f\xcd\x5c\x24\xd3\x07\x13\xca\x91\x69\x3b\x0b\xf2\x9b\x14\xf6\xf0\xf5\x81\xd9\xeb\xd9\xab\x8b\xfd\xb8\x2a\x8d\x64\x3b\xd1\x5d\x4e\xb2\xdf\xda\x73\x9f\xca\x35\x59\xd1\x6b\x46\xe6\x8c\x09\xa2\xcb\x2c\x63\x5a\x2f\x4a\x7b\xc8\x72\xce\x83\xce\x99\xde\x69\xc9\xbb\x16\x55\x34\x55\xa7\xb3\xec\xe5\x99\x07\x6d\xd7\x03\xc5\xe3\x9c\x88\x13\x39\x55\x79\xfd\x66\x17\x0d\xd0\x35\xf1\xbe\xdc\xad\x82\x6a\x73\xa9\xa8\xd0\x00\xc8\x1a\xb1\xb1\x2c\x86\xca\x10\xa4\x86\x8d\x13\x47\xd8\xde\x24\x0d\xd7\x25\xf6\xce\xd9\xd8\xf6\xb4\x1e\x4f\xb6\xe8\x7d\xbc\xcb\x4a\xda\xe3\xf1\x78\x18\x5c\xff\xc9\xa2\x3f\x9b\xb9\x2b\xfd\x0a\xb7\x72\x8f\x39\x51\xef\x77\x88\x92\xcb\x4c\x49\x8b\xea\x4b\x61\x0d\xd9\x5d\xee\x48\x4c\xd7\x0b\xa9\xf3\x4a\x46\x5d\x38\x11\xd5\x33\x8f\xf8\x83\xad\xb9\x55\x22\xaf\xb3\x7c\x52\xe7\x7c\xd3\xbc\xbc\x92\xda\xbc\xc2\x86\x9c\x03\x4e\x6d\xc1\x68\x38\xa9\xef\x84\x15\x41\x27\xe1\x7a\x50\x84\xe2\x29\x61\x3e\x21\xe7\x58\x32\x04\x53\xe3\xad\x4c\x73\xa1\x91\x01\xdc\x6e\xaf\x4d\x7f\xc6\xa5\x4f\x7e\x1b\x9a\x86\x57\x25\xcb\xd5\x59\xed\x37\x2b\x06\x26\xd2\x3e\xd7\x26\xb0\xe6\x5b\x7b\x44\xd7\xf5\xfd\x5e\xd8\xbe\xd5\xdd\xab
\x6c\x0a\xcb\x78\x97\x67\xe7\x04\x3a\x10\xc3\x8b\x12\x20\xbb\xdd\x8e\xfe\x45\xe7\x6f\x5e\xff\xf2\x1f\xd5\x8a\xe1\x38\xdb\xfc\x2a\x01\xd9\xa5\x0a\x43\xd2\x21\x54\xf2\x6a\x36\xb0\xc5\x2b\x25\xaf\x68\x39\xe6\xa6\xcb\xf5\xba\x14\x29\x9f\x89\x9f\x32\x5a\xc1\xd6\x5c\xb5\x33\x67\x60\x76\xfa\x8b\x8d\x00\x27\xe0\x35\x59\x48\x75\x43\xe1\x4c\x92\x04\xea\x34\x4a\x88\xcf\xee\x63\x18\x21\x7f\x06\x9f\x25\x60\x22\x01\xb2\x22\xbc\x6f\x08\x15\xbd\xb6\x57\x0c\x90\x80\x73\xc0\xa5\x25\xe0\xd6\x8b\xe6\x42\x1b\x46\x73\xa8\x37\x07\x20\xbc\xb3\xb1\x81\xea\x7b\xda\x3f\x92\x00\xca\x45\xd8\x17\x5f\xbb\xb4\x8b\xa5\x35\x39\xea\x7b\x65\x58\x48\xb5\xf4\x21\x4c\xe0\xaa\xd7\x45\xee\x60\xd1\xa1\xd6\x42\xfa\x82\xf2\x14\x44\xbe\x20\x5b\x59\x62\xe2\xb1\x34\x55\x5e\x7b\xd3\x54\x85\xd9\x96\x16\x45\x83\x67\x6a\xdf\x8c\x8c\x32\x94\x0b\x7c\x65\xfe\x93\xe9\xf4\xe6\xe6\x66\xe2\x7a\x96\x43\xc8\x4c\x2e\x6f\x84\x9d\xd1\xf4\xe1\xe4\xe1\x34\x97\xd9\x14\xdb\x99\x57\x1b\xc9\xdc\xf6\xb5\xd7\xe6\x02\xb5\x3e\xe4\x99\x42\x0d\xd0\x8e\xbd\x78\xd9\xb0\xd4\x7a\xcb\xe8\x58\x9c\x61\xae\xbf\x6f\xf0\x8b\xe6\x0b\xc4\x4b\xbb\xf3\x10\xd4\x49\x77\xe5\x71\xc9\x7d\x32\x1b\x5d\x9e\x9d\xcf\x12\x4d\x7b\x09\x0c\x82\xb9\xcd\x46\xd5\xa4\x42\x30\xad\x5e\xf9\xbe\x46\x73\x02\x22\x06\x22\x73\x0d\x52\xcc\x8a\x8e\xae\xe5\xb7\x42\x8a\xd3\xb7\x94\x04\x23\x31\x77\xca\x60\x24\x3a\x95\xf7\xe5\xea\x8e\xe2\x8f\x8e\xed\xc4\x13\xbf\xa6\x05\xc7\xfb\xa6\xda\x16\xc1\x91\x71\x80\x3a\x0e\x87\xc7\xf5\x71\xf3\x94\x1b\x99\x7b\x4c\xe9\x86\x87\xd8\x0b\x3c\x48\xbf\x87\xf2\xbd\xe6\x39\x53\xe7\xd5\x14\xf7\x50\xc3\xad\x27\xdd\x6a\x7d\xdd\xe4\xc6\x12\x93\x92\xb2\xde\x99\x75\x60\x31\x8a\x09\xb8\x44\x2a\xb6\xd8\x67\x7b\xa1\xa8\x36\xaa\xcc\xe0\xfe\xcc\xbf\x3e\xb5\x93\xee\x52\x53\x78\x77\x59\x29\xc1\x72\xe7\xfa\xc2\x01\xfe\x07\x34\xdd\xa1\x37\x3d\x95\xb2\x9a\xa4\xa1\x37\xba\x4e\x35\x69\x3a\x85\xab\xfa\x3f\xc9\x8a\x7d\xee\xad\x8a\x35\x08\x72\xfa\xf3\x45\xcb\xe6\x88\xd4\xc4\xec\x81\x9b\x26\x80\x9d\xfc\x2e\x5e\xfb\xa6\xba\x1f\xd6\xc9\x1e\x4d\xe2\xa0\x92\xb1\xd6\x3c\x7b\xd9\x2b\x07\xc2\x4f\x83\x22\x1d\x10\x76\xb6\x4d\x83\x4e\xbd\xe0\xc9\x4e\xee\x80\x00\xfa\xb8\x37\xb5\xbd\x68\x4e\x92\x0c\x00\xdb\x96\x35\x67\x08\xb0\xaf\x27\xd7\xc0\xa6\x55\xf8\x11\x78\x60\x78\x0f\x84\x76\x40\xf8\x48\x08\x75\x6f\xfa\x70\x08\x7d\xf5\xf2\xe9\x07\x45\x66\xea\x14\x5f\x7f\xba\x13\xdb\xe0\xde\xcd\xfe\x2d\x17\xbb\xbb\x7b\xc0\xda\xa0\xc1\xa8\x36\x54\x40\xad\x12\x8f\xb6\x9d\x82\x61\xbe\x1a\x39\xef\xef\x0c\x17\x36\xd3\x71\x66\xd4\x6c\xe4\x78\x70\x36\x3a\x21\xa7\x9e\x21\x41\x09\x92\xa7\x7b\xcc\x15\x3b\x23\x5d\x31\x0d\xd7\x00\x56\xa5\xe4\x2c\xe3\x58\x0f\xc5\xda\xe2\x1c\x8e\x90\xc4\x75\x4d\xa2\x42\x6f\xe2\xb7\xd7\xe1\xa7\xa0\x5b\xa6\xc8\xc1\xe5\xd9\xf9\xf4\xe2\xe2\xe5\x21\x71\x1a\x1e\xa4\x9b\xab\xea\xe5\x86\x40\x48\xb9\xfd\xcf\x45\x34\xec\x3b\xfc\x58\xb1\x48\x5a\xde\x43\x4c\x13\xaa\xee\x14\x9c\x74\x74\x99\xdb\xa4\x4a\xd2\xcb\x65\xa6\x27\xf4\x46\x4f\xe8\x9a\xfe\x26\x05\x94\xaf\x3e\x85\x3f\x9f\x9f\x5d\x4c\xa1\xc2\xaa\x99\xfa\x34\x38\xb5\x2c\x79\xce\xa6\x96\xf8\x63\x4f\xfc\x31\x56\xb2\x5e\x99\x75\xf1\x59\x56\xcc\x07\xcc\xd6\xd1\xea\xd5\xcb\xa7\x48\x27\xef\x92\x68\xd0\x29\xa0\xc2\x00\x90\x9d\x74\xb2\xf8\x20\x35\x81\xda\xe8\x1f\x8a\xd8\x7f\x3c\x5a\x45\x31\x4f\x6b\x28\xd2\x6b\x35\xfb\xcf\xd8\xef\x8a\x01\x23\x5f\xbd\x7c\x3a\x50\xfc\x0c\x68\x9d\x38\xa4\x25\xa2\x7d\xa9\x05\x98\x1c\x32\x50\xe0\x2d\xb3\x48\xa7\x05\xff\x69\xca\xb9\x65\xb6\xf9\x18\x86\xd0\xf7\x67\xe7\xff\x0d\x86\x90\x9d\xfc\x3f\x9b\x21\xc4\x99\x30\xa7\x70\xcf\xb0\xaf\xca\x09\x9f\x6d\x65\x0f\x38\x3f\x13\x85\xdf\x06\x6c\x5f\xa8\x4d\xab\xa1\x5f\x82\xeb\x34\x00\xd5\x8d\xec\xc6\xed\xa
0\xca\x5d\xf4\x8e\x15\x64\xdf\x17\x72\x4e\x0b\x2b\xcb\x2e\xea\xf8\x0b\xd0\x6e\xf8\xae\x21\x9a\xa1\xe1\x43\x01\x9f\x1d\x42\x6d\x2e\x19\xdb\x94\xc9\x9b\x21\x6b\xc7\x07\x35\x66\x4c\x50\xb1\x25\x8a\x2d\x31\xe4\x16\xb3\x27\x57\x8c\xfc\x74\x7e\x36\xdc\xf7\xe7\x3f\x30\xb9\xea\x60\x4a\x02\x4b\xa7\x25\x02\xb3\x42\x96\xf9\x64\x29\xe5\xb2\x60\x20\xff\x82\xb0\x0f\x26\x96\x5c\x30\xec\xa3\xb8\x92\x37\x63\x23\xa7\x1e\x5b\xe3\x40\x0c\x72\xb1\xfc\x6c\x09\x78\x78\x37\x98\xe6\x4e\xbb\xbc\x94\x59\x8a\x26\x7b\x1b\x30\xb0\x6c\x00\xba\x43\x12\x0c\x40\x00\xeb\xcd\x23\x1d\x91\x3c\xc8\xd6\x70\xbd\x48\x1c\x75\x0e\xa8\xc8\x2d\x5d\x0e\x09\x45\xa3\x6b\x47\x82\x0c\x41\x81\x0b\x9a\xab\xe9\xda\xa2\x5a\x4d\xb3\x57\x72\x10\xd1\xeb\x64\x38\xde\xf0\x25\x91\x39\x5b\xd1\x6b\x2e\x07\x50\xbf\x49\x56\x24\xbd\xa7\xc6\x67\x88\x37\x47\xe4\x0f\xa7\xeb\x70\x0f\x0d\x18\x08\x84\xfd\x70\xca\x6e\xa0\x8e\xea\x37\xc8\x1b\x92\x31\xb4\xc5\xfb\xdd\x1d\x3d\xab\xe9\x76\x2f\x11\xdf\xf7\x22\x48\x89\x1e\x9d\xfe\x7c\x31\x3a\x22\xa3\xd3\xdf\x4a\xc5\x92\xfd\x6c\xed\x67\xf4\x94\x2a\xf6\x23\x33\xb4\xb0\xcf\x7c\x7f\x76\x6e\xff\xf7\x7a\xc3\xc4\x85\xa1\xd9\x95\x2b\xa8\x3c\xfa\xe9\x62\xb3\x62\x8a\x25\xbd\x9b\x43\xc8\x3c\xb6\x87\x91\xbe\x11\x76\xde\x3d\x63\xaa\x49\xf7\x8c\xfb\x3e\xe1\x5d\xc4\x11\xd5\x52\x7b\xc6\x39\x0c\xf4\x8c\x7a\xf1\xf4\xc7\x01\x8c\xd6\xc3\x91\xfd\xa6\x57\x8f\xd9\x35\x80\x9d\x75\x26\x53\xbc\xdc\x8c\xc8\xb3\x63\x5b\x4d\x5a\xf1\x3b\x28\xdb\xcc\xb3\x0e\x49\x95\xf6\x2e\xbb\x3e\x3f\x13\x72\x2e\xb5\xe6\xf3\x82\x35\xf8\xf7\xf9\x2d\x4a\x99\x11\xf2\xde\x0b\x27\x74\x12\xcc\xd7\xe7\x81\x7e\xd1\xa7\xd9\xc7\xc4\xbf\xf4\xee\xce\xe6\x34\xd9\xc6\x88\xb2\xbb\xb8\xa9\x85\xcc\xd9\xb9\x54\x43\xe2\x7c\xfd\xd0\xb8\x7b\xfa\x95\x1b\x71\x11\x0d\xd3\x22\x77\xbe\x32\x6e\xc1\xfe\x74\x6d\x4c\x3e\x5d\x1b\x07\x40\x3f\x5d\x1b\x7f\xba\x36\xfe\x74\x6d\xfc\xe9\xda\xd8\x7d\xfe\xc7\x5e\x1b\x6f\x14\xbf\x1e\x56\x9c\xd9\x8d\x8c\x6b\xe3\x73\x37\xc0\xeb\xdb\x98\x46\xdc\x47\x0b\x3b\x98\x77\x68\x56\xb2\x7f\xfd\x3c\xa8\x66\xd5\x11\x22\x8c\xaa\xd2\x77\x90\x4a\xee\x9b\xfa\x3a\xa0\xe3\xf2\x1b\xf4\x21\xc2\x67\xb1\x9c\x81\x08\x5c\x2f\xb7\xfe\xbd\xf2\x62\x34\x5e\x40\xbc\x91\x02\xfe\x3d\xe1\x5b\xd1\x07\x8e\xc4\x54\xdd\x9e\x74\xe6\x02\xc8\x45\xd7\x73\x86\x29\x7f\xd1\x94\x68\xa3\x7c\xda\x39\x35\xa8\xeb\xe6\xfa\x5f\x1b\xe9\xf1\xec\x36\x63\x3c\x2f\x04\x3f\x90\xb5\xc7\x4e\xaa\xf3\x7d\xed\xcc\x99\x70\x89\xa7\xf9\x4c\x8a\x8c\x6d\x8c\x9e\xba\x90\x7d\x3d\xae\x67\xea\xbf\x9b\x7e\x66\xa5\x6c\x8f\x55\x8f\x1e\xd2\x46\x34\xf7\x51\xd8\x8d\xb2\x0e\x6b\xae\x1c\xa3\x2e\xf0\x39\x9e\x08\x8c\xe6\xa7\x93\x3d\x5d\xd8\xb9\x67\x8f\x11\xce\x39\x24\xe8\x9a\x4d\xc2\xe8\xe9\x38\xaf\x55\x6f\xc6\x6d\x23\xea\x90\x71\xe2\x3b\x88\xcc\xb7\x77\x08\x42\xb7\x1f\xcc\x17\x01\x3a\x65\x7f\x91\x02\xf3\x51\xdc\x97\xb8\x29\xed\xb7\xc0\x71\x3f\x87\xb8\xc1\x29\x25\xfa\x25\x35\x0a\xfc\xd5\xc5\xfd\x60\x09\xce\x00\x38\xfd\xf9\xe2\x08\x8f\xce\x78\x64\xff\xfe\xec\xbc\x0a\xd2\x4f\xf5\xc6\xbd\x1f\x06\x7a\x0e\xd9\x6e\xf6\x85\xf6\x98\x11\xa5\x9b\x32\x60\x16\xaa\x58\xb2\xae\x5d\x8d\x6f\x76\xf3\xd1\x76\x1f\x36\x0a\xae\xd8\xf9\x88\xcc\xd3\x65\x1f\xed\x8a\xdc\xdc\xbf\x7e\x00\x08\x7c\xfc\xf8\x11\x56\x9d\x2b\x35\xce\xd9\x15\x2c\xe0\xae\xda\x5c\x35\xdd\xb8\xd9\x4b\x45\xcd\xa2\x2d\xf3\xa9\xb9\x9d\x07\x0b\xb5\x6b\x4e\x31\x45\xba\x9a\xee\x04\x89\xe8\x15\xc9\x4c\x40\xd7\x38\xb0\xdc\x3a\xde\x10\x01\xdb\xba\xd0\xed\x26\xc8\x7e\x33\x1d\x22\x0e\x91\x53\x5d\x5d\x4a\x76\xbb\x29\x78\xc6\xe3\x95\x98\xdc\x72\x7c\x31\x40\x20\x0b\x54\xc5\x59\x53\x51\x42\x02\x7a\x1a\xa5\xc9\x4d\xd0\x3a\xba\x0e\xda\x08\xa0\x61\x62\xa4\x0f\xf5\x8e\x07\x3e\x40\xe7\x24\xba\x81\xdc\x55\xe7\x
90\xd3\x6a\x02\x11\xc8\x7b\xe9\x1b\x40\x7e\x75\x95\xd5\x2b\x09\xa0\x2a\xeb\x56\xd0\x35\xcf\x80\x46\xb4\x28\x64\x46\x5d\x85\x51\x70\xbe\x59\xd9\xfa\x2d\x59\xc9\x1b\x76\xcd\x14\x34\x35\x71\x12\x2e\xe6\xba\x32\xd4\xf0\x0c\xc0\x7b\x68\x58\xb9\x02\x38\x02\xcd\xce\xea\x58\x51\x6d\x14\x34\xc4\x23\x20\x5d\x1e\xa9\x57\x0d\x6d\x7a\xa1\xd6\xf2\xa9\xaf\xb1\xc2\xf3\xa9\xf2\xa7\x1d\xa6\x4d\x64\x64\x5f\x38\xff\xd8\x6f\xf5\xc8\xaf\x43\xfc\x3b\x49\x03\x38\xee\xc6\x8a\x78\x1d\x13\x26\xa5\x4f\x88\xfd\x1e\x2a\x5b\x98\xce\x8e\x3c\xcd\x56\xa6\x3b\x0f\x74\x24\x02\xbb\x42\x19\x98\x0a\xed\x1e\x48\xa6\xc7\x76\x14\x6d\x48\xa5\xc7\xea\x61\xed\x91\x75\xd0\x18\x99\xb6\x1a\xea\x1e\x59\xcb\xc2\x9d\xce\x70\x22\x47\x61\xc7\xbd\x0e\xaa\xb4\x6b\x72\x64\x52\xa1\xc2\x09\x33\x84\x3b\x0e\xc9\x97\x55\xc1\x8c\x0e\xa0\xbe\xcf\xa2\x05\x01\x50\x5d\xb1\x36\xc8\x6f\xad\xdb\x24\xda\x71\x3b\x69\xb1\x7b\xa5\xe2\x98\x42\x0f\xeb\x61\x58\x0f\xf4\x84\x6d\xb6\x2a\x6c\x77\xa4\xc3\xae\x49\x1d\x4b\xe3\xc2\xf5\xf7\xdb\x37\x05\x27\xd9\x06\xec\xa3\xb4\x00\xbb\x6b\xfb\xaf\x44\x11\xc5\x0f\xdf\xfa\xeb\x23\xb4\xfd\x4a\xb6\xfc\x1a\x78\x08\x8f\xf6\x56\xed\x6f\xf3\x75\xf7\x16\x5f\xbe\x9b\x56\x5a\x5b\x0c\x6f\xef\xd5\x22\x71\x04\xec\xe0\xd6\x5e\x1d\x0d\xbc\x62\xa6\xcd\x1e\x7d\xbd\xe2\xdd\xbb\x62\x26\xce\x3e\x3d\xbd\x52\xca\xb1\xb7\x97\xd7\x80\x3e\x5e\x03\x7a\x78\x0d\xe8\xdf\x75\xb7\x12\x21\x9d\x3f\xec\x7c\x89\x7a\x2a\x28\x94\xa1\x8d\x54\x74\x19\x96\xe4\xd0\xe5\xbc\xaa\x5a\xe0\x91\xa5\x33\x1a\x0a\x55\x50\x33\xbe\x25\xff\x39\x35\xab\x93\xaa\xb0\x81\xd7\x3d\xd5\x58\xcb\xd9\x5e\x9a\xfb\xa1\x61\x41\x83\x7a\x20\x00\x68\x0f\x4d\x97\x4b\x70\x19\xaa\xe4\x3f\xff\xeb\x0f\xff\x5f\x00\x00\x00\xff\xff\xa1\x09\xc8\xa2\x02\x64\x01\x00") +var _manifests00CustomResourceDefinitionYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\xff\x7f\x1b\xb7\x95\x00\xf8\x7b\xff\x0a\x1c\xb3\x39\x4b\xa9\x48\x5a\xb6\x9c\x38\xea\x76\x7b\xb2\xec\x24\xfe\xac\x63\xeb\x2c\x35\x69\x1b\xa6\x3e\x70\x06\x24\x51\x0d\x01\x16\xc0\x48\x62\x9a\xde\xdf\x7e\x1f\xbc\x07\xcc\x60\x86\x03\xcc\x50\xb6\xb7\xbb\x7b\xe6\x0f\x89\x4c\x62\xde\x00\x0f\x0f\xef\x1b\xde\x17\xba\xe1\x3f\x30\xa5\xb9\x14\xa7\x84\x6e\x38\xbb\x33\x4c\xd8\x7f\xe9\xc9\xf5\x53\x3d\xe1\x72\x7a\x73\xfc\x9b\x6b\x2e\xf2\x53\x72\x5e\x6a\x23\xd7\x6f\x99\x96\xa5\xca\xd8\x73\xb6\xe0\x82\x1b\x2e\xc5\x6f\xd6\xcc\xd0\x9c\x1a\x7a\xfa\x1b\x42\xa8\x10\xd2\x50\xfb\xb5\xb6\xff\x24\x16\xe6\x98\x6e\x36\x4a\xde\xb0\x7c\x22\x37\x4c\xe8\x15\x5f\x98\x09\x97\xa7\x64\x65\xcc\x46\x9f\x4e\xa7\x4b\x6e\x56\xe5\x7c\x92\xc9\xf5\xb4\x1a\x30\xa5\x1b\x3e\xdd\x94\x45\x31\xfd\xf2\xf8\x4b\x00\xc4\x45\x56\x94\x39\x9b\x28\x56\x30\xaa\x59\x03\xd6\x94\xcf\xd7\xe3\xac\x90\x65\x3e\x5e\x53\x41\x97\x2c\x3f\x25\x23\xa3\x4a\x36\xea\x7f\x54\xb3\x62\xe1\x9f\x1a\xaf\xf8\x72\x35\xa6\x37\x94\x17\x74\xce\x0b\x6e\xb6\x7b\xc0\xe1\x62\x59\xb0\xb1\x90\x39\x1b\xe7\xec\x86\x15\x72\xc3\x54\xf0\xb8\xa0\x6b\x76\x4a\xb8\x58\x2a\xa6\x75\x26\x85\x51\xb2\x28\x98\xd2\x16\x8a\xa2\x46\xaa\x06\xb8\xdf\xe8\x0d\xcb\x2c\x06\x97\x4a\x96\x9b\x53\xd2\x3d\x08\xa1\x3a\x4c\xe3\x2e\xbd\xc4\x17\x9c\x57\x2f\x80\xdf\x0a\xae\xcd\x7f\x76\xff\xfe\x8a\x6b\x03\x63\x36\x45\xa9\x68\xd1\x35\x45\xf8\xd9\xae\xaf\x2c\xa8\xea\x18\xf0\x1b\x42\x74\x26\x37\xec\x94\xbc\xb6\xd3\xd9\xd0\x8c\xe5\xbf\x21\xe4\x06\xe9\x0a\xa6\x37\x76\xeb\xbf\x39\x46\x60\xd9\x8a\xad\x29\xce\x9b\xd8\xc5\x89\xb3\x8b\x97\x3f\x3c\xbe\x6c\x7c\x4d\x48\xce\x74\xa6\xf8\xc6\x00\x75\x8e\x76\xa6\xee\x7e\x9f\x33\x4d\x28\x71\x5b\xe8\x67\x47\xea\xe9\x91\x85\x54\x15\x48\x42\xcc\x8a\x91\xac\x28\xb5\x61\x6a\x42\xae\xec\x3f\xea\x91\x19\x15\x44\x33\x75\xc3\x33\x46\xde\x6c\x
98\xb8\xb4\x98\x26\x6f\x65\x69\x18\xa1\x22\x27\xff\x59\xce\x99\x12\xcc\x30\xed\x11\x19\x00\x56\xee\x68\xe8\x09\x99\x09\xf2\xe3\x8a\x09\x42\xc5\x2e\xc2\x09\xd7\x24\x53\x8c\x1a\x96\x1f\x11\x4a\x04\xbb\xed\x98\x73\x00\x36\x67\x9b\x42\x6e\xd7\x4c\x98\xe0\x49\x62\x24\xa1\x45\x21\x6f\x89\x3d\xb1\x4a\xd0\x82\x18\x45\x17\x0b\x9e\xd9\x5f\x14\xa3\xd9\x0a\x16\xea\x16\xa3\x89\x59\x51\x13\x00\x65\x77\x1b\xa9\x99\x9f\x1c\x91\xca\x2d\x32\x58\xc3\x1f\x37\x39\x35\x5c\x2c\x89\x59\x71\x5d\xfd\x40\xd6\x74\x4b\x0a\x46\x61\x06\x39\xd7\xaa\x84\xed\x09\x40\x2f\xa4\x22\x9b\x72\x5e\xf0\x8c\x2c\x68\x66\x01\x08\x66\x6e\xa5\xba\xb6\xcb\x13\x2c\x03\xe6\x40\xa8\x8e\xae\x9d\x28\x76\xc3\x75\x13\xa8\x7d\xeb\x9c\x11\x18\x90\x13\x59\x1a\xc0\xb1\xe7\x20\xd7\xd5\xb6\xd8\x93\x98\xcb\x4c\x4f\x33\x29\x32\xb6\x31\x7a\xea\x31\x30\x76\xb3\xe0\x62\x39\x75\xef\x1c\xb7\x69\x1c\x3f\x6e\xf3\xd8\x0d\x53\x64\x23\xb5\xe6\xf3\x82\x1d\x11\x6d\xf9\xe2\xbc\x60\x24\x67\x0b\x5a\x16\x46\xc3\x42\x2d\x8e\x37\x05\x35\x0b\xa9\xd6\x84\x2a\x46\x4a\xcd\xf2\x09\xb9\x64\x8c\xd8\x3d\x08\xd1\xc2\x59\x91\xc3\x33\x6b\xa9\x2c\x14\x43\x79\x81\xa4\x72\x2e\xd7\x1b\x6a\x38\x72\x1c\x52\x58\xe6\x41\x8e\x4f\xc9\xa5\xa1\xf6\x7d\xb7\xdc\xac\xb8\x00\x0a\xff\x5b\x83\x90\x1d\x1b\x02\x98\x94\xac\xb9\xe0\xeb\x72\x4d\xe4\x82\x1c\x3f\x22\x6b\x29\xcc\x0a\xf6\xf5\xb1\xfd\x45\x2a\x3f\x5a\x93\x83\xdb\x15\xcf\x56\xb0\x3a\xae\x49\x21\xc5\x92\xa9\xc3\xc9\xa8\x02\xbc\x51\x96\xd9\x18\xee\xf9\x0a\x7e\x02\x39\x11\x7c\xdb\x3a\xa2\x0f\xec\x29\xc6\x51\x16\x4d\x5c\x00\xe1\x31\xcf\x09\x58\xee\x0e\xbe\x9d\xa4\xa3\xaa\x8d\x62\x9a\x09\x14\x19\x0d\xc0\xc4\x0e\xa2\x82\xc8\xf9\xdf\x58\x66\x2c\x4a\x95\x05\x43\xf4\x4a\x96\x45\x6e\xc9\xe5\x86\x29\x43\x14\xcb\xe4\x52\xf0\x5f\x2a\xd8\xda\x12\xa6\x7d\x69\x41\x0d\xd3\xa6\x05\x93\x0b\x77\x5a\x6e\x68\x51\xb2\x23\x38\xd4\x96\xb6\x14\xb3\x6f\x21\xa5\x08\xe0\xc1\x10\x3d\x21\xdf\xdb\xfd\xe2\x62\xd1\x14\x59\x5e\x3e\x66\x72\xbd\x2e\x05\x37\xdb\x29\x50\x13\x9f\x97\x46\x2a\x3d\x05\x11\x30\xd5\x7c\x39\xa6\x2a\x5b\x71\xc3\x32\x53\x2a\x66\xc5\xda\x18\xa6\x2e\xe0\x18\x4c\xd6\xf9\x67\xd5\x91\x7b\xd0\x98\xab\xd9\x5a\x86\xaa\x8d\xe2\x62\x19\xfc\x00\x5c\x3e\xb1\x03\x96\xcb\xdb\x7d\xa5\xee\x51\x5c\x45\x8d\x68\x3c\xd0\x8c\xbc\x7d\x71\x79\x55\x1f\x6a\xbb\x19\x6d\xec\x03\xde\xeb\x07\x75\xbd\x05\x16\x61\x5c\x2c\x98\xc2\x4d\x5c\x28\xb9\x06\x98\x4c\xe4\x1b\xc9\x85\x71\x2c\x96\x33\xd1\x46\xbf\x2e\xe7\x6b\x6e\xec\xbe\xff\xbd\x64\xda\xd8\xbd\x9a\x90\x73\x50\x1a\xec\xf1\x2e\x2d\xcb\xb1\xe7\xe7\xa5\x20\xe7\x74\xcd\x8a\x73\x2b\x66\x3f\xf6\x06\x58\x4c\xeb\xb1\x45\xec\xb0\x2d\x08\xf5\x9d\xf6\x60\xc4\x5a\xf0\x83\x97\xe3\x91\xfd\xb2\x3f\xdb\xed\x02\x5e\xbd\x61\x19\x5f\xf0\x0c\xce\x02\x1e\x11\xcb\x27\x34\x57\x2c\x27\x73\xb6\xa2\x37\x5c\x2a\xf7\x7d\x0b\xaf\x3b\x42\x66\xd2\x18\xd0\x7d\xa4\xed\x07\x77\xe9\xea\xd5\x65\xfb\x87\xd6\x3c\xab\x71\x7e\x96\x4c\x13\xcd\x8c\x25\x27\x64\x84\x6e\x47\x2d\x79\xd9\x33\x75\xc3\x14\x5f\x6c\x9b\x78\x6b\xbe\x93\x64\x76\x46\xb0\x5c\xa6\x8f\x08\x30\x25\x90\xbf\x73\x64\xa2\xf6\x18\x33\x01\x1c\x70\x5d\x9a\x92\x16\xc4\xbe\xbd\x29\xca\xfd\x87\xe5\x4b\x36\x36\x4c\xad\xb9\x00\xe1\x68\x67\xa0\x18\x13\x99\xda\x6e\x0c\x51\x56\xb2\xe9\xc9\xce\x73\x71\xac\xd8\x0f\x48\x57\x96\x5f\x96\xb0\xa3\x17\xd4\x58\xc6\xd1\x39\xb2\x85\xaa\xee\x07\x03\xbc\x51\x50\xc7\xec\x46\x2a\x06\x2a\x55\x27\x4c\x90\xd1\x76\x53\x41\x5c\x5a\xf1\xed\x59\xdf\xdc\x0a\x61\x93\xad\xec\x42\x97\x94\x0b\x8d\x07\x2e\xe7\x80\xfe\x92\xeb\x15\xa8\x5f\x5d\x1f\xab\x84\x11\x69\x85\xc9\x0d\x2d\x78\xde\xb1\x15\x16\xed\x0b\x5e\x18\x56\x6d\xa9\x9e\x10\xab\x26\x45\x20\xba\x25\x34\x26\xbb\x2e\xb5\xb1\x7b\x48\x2e\xce\xdf\xbe\
x20\x7a\x2b\x0c\xbd\x9b\x10\xf2\xd2\x31\x7d\x58\x3d\xd7\x84\xad\x37\x66\x7b\x14\x9b\xa9\x9f\x86\xa5\x28\xae\xc9\x86\x29\x2b\x63\x2d\x73\x40\x40\xac\x82\x23\xa4\x40\x50\xf6\x5b\x41\x68\x9b\xe9\xf8\x8f\x95\x7f\x86\x48\xc1\xc8\x06\x37\x05\x27\x0a\xb8\x24\xb4\x03\x17\x0f\xf4\x1e\x48\x55\x84\x15\x9a\xc1\xc4\x3a\x55\x1a\x4b\x0c\x78\xce\x83\x37\x44\x20\x5a\xfa\xcd\x99\xe0\x4e\x7c\xd6\x7a\xd3\x2e\x11\xdb\x0f\x37\x6c\x1d\xa1\xcc\x28\x13\x6b\x0f\xa0\x4a\xd1\x6d\xe7\xef\x77\xe3\x5a\xbd\x1a\x5b\x9c\x8f\xdd\x13\x46\xae\x79\x16\x3d\xdf\xe7\x67\x03\xce\x8a\x1f\xda\x38\x1d\x99\x14\x0b\xbe\x5c\xd3\x0d\x20\x90\x72\xe1\x65\xd6\xc5\x8b\xef\xc7\x4c\x64\x32\x8f\x6e\xc5\xf9\x59\x83\x96\xe7\xa5\xc8\x0b\xd6\x3e\x3c\x9e\xbb\x20\x8f\xaa\x76\xfe\x41\x5b\x02\x56\xeb\xa9\x21\xe2\x51\x20\x34\xb7\xda\x96\x36\x60\x92\x21\x15\xa1\x5e\x8e\x04\x5e\x2f\x80\xb7\x75\x9a\x0a\xe9\x2b\x46\x2a\x53\x6e\x8c\x0f\xa0\x31\x67\xad\xa7\xee\x6d\x4e\xf3\x2b\xe2\x28\x31\xf6\x5b\x0b\xf3\x40\xb4\x4e\xf0\x78\x81\x36\x41\x4a\xc6\xd3\xa5\xd8\x82\x29\x26\x32\x96\xbb\xf5\x44\xe1\x5a\xed\x7c\x13\xfd\xb5\x97\xfe\x2c\x9b\xb1\x22\xae\x7b\xe2\x68\x38\x26\x08\x77\x47\xe2\xfa\x8f\xa3\xad\x7a\xf3\x2e\x64\xc1\xb3\xed\x00\xaa\x1c\x45\x1e\x0d\xa8\xf4\x76\xc5\xcc\x8a\xa9\xf0\xb8\xc7\xa8\x27\x64\x02\xb0\x52\xed\xa6\x06\x9a\xea\x46\xc9\x1b\x9e\x37\x98\x02\x32\x5c\xab\x58\x59\x93\x21\xc6\x22\x32\x30\x6f\x50\xbd\x06\x45\x95\xcc\x46\x6f\x1d\x2a\x67\x23\xcb\x8f\x66\xa3\x37\xb0\x20\x5a\xcc\x46\x60\x67\xbc\x96\x86\xb5\x0d\xc1\x06\x42\xbb\x79\x97\x14\xc5\x96\x64\x2b\x96\x5d\xeb\x2e\xd9\x0d\xe2\xbf\x25\x81\x13\x7c\xad\x96\xcb\x56\x9e\xa3\x6c\xfe\x1d\xe1\xc6\xca\x7e\xab\x06\xc2\x8b\x76\x5f\x90\x15\x8c\x2a\xc3\xee\x62\x73\xff\xee\xea\xea\xc2\x2e\x7a\x43\xb5\x36\x2b\x25\xcb\xe5\x2a\x78\x41\x60\xde\x84\x1f\x26\xca\x75\x8c\xec\x46\xdd\x8f\x8c\x89\x47\x72\xe4\x67\x8f\xf3\x04\xc9\x46\x0e\x43\xfc\x20\x8c\x2b\x46\x19\xff\xa9\x4d\xac\x3b\x23\x13\xc7\xc5\x99\xb4\x01\x8c\x1e\x75\x70\xb4\xfb\x04\x1a\x1d\x15\xd7\x00\x07\x05\xd1\x2c\x53\xcc\x04\x7c\xbc\x03\x2b\xa8\xe5\x02\xb8\x06\xf3\xb6\x86\xbb\x55\x7c\xb7\x11\xba\x9c\xa0\x8b\x05\xfc\x16\x5d\x87\x2f\x97\xe2\x81\x71\x47\x16\x60\x70\x45\xe4\xad\x08\xdf\x71\xd4\xb1\x72\xbb\x0e\x30\xe2\x3b\x40\xce\x04\xb0\x7f\xb7\x2a\xe4\xfb\xb8\x34\x98\xe3\x42\x5a\xd5\xcf\x4a\xab\x6b\xb6\xd5\x28\xc5\xad\x9d\x60\x9f\x23\xc4\x14\x7a\x92\x29\xd3\x45\x6f\xe1\xb2\x17\xbc\x40\x97\x14\x70\x08\x7c\xec\x9a\x6d\x4f\x2d\xcc\xd6\x8f\x33\x61\x55\xa1\x52\x68\x66\xba\xd4\x28\x4a\x6e\x79\x91\x67\x54\xe5\x8d\x17\xd8\x7d\x2a\x8d\x5c\x53\xc3\x33\x5a\x14\x5b\xb2\x64\x82\xa9\x4a\x6f\x46\x07\x46\xb7\xc2\xd7\x02\x83\x3a\xa4\x77\x83\x74\x70\x8e\x5c\xae\x2d\x6e\x0e\x2c\x5c\x5d\xce\xf1\x9f\xfa\xb0\x6b\xae\x22\x07\x20\xf5\x5c\x9a\x7a\xd8\xf9\x99\x5d\x4c\x61\xc5\x77\x73\xf2\xd6\xa0\x5f\xaa\x08\xcf\xb9\xe5\x66\x15\xfa\xfb\x1e\x68\x62\x94\xdd\x35\x6d\xa4\x62\x13\x87\xc0\x28\x9e\x3a\x20\x82\xee\x00\xcb\x59\x51\x85\xd4\xb9\x2e\x0b\xc3\x37\x05\x03\x16\x34\x7d\xe4\x8c\x96\xdc\x71\x1e\xe7\x6e\x21\x7c\xbd\x29\x78\x27\x9d\x9e\xbd\xba\x78\x7d\x88\xaa\xab\x97\x0b\x07\x7c\xc2\x26\x47\x44\x48\x43\xe7\xc5\x96\xcc\x95\xbc\xd5\x4c\xe9\x43\xf0\x30\x51\x43\x0a\x3e\x67\xca\x6c\xd1\xd5\x57\xea\xae\xad\xb2\xca\x45\xe8\x70\x9b\xa0\x48\x59\x33\x2a\x74\xa0\xec\x52\x81\x10\x50\xed\x72\x83\x89\x91\x9d\x5b\x24\x41\xe4\xa9\xca\x1b\x0a\x7a\x15\x28\xf4\xd7\xac\x80\xd9\x2c\x28\x2f\xdc\x9b\x9c\xc1\x5a\x82\x97\x09\xb7\xb5\x28\xba\x34\xcc\x6b\x61\x4f\x25\xd5\xe1\x04\x32\x49\x0b\xa6\x33\x2e\x96\x13\x7f\xe8\xb8\x18\xdb\x79\x86\x3b\x74\xe0\xa4\x70\x07\xd0\x9a\x8e\xa4\xb2\x9b\xa6\xc6\x5e\x76\xe7\x87\xbd\x94\x04\x74\xd3
\x01\xb4\xf2\x08\x3f\xd0\x64\x5e\xf2\xc2\x8c\xb9\x20\x6f\xce\x4a\xb3\x42\x66\xa5\x3a\xc4\x4c\x5a\x69\x8b\x2b\x6c\x4d\xaf\xce\xeb\x5d\xc5\xcc\x74\xfa\x45\x52\x9e\x50\x79\xc3\xd4\x0d\x67\xb7\x53\xe7\x06\x1d\xdb\x65\x8e\x51\x20\xe8\x29\x28\x9f\xd3\xcf\xe0\x7f\x11\x19\x7b\xf5\xe6\xf9\x9b\x53\x72\x96\xe7\x04\x29\xa1\xd4\x6c\x51\x16\xa8\xa6\xe8\x49\xe0\x22\x3c\x02\x37\xd5\x11\x29\x79\xfe\x87\x07\xfb\x0b\xc2\x94\xa4\x02\x16\xd2\x2b\x9d\x90\xef\x80\x44\x7a\xfe\xfa\x12\x95\x5d\xe7\x08\x4e\x48\x94\x08\x5f\x72\x02\xc1\xd2\x37\x6a\xc2\xa5\x62\xf5\xa9\x5f\x30\x6a\x4a\xc5\x34\x70\xf9\x2f\xc8\x37\x8e\x1b\xbe\x92\x34\x7f\x46\x0b\x2a\x32\xa6\x2e\xf1\xcd\x5d\x8e\x0b\xef\x31\x03\x87\xb9\x5e\x59\xd9\x01\x66\x05\x5b\x6e\x8f\x48\xbd\x0c\xd2\x31\x83\xe7\xaf\x2f\x3b\x20\x2a\x96\x49\x95\x6b\xe7\x82\x76\xe0\x2f\x2a\xe8\x97\x0e\xf8\x04\x67\x0b\x12\xb4\xd4\xe0\xb5\xa9\xcf\x4c\x97\x30\xdd\x15\xd0\x47\x6d\x63\xb6\x3a\x57\x28\x17\xf0\x26\x00\xd7\x10\xc3\xac\xd1\x81\x5c\xc0\x39\xef\x4a\x64\x37\xd7\x2b\xaf\xe9\x82\x3b\x00\x57\xd4\x39\x57\x23\x09\x17\x39\xbf\xe1\x79\x49\x0b\x77\xaf\xa1\x0d\x35\xa5\x66\x9a\x68\x89\x6c\x8b\x89\xdc\xf2\x13\xa5\x1d\x07\xb2\x1a\xbd\x02\xad\xc5\x50\xb5\x64\x5d\x6a\x66\x75\xd3\x62\x49\xaa\xc2\xf3\x4c\xf8\x7d\x02\x7d\xc0\xda\x97\x82\xff\xbd\x64\x84\xae\xa5\x45\x6b\x51\xec\x7a\xe7\x74\xa7\xb8\x16\xb9\xd7\x81\x43\x57\x28\xca\x28\xe7\xe0\xa8\xee\x1e\x60\x8d\x00\x75\x82\x14\xd1\xbc\x8c\x74\xf2\xae\xe3\x2d\x13\xcb\x07\x27\x38\xe3\x0e\x76\x95\x38\x97\x71\x62\xea\x3b\x8e\xf1\x27\xc3\xb3\xe5\xb6\xb4\xc7\xa4\x0a\xb4\x0b\x0f\x16\xb0\x81\x1c\xc9\xdd\xf1\xe8\x23\xef\x3a\x2c\x24\xcd\xc9\xdc\x9d\xc3\x8a\xc5\x5b\x81\xd8\xb5\x05\xcc\x64\x93\x86\x56\xd5\x50\x4e\xad\x50\xa3\x76\xb2\x52\x58\xae\xab\xa8\x36\xaa\x04\xaf\xf2\xde\x7b\x00\xd4\x38\xf1\x17\x47\x4e\x41\x3c\xfb\xf1\xf2\xb4\x1e\xd4\xc1\x40\xc8\x01\x28\x34\x2f\x1c\x1d\x76\x40\x86\x6b\xd8\x43\x0b\xeb\x97\x52\xb1\xd3\xe1\xb0\xdc\x93\x5d\x3a\x2f\x21\xdf\x9e\x5f\xec\x35\xb1\x7a\x1a\x2f\x9f\x7d\x7f\x5e\xc8\xb2\xd3\xb6\x9f\xed\x0b\xec\xac\xe0\x73\x3a\xa7\x08\x70\x20\x83\x8d\xc1\x7a\xc5\xe7\x37\x5c\x19\xb7\xac\xef\xa4\x36\xaf\xdd\x25\xe5\x4c\x90\x33\xb1\x75\xf4\xe4\x77\xa8\x8b\xcb\x6c\x37\x56\xbb\xc3\x98\x00\xcb\x40\x5f\x4b\xc1\x0e\x2b\x62\x31\x32\x84\x0a\x54\x15\x3f\x06\x5d\x74\xbe\xc3\x09\xf6\xd5\x2d\x56\xf5\xeb\x07\xa8\x18\xc1\x68\xb2\x92\x45\xae\xc9\x86\x2a\xba\x66\xc6\xb2\x48\xaf\xe2\x07\x2b\x8a\xf9\xac\x13\x02\x6d\x42\x2e\xf0\x2a\x09\x3d\x0a\x7c\x01\x38\xb4\xc7\x2a\xc4\xd4\x3d\x5d\x5f\x56\x01\xba\x90\xdd\xe6\x95\x5f\x2e\xec\xcc\x29\x79\xfa\x70\x98\x8b\xcc\x83\xf4\x6e\xb2\x8d\xfd\x5b\xa2\xa9\x67\xd1\xe5\xee\x2c\xd0\xb1\x98\xf0\x8e\x05\x2e\xc7\x82\x6b\xc3\x04\xe0\x13\x1c\x16\xb5\x8b\xbd\x76\xf8\xd4\x9e\xca\x04\x4c\xcd\xec\xeb\x99\xc0\x49\x3d\x7d\x08\xba\x4e\xa1\x18\xcd\xad\x22\x6b\xdf\x37\x09\x04\xa6\x83\x28\x64\xcc\x7b\x42\x80\xb3\x72\x91\xf1\x9c\xd5\x56\xd3\x6b\x99\x33\x40\x80\xa2\x62\x59\xe9\x9f\x55\xec\x04\x28\x0f\x69\x98\x95\xd2\x5d\xfb\xa9\x1a\x32\xec\xe9\xc3\xee\xfd\x26\xa8\x3d\xac\xa9\x39\xb5\x4c\xfb\xf1\xa3\xe8\xa8\x35\xbd\xe3\xeb\x72\x7d\x4a\xbe\x7c\xf2\xe4\xf1\x93\xf8\x30\xbc\x1d\x3f\x25\xc7\x3d\x8e\x4a\x10\x11\x9d\x5c\x9b\x38\x2a\xd3\x03\xc9\xec\xe4\xe4\xf1\x70\x3a\xd3\x1f\x97\xd0\x2e\x53\x94\x96\x86\xd8\xa4\xb4\x93\x93\xc7\x43\x48\x2d\x01\x12\x7c\x7c\xf7\x21\xb5\x1e\x98\x7d\xa4\x76\x72\xf2\xf8\x7f\x16\xad\x6d\x94\x34\x32\x93\xc5\x40\x87\xfe\xc8\x8f\x8f\x78\xa9\xbb\x03\xc3\xba\x3f\xec\x6e\x03\xb7\x55\x5c\x64\x72\x6d\x59\x78\x18\xb9\x63\x24\xdc\xed\x6d\x0a\xab\xf3\x5e\x9d\x83\xd7\x35\x6e\x88\x57\x2b\xee\x9a\x42\xf5\xa2\x8b\xb7\x6f\xfe\xf4\xe7\x6a\xc5\x2
0\x2d\x9b\x5f\xa5\xf8\x56\x70\x67\x0c\xf4\xd4\xd0\xf9\xfc\xd5\x69\xb9\x01\xfa\xe5\x06\x8d\x28\x08\x19\x48\xdc\xc0\xf9\x29\xbb\xf8\x08\x9a\xe7\x76\xe6\x4c\x5b\xca\xf4\xbe\x93\x00\x27\x70\x40\x16\x52\xdd\x52\x95\xc7\xee\x39\x3c\x50\xa9\x59\x1b\x9f\x9d\xc8\x99\x10\xf2\x47\xb0\xcc\x00\x13\x09\x90\xd5\xc6\xa3\xce\xab\x23\xb8\x06\x67\x11\x20\x01\xe7\x80\x4b\x4b\xc0\xad\x17\xcd\x85\x36\x8c\xe6\x78\x65\x6d\x41\xf8\x1b\xb9\x06\xaa\x1f\x68\xff\x48\x02\x28\x17\x28\xf9\x56\x8c\xe6\x76\x77\xac\xd5\x53\xc8\xa5\x9e\x90\xfa\x7a\x02\x17\x52\x2d\x7d\x08\x11\x38\x6e\xb9\xbb\x6a\xb0\x49\x33\x5a\xee\x22\x7d\x41\x79\x0a\x22\x5f\x90\xad\x2c\xc1\xe3\x66\xd9\x8b\x37\x92\x9b\x06\x05\xcc\x16\xec\xca\xc1\x33\xb5\x6f\x46\x42\x19\x4a\x05\xd6\x2a\xb6\x32\xe2\x74\x3a\xbd\xbd\xbd\x9d\xac\xe8\x46\xc9\xbb\xed\x44\xaa\xe5\x34\x97\xb7\xc2\xce\x68\xfa\x68\xf2\x68\x9a\xcb\x6c\x0a\x3f\x8d\xab\x83\x64\xa2\xf7\x25\xc4\xd9\xe7\x5c\x20\xef\xe3\x52\x10\x3a\x97\xa5\xe9\x3a\x8b\x57\x0d\xe7\x36\x5e\x37\xa5\x08\x47\xb1\x86\x6f\xd8\xcb\x9b\xd3\xda\x90\x07\x7b\xd6\xd9\x99\x13\xf2\x05\x99\x8d\xae\xce\x2f\x66\xa3\x38\x87\x26\x30\x08\xe6\xe6\x6e\xb3\xda\x60\x02\xbe\x17\x58\x6d\xb1\xf8\x02\xfb\x71\x9e\x59\x0d\x5c\xcc\xb2\x8e\xae\xe5\x07\x64\x19\x80\x4d\x11\x0e\xf8\x34\x20\x78\xca\xb2\x9a\x95\x15\x68\x91\xfb\x27\x92\xbc\x83\x22\xa9\x7b\x28\xfc\xf1\xea\xfc\x22\xf1\x6b\x9a\x71\xf4\x5e\xcc\x12\xf0\x9a\x0c\x55\x77\x8e\xbf\x7e\xfc\xe5\x30\x49\x55\x41\x4d\xe8\x3b\xe0\x8b\x59\xa1\xdf\x26\x45\x6a\x55\xec\x19\x78\xa5\x15\x90\x5e\xe5\x1b\xea\x56\x81\x2b\xed\x23\x45\x19\x03\xf5\x92\x97\x10\xa2\x58\x39\x86\xec\x51\x4c\x69\x53\x95\x1f\x42\xd7\x7e\xbc\x3c\xc9\x11\xb8\xde\x65\x09\x29\x92\xde\x61\xca\x7e\xe1\xa5\x76\xc1\x09\x80\x6c\x7b\x2e\x57\x8c\x16\x66\xe5\x2e\x76\x53\x07\xef\x6a\x07\x26\x86\x49\x8b\x1c\x59\xf9\x46\xc9\xb9\x95\x8c\x22\x78\x81\xe5\x26\x09\x90\x4b\x7e\x03\xb6\x43\xce\x8e\x6a\x5c\x6f\xa8\x59\x91\x29\x4e\xeb\x97\x29\x2a\x98\x46\x92\x9c\xe1\xbd\x32\x23\x7c\xd1\xb3\xf2\x8e\x2b\x28\x08\x32\x75\x90\x14\xcb\x18\xbf\x61\x55\xa4\x34\x92\x5c\x52\xaf\xcc\xd9\x04\x5c\xba\x68\x80\xba\x30\x7c\xee\x68\xb5\x89\x14\x70\xff\x59\xfa\x72\x9b\x99\x9a\x6b\x1d\xaa\x4d\xe1\x25\xa4\x14\x86\x17\x78\x00\x82\x6d\x71\xd2\xd6\x2d\x61\xbf\x3d\x72\xfb\x4e\x0b\x2d\x89\x36\x72\x13\xa8\x27\x75\xf8\x25\xc6\x16\xa7\x58\xb9\x57\x7a\x2d\xe9\x9f\x3c\x21\x9a\x65\x52\xe4\x9a\xd0\x85\x3d\x6d\xad\xcd\xd2\x86\xe2\x64\x9d\x8a\x90\xd6\xd8\xc7\xb8\x28\x72\xa1\xe4\xdc\x4e\x8a\xdd\x30\xb5\x25\x4f\x2c\x52\x8e\x1f\xfa\x17\x39\xf2\xa0\xe4\xc9\x18\xbf\x49\x61\x95\xaf\x99\xe5\xe0\x56\xa5\x70\x4f\x99\x95\x62\x7a\x25\x0b\x50\x5d\xcc\xad\x24\xba\xcc\x32\xa6\xf5\xa2\x2c\xac\xda\x6a\x55\x80\x68\xb0\x02\x71\x57\xf2\x2e\x4e\x95\xcc\x59\x26\xd7\x7e\x7f\xb6\x70\xaf\x24\xfc\x3f\x14\xb3\xc2\xc7\xf0\x1b\x56\x6c\x8f\x2c\x13\x4a\xf1\x16\x56\x14\x63\xc3\xb4\x09\x42\x8c\xbd\x29\x9d\xb4\x62\xd2\x4a\x85\xe5\xbf\xff\x93\x0c\x9c\x9e\xe8\x9d\x22\x70\xef\x0d\x70\x5f\x85\xc3\xe3\xfe\xab\xc6\x09\x89\x21\x2b\xe6\xa4\xea\x70\x38\xde\xd7\x59\xe5\x82\x7e\xd4\x45\x35\xc5\x3d\x8c\xbc\xd6\x93\x6e\xb5\x3e\x4e\xb8\xb1\xc4\xa4\x1e\x5e\xeb\x7d\x3e\xf0\xd8\x2b\xa1\xa5\xc8\x99\x2a\xb6\x10\x79\xd9\xf0\x73\x57\x13\x4f\xb1\xa2\xee\x0b\x8b\xea\xce\x75\xb3\x29\xb8\x4f\x91\xa8\x5e\xbc\xbb\xac\x94\xcc\x87\xfb\x46\xbc\x5d\xc2\x2c\x0a\xa7\xb9\x62\x8c\x87\x7f\x65\x42\xe7\xea\xdf\x21\xfb\xa1\xb7\xc9\x9f\xdb\x5b\x43\x6f\xb5\x5f\x47\x2d\xde\x1d\x82\x7d\xa4\x74\x22\xf8\xaa\x7a\xab\x62\x8d\x0d\x39\xfb\xf1\xb2\x65\xd1\x46\xee\x84\x7a\xe0\xa6\x37\xc0\x4e\x7e\x17\xaf\x7d\x53\xdd\x0f\xeb\x64\x30\xe6\x09\x84\xef\x51\xad\x79\xf6\xaa\x97\x0f\x84\x9f\x56\x
[… remainder of the gzip-compressed bytes for manifests/00-custom-resource-definition.yaml elided …]\x00\x00\xff\xff\x6f\xfd\xe3\x06\x70\x96\x01\x00")
 
 func manifests00CustomResourceDefinitionYamlBytes() ([]byte, error) {
 	return bindataRead(
@@ -457,8 +457,8 @@ func manifests00CustomResourceDefinitionYaml() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "manifests/00-custom-resource-definition.yaml", size: 91138, mode: os.FileMode(420), modTime: time.Unix(1, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4c, 0x4a, 0x57, 0xd4, 0x71, 0xd8, 0x31, 0xec, 0x6, 0x52, 0x5b, 0x3, 0xa0, 0xda, 0x91, 0x8b, 0x82, 0x5c, 0x4d, 0xf1, 0xf4, 0x8b, 0xef, 0xaa, 0x2b, 0x49, 0x4d, 0xc8, 0x9e, 0x4b, 0xff, 0x5c}}
+	info := bindataFileInfo{name: "manifests/00-custom-resource-definition.yaml", size: 104048, mode: os.FileMode(420), modTime: time.Unix(1, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x81, 0xb0, 0x3, 0x8b, 0x91, 0xf6, 0x45, 0xa9, 0x9c, 0x3a, 0x37, 0xa9, 0x43, 0x76, 0x2d, 0x88, 0x5c, 0xed, 0x7e, 0xea, 0x34, 0x1b, 0xac, 0xc0, 0xe7, 0x1, 0x78, 0x71, 0x70, 0xeb, 0xa5, 0xb4}}
 	return a, nil
 }
 
diff --git a/test/e2e/operator_test.go b/test/e2e/operator_test.go
index 6a695a925b..3266409c3f 100644
--- a/test/e2e/operator_test.go
+++ b/test/e2e/operator_test.go
@@ -2631,6 +2631,11 @@ func newHostNetworkController(name types.NamespacedName, domain string) *operato
 			Replicas: &repl,
 			EndpointPublishingStrategy: &operatorv1.EndpointPublishingStrategy{
 				Type: operatorv1.HostNetworkStrategyType,
+				HostNetwork: &operatorv1.HostNetworkStrategy{
+					HTTPPort:  8080,
+					HTTPSPort: 8443,
+					StatsPort: 19360,
+				},
 			},
 		},
 	}
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
index ad825f5f0a..ab59311813 100644
--- a/vendor/github.com/go-logr/logr/README.md
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -105,14 +105,18 @@ with higher verbosity
means more (and less important) logs will be generated. There are implementations for the following logging libraries: - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) ## FAQ diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 44cd398c9f..c3b56b3d2c 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -43,7 +43,9 @@ limitations under the License. // // Info() and Error() are very similar, but they are separate methods so that // LogSink implementations can choose to do things like attach additional -// information (such as stack traces) on calls to Error(). +// information (such as stack traces) on calls to Error(). Error() messages are +// always logged, regardless of the current verbosity. If there is no error +// instance available, passing nil is valid. // // Verbosity // @@ -53,6 +55,7 @@ limitations under the License. // Log-lines with V-levels that are not enabled (as per the LogSink) will not // be written. Level V(0) is the default, and logger.V(0).Info() has the same // meaning as logger.Info(). Negative V-levels have the same meaning as V(0). +// Error messages do not have a verbosity level and are always logged. // // Where we might have written: // if flVerbose >= 2 { @@ -112,6 +115,15 @@ limitations under the License. // may be any Go value, but how the value is formatted is determined by the // LogSink implementation. // +// Logger instances are meant to be passed around by value. Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// Calling methods with the null logger (Logger{}) as instance will crash +// because it has no LogSink. Therefore this null logger should never be passed +// around. For cases where passing a logger is optional, a pointer to Logger +// should be used. +// // Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but @@ -253,11 +265,13 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // Error logs an error, with the given message and key/value pairs as context. 
// It functions similarly to Info, but may have unique behavior, and should be // preferred for logging errors (see the package documentations for more -// information). +// information). The log message will always be emitted, regardless of +// verbosity level. // // The msg argument should be used to add context to any underlying error, // while the err argument should be used to attach the actual error that -// triggered this log line, if present. +// triggered this log line, if present. The err parameter is optional +// and nil may be passed instead of an error instance. func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go index e4ffca838a..62837c991b 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go @@ -111,7 +111,7 @@ type timeApproximator struct { func (a timeApproximator) compare(x, y time.Time) bool { // Avoid subtracting times to avoid overflow when the - // difference is larger than the largest representible duration. + // difference is larger than the largest representable duration. if x.After(y) { // Ensure x is always before y x, y = y, x diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go index 26fe25d6af..8eb2b845f4 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package cmpopts diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go index 6eeb8d6e65..60b0727fc7 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.13 // +build !go1.13 // TODO(≥go1.13): For support on 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) + skipType = true return opts.WithTypeMode(emitType).FormatType(t, out) } } diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 2ad3bc85ba..68b5c1ae16 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -80,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { } // Use specialized string diffing for longer slices or strings. 
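The logr documentation updates above pin down two behaviors: Error() is emitted regardless of the configured verbosity and accepts a nil error, and Logger values are meant to be passed around by value (never the zero Logger{}). A minimal sketch of that contract, using the funcr bridge listed in the README; the verbosity threshold of 1 is an arbitrary illustration, not something this patch configures:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func doWork(log logr.Logger) {
	// Logger is passed by value; callers need no readiness check.
	log.Info("working", "step", 1)
}

func main() {
	// Build a Logger whose sink only enables V-levels <= 1.
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 1})

	log.V(0).Info("printed: V(0) is the default level")
	log.V(2).Info("dropped: level 2 is above the configured verbosity")

	// Error() ignores verbosity entirely, and nil is a valid error value.
	log.V(5).Error(nil, "still emitted despite V(5)")

	doWork(log)
}
```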
- const minLength = 64 + const minLength = 32 return vx.Len() >= minLength && vy.Len() >= minLength } @@ -563,10 +563,10 @@ func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []d nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified ny := ds.NumIdentical + ds.NumInserted + ds.NumModified var numLeadingIdentical, numTrailingIdentical int - for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { numLeadingIdentical++ } - for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { numTrailingIdentical++ } if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml index f8684d99fc..061d72ae07 100644 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ b/vendor/github.com/google/gofuzz/.travis.yml @@ -1,13 +1,10 @@ language: go go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - 1.11.x + - 1.12.x + - 1.13.x + - master script: - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md index 51cf5cd1ad..97c1b34fd5 100644 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md @@ -1,7 +1,7 @@ # How to contribute # We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. +just a few small guidelines you need to follow. ## Contributor License Agreement ## diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 386c2a457a..b503aae7d7 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. See more examples in ```example_test.go```. +You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing. +go-fuzz provides the user a byte-slice, which should be converted to different inputs +for the tested function. This library can help convert the byte slice. Consider for +example a fuzz test for a the function `mypackage.MyFunc` that takes an int arguments: +```go +// +build gofuzz +package mypackage + +import fuzz "github.com/google/gofuzz" + +func Fuzz(data []byte) int { + var i int + fuzz.NewFromGoFuzz(data).Fuzz(&i) + MyFunc(i) + return 0 +} +``` + Happy testing! diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/github.com/google/gofuzz/bytesource/bytesource.go new file mode 100644 index 0000000000..5bb3659496 --- /dev/null +++ b/vendor/github.com/google/gofuzz/bytesource/bytesource.go @@ -0,0 +1,81 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package bytesource provides a rand.Source64 that is determined by a slice of bytes. +package bytesource + +import ( + "bytes" + "encoding/binary" + "io" + "math/rand" +) + +// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are +// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a +// fallback pseudo random source is created in case more random numbers are required. +// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly. +type ByteSource struct { + *bytes.Reader + fallback rand.Source +} + +// New returns a new ByteSource from a given slice of bytes. +func New(input []byte) *ByteSource { + s := &ByteSource{ + Reader: bytes.NewReader(input), + fallback: rand.NewSource(0), + } + if len(input) > 0 { + s.fallback = rand.NewSource(int64(s.consumeUint64())) + } + return s +} + +func (s *ByteSource) Uint64() uint64 { + // Return from input if it was not exhausted. + if s.Len() > 0 { + return s.consumeUint64() + } + + // Input was exhausted, return random number from fallback (in this case fallback should not be + // nil). Try first having a Uint64 output (Should work in current rand implementation), + // otherwise return a conversion of Int63. + if s64, ok := s.fallback.(rand.Source64); ok { + return s64.Uint64() + } + return uint64(s.fallback.Int63()) +} + +func (s *ByteSource) Int63() int64 { + return int64(s.Uint64() >> 1) +} + +func (s *ByteSource) Seed(seed int64) { + s.fallback = rand.NewSource(seed) + s.Reader = bytes.NewReader(nil) +} + +// consumeUint64 reads 8 bytes from the input and convert them to a uint64. It assumes that the the +// bytes reader is not empty. +func (s *ByteSource) consumeUint64() uint64 { + var bytes [8]byte + _, err := s.Read(bytes[:]) + if err != nil && err != io.EOF { + panic("failed reading source") // Should not happen. + } + return binary.BigEndian.Uint64(bytes[:]) +} diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index da0a5f9380..761520a8ce 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -22,6 +22,9 @@ import ( "reflect" "regexp" "time" + + "github.com/google/gofuzz/bytesource" + "strings" ) // fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. @@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer { return f } +// NewFromGoFuzz is a helper function that enables using gofuzz (this +// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous +// fuzzing. Essentially, it enables translating the fuzzing bytes from +// go-fuzz to any Go object using this library. +// +// This implementation promises a constant translation from a given slice of +// bytes to the fuzzed objects. This promise will remain over future +// versions of Go and of this library. +// +// Note: the returned Fuzzer should not be shared between multiple goroutines, +// as its deterministic output will no longer be available. +// +// Example: use go-fuzz to test the function `MyFunc(int)` in the package +// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content: +// +// // +build gofuzz +// package mypacakge +// import fuzz "github.com/google/gofuzz" +// func Fuzz(data []byte) int { +// var i int +// fuzz.NewFromGoFuzz(data).Fuzz(&i) +// MyFunc(i) +// return 0 +// } +func NewFromGoFuzz(data []byte) *Fuzzer { + return New().RandSource(bytesource.New(data)) +} + // Funcs adds each entry in fuzzFuncs as a custom fuzzing function. 
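The vendored bytesource package above implements rand.Source64 over a byte slice, and NewFromGoFuzz wires it into a Fuzzer so the same go-fuzz input always yields the same fuzzed object. A small sketch of both entry points; the config struct and the input bytes are illustrative only:

```go
package main

import (
	"fmt"
	"math/rand"

	fuzz "github.com/google/gofuzz"
	"github.com/google/gofuzz/bytesource"
)

// config is an illustrative type, not part of gofuzz.
type config struct {
	Name    string
	Retries int
}

func main() {
	data := []byte("deterministic input from go-fuzz")

	// ByteSource satisfies rand.Source64, so it can drive math/rand directly...
	r := rand.New(bytesource.New(data))
	fmt.Println(r.Uint64())

	// ...or, more commonly, drive gofuzz via the new helper. The same input
	// bytes always produce the same fuzzed object.
	var c config
	fuzz.NewFromGoFuzz(data).Fuzz(&c)
	fmt.Printf("%+v\n", c)
}
```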
// // Each entry in fuzzFuncs must be a function taking two parameters. @@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int { } func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance + return f.r.Float64() >= f.nilChance } // MaxDepth sets the maximum number of recursive fuzz calls that will be made @@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { fn(v, fc.fuzzer.r) return } + switch v.Kind() { case reflect.Map: if fc.fuzzer.genShouldFill() { @@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ v.SetFloat(r.Float64()) }, reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) }, reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex(r.Float64(), r.Float64())) }, reflect.String: func(v reflect.Value, r *rand.Rand) { v.SetString(randString(r)) @@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ // randBool returns true or false randomly. func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false + return r.Int31()&(1<<30) == 0 +} + +type int63nPicker interface { + Int63n(int64) int64 } -type charRange struct { - first, last rune +// UnicodeRange describes a sequential range of unicode characters. +// Last must be numerically greater than First. +type UnicodeRange struct { + First, Last rune } +// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. +// To be useful, each range must have at least one character (First <= Last) and +// there must be at least one range. +type UnicodeRanges []UnicodeRange + // choose returns a random unicode character from the given range, using the // given randomness source. -func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) +func (ur UnicodeRange) choose(r int63nPicker) rune { + count := int64(ur.Last - ur.First + 1) + return ur.First + rune(r.Int63n(count)) +} + +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from the range ur. If there are no characters +// in the range (cr.Last < cr.First), this will panic. +func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { + ur.check() + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } } -var unicodeRanges = []charRange{ +// check is a function that used to check whether the first of ur(UnicodeRange) +// is greater than the last one. +func (ur UnicodeRange) check() { + if ur.Last < ur.First { + panic("The last encoding must be greater than the first one.") + } +} + +// randString of UnicodeRange makes a random string up to 20 characters long. +// Each character is selected form ur(UnicodeRange). +func (ur UnicodeRange) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur.choose(r)) + } + return sb.String() +} + +// defaultUnicodeRanges sets a default unicode range when user do not set +// CustomStringFuzzFunc() but wants fuzz string. +var defaultUnicodeRanges = UnicodeRanges{ {' ', '~'}, // ASCII characters {'\u00a0', '\u02af'}, // Multi-byte encoded characters {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) } +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. 
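The new UnicodeRange helper above is intended to be registered through Fuzzer.Funcs so fuzzed strings stay within a chosen character range. A hedged usage sketch; the lowercase-ASCII range is an arbitrary example:

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

func main() {
	// Restrict fuzzed strings to lowercase ASCII letters (an arbitrary range).
	lower := fuzz.UnicodeRange{First: 'a', Last: 'z'}

	f := fuzz.New().Funcs(lower.CustomStringFuzzFunc())

	var s string
	f.Fuzz(&s)
	fmt.Printf("%q\n", s) // up to 20 runes, all within the configured range
}
```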
+// Each character is selected from one of the ranges of ur(UnicodeRanges). +// Each range has an equal probability of being chosen. If there are no ranges, +// or a selected range has no characters (.Last < .First), this will panic. +// Do not modify any of the ranges in ur after calling this function. +func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { + // Check unicode ranges slice is empty. + if len(ur) == 0 { + panic("UnicodeRanges is empty.") + } + // if not empty, each range should be checked. + for i := range ur { + ur[i].check() + } + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } +} + +// randString of UnicodeRanges makes a random string up to 20 characters long. +// Each character is selected form one of the ranges of ur(UnicodeRanges), +// and each range has an equal probability of being chosen. +func (ur UnicodeRanges) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) + } + return sb.String() +} + // randString makes a random string up to 20 characters long. The returned string // may include a variety of (valid) UTF-8 encodings. func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) + return defaultUnicodeRanges.randString(r) } // randUint64 makes random 64 bit numbers. diff --git a/vendor/github.com/openshift/api/Dockerfile.build b/vendor/github.com/openshift/api/Dockerfile.build index bd89d6cf08..960f39d9ba 100644 --- a/vendor/github.com/openshift/api/Dockerfile.build +++ b/vendor/github.com/openshift/api/Dockerfile.build @@ -3,7 +3,7 @@ FROM fedora:latest ENV GOPATH=/go ENV PATH=/go/bin:/usr/local/go/bin:$PATH -RUN dnf -y install make git unzip wget findutils +RUN dnf -y install make git unzip wget findutils gcc diffutils RUN wget https://go.dev/dl/go1.17.6.linux-amd64.tar.gz && \ rm -rf /usr/local/go && \ diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index f049eda2d7..3d74577a8d 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -66,6 +66,10 @@ update-scripts: hack/update-swagger-docs.sh .PHONY: update-scripts +verify-with-container: Dockerfile.build + $(RUNTIME) build -t $(RUNTIME_IMAGE_NAME) -f Dockerfile.build . + $(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make verify + generate-with-container: Dockerfile.build $(RUNTIME) build -t $(RUNTIME_IMAGE_NAME) -f Dockerfile.build . $(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make update diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md index 330c1d27d6..fe65d98010 100644 --- a/vendor/github.com/openshift/api/README.md +++ b/vendor/github.com/openshift/api/README.md @@ -8,35 +8,6 @@ conventions](https://github.com/openshift/enhancements/blob/master/CONVENTIONS.m and then follow the instructions below to regenerate CRDs (if necessary) and submit a pull request with your new API definitions and generated files. 
-## pull request process - -Pull requests that change API types in this repo that have corresponding "internal" API objects in the -[openshift/origin](https://github.com/openshift/origin) repo must be paired with a pull request to -[openshift/origin](https://github.com/openshift/origin). - -To ensure the corresponding origin pull request is ready to merge as soon as the pull request to this repo is merged: -1. Base your pull request to this repo on latest [openshift/api#master](https://github.com/openshift/api/commits/master) and ensure CI is green -2. Base your pull request to openshift/origin on latest [openshift/origin#master](https://github.com/openshift/origin/commits/master) -3. In your openshift/origin pull request: - 1. Add a TMP commit that points [glide.yaml](https://github.com/openshift/origin/blob/master/glide.yaml#L39-L41) at your fork of openshift/api, and the branch of your pull request: - - ``` - - package: github.com/openshift/api - repo: https://github.com//api.git - version: "" - ``` - - 2. Update your `bump(*)` commit to include the result of running `hack/update-deps.sh`, which will pull in the changes from your openshift/api pull request - 3. Make sure CI is green on your openshift/origin pull request - 4. Get LGTM on your openshift/api pull request (for API changes) and your openshift/origin pull request (for code changes) - -Once both pull requests are ready, the openshift/api pull request can be merged. - -Then do the following with your openshift/origin pull request: -1. Drop the TMP commit (pointing glide back at openshift/api#master) -2. Rerun `hack/update-deps.sh` and update your `bump(*)` commit -3. It can then be tagged and merged by CI - ## generating CRD schemas Since Kubernetes 1.16, every CRD created in `apiextensions.k8s.io/v1` is required to have a [structural OpenAPIV3 schema](https://kubernetes.io/blog/2019/06/20/crd-structural-schema/). The schemas provide server-side validation for fields, as well as providing the descriptions for `oc explain`. Moreover, schemas ensure structural consistency of data in etcd. Without it anything can be stored in a resource which can have security implications. As we host many of our CRDs in this repo along with their corresponding Go types we also require them to have schemas. However, the following instructions apply for CRDs that are not hosted here as well. diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml index ceb8b7e6d5..24f379c132 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml @@ -65,6 +65,7 @@ spec: enum: - openshift-samples - baremetal + - marketplace x-kubernetes-list-type: atomic baselineCapabilitySet: description: baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent. @@ -167,6 +168,7 @@ spec: enum: - openshift-samples - baremetal + - marketplace x-kubernetes-list-type: atomic knownCapabilities: description: knownCapabilities lists all the capabilities known to the current cluster. 
@@ -177,6 +179,7 @@ spec: enum: - openshift-samples - baremetal + - marketplace x-kubernetes-list-type: atomic conditionalUpdates: description: conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified. diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml index 4ba6c01cf4..e9895002f3 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml @@ -6,6 +6,7 @@ metadata: include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + capability.openshift.io/name: "marketplace" name: operatorhubs.config.openshift.io spec: group: config.openshift.io diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml deleted file mode 100644 index 29ce33c8b0..0000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1126 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: imagedigestmirrorsets.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImageDigestMirrorSet - listKind: ImageDigestMirrorSetList - plural: imagedigestmirrorsets - singular: imagedigestmirrorset - shortNames: - - idms - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules on using digest pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - imageDigestMirrors: - description: "imageDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using tag specification, users should configure a list of mirrors using \"ImageTagMirrorSet\" CRD. \n If the image pull specification matches the repository of \"source\" in multiple imagedigestmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact. \n If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. \n When multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a specific order of mirrors, should configure them into one list of mirrors using the expected order." - type: array - items: - description: ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. - type: object - required: - - source - properties: - mirrorSourcePolicy: - description: mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. If unset, the image will continue to be pulled from the the repository in the pull spec. sourcePolicy is valid configuration only when one or more mirrors are in the mirror list. - type: string - enum: - - NeverContactSource - - AllowContactingSource - mirrors: - description: 'mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their digests. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. The order of mirrors in this list is treated as the user''s desired priority, while source is by default considered lower priority than all mirrors. 
If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy" Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. "mirrors" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - type: array - items: - type: string - pattern: ^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - x-kubernetes-list-type: set - source: - description: 'source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname e.g. docker.io. quay.io, or registry.redhat.io, will match the image pull specification of corressponding registry. "source" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - type: string - pattern: ^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - x-kubernetes-list-type: atomic - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml deleted file mode 100644 index 8471f50fc5..0000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1126 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: imagetagmirrorsets.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImageTagMirrorSet - listKind: ImageTagMirrorSetList - plural: imagetagmirrorsets - singular: imagetagmirrorset - shortNames: - - itms - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
- type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - imageTagMirrors: - description: "imageTagMirrors allows images referenced by image tags in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageTagMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using digest specification only, users should configure a list of mirrors using \"ImageDigestMirrorSet\" CRD. \n If the image pull specification matches the repository of \"source\" in multiple imagetagmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact. \n If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. \n When multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a deterministic order of mirrors, should configure them into one list of mirrors using the expected order." - type: array - items: - description: ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config. - type: object - required: - - source - properties: - mirrorSourcePolicy: - description: mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. If unset, the image will continue to be pulled from the repository in the pull spec. sourcePolicy is valid configuration only when one or more mirrors are in the mirror list. - type: string - enum: - - NeverContactSource - - AllowContactingSource - mirrors: - description: 'mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their tags. 
The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Configuring a list of mirrors using "ImageDigestMirrorSet" CRD and forcing digest-pulls for mirrors avoids that issue. The order of mirrors in this list is treated as the user''s desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy". Other cluster configuration, including (but not limited to) other imageTagMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. "mirrors" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - type: array - items: - type: string - pattern: ^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - x-kubernetes-list-type: set - source: - description: 'source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname e.g. docker.io. quay.io, or registry.redhat.io, will match the image pull specification of corressponding registry. "source" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - type: string - pattern: ^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - x-kubernetes-list-type: atomic - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml index e91245de60..7db041fbab 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml @@ -92,6 +92,61 @@ spec: nutanix: description: Nutanix contains settings specific to the Nutanix infrastructure provider. type: object + required: + - prismCentral + - prismElements + properties: + prismCentral: + description: prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. 
Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. + type: object + required: + - address + - port + properties: + address: + description: address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + type: string + maxLength: 256 + port: + description: port is the port number to access the Nutanix Prism Central or Element (cluster) + type: integer + format: int32 + maximum: 65535 + minimum: 1 + prismElements: + description: prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central. + type: array + items: + description: NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster) + type: object + required: + - endpoint + - name + properties: + endpoint: + description: endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. + type: object + required: + - address + - port + properties: + address: + description: address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + type: string + maxLength: 256 + port: + description: port is the port number to access the Nutanix Prism Central or Element (cluster) + type: integer + format: int32 + maximum: 65535 + minimum: 1 + name: + description: name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (eg Machines, PVCs, etc). + type: string + maxLength: 256 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map openstack: description: OpenStack contains settings specific to the OpenStack infrastructure provider. type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml index 95fe8dfd99..ffc3666076 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml @@ -53,7 +53,7 @@ spec: hostname: description: hostname is the hostname that should be used by the route. type: string - format: hostname + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ name: description: "name is the logical name of the route to customize. \n The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized." type: string @@ -226,13 +226,13 @@ spec: type: array minItems: 1 items: - description: Hostname is an alias for hostname string validation. 
+ description: "Hostname is an alias for hostname string validation. \n The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. ^([a-zA-Z0-9\\p{S}\\p{L}]((-?[a-zA-Z0-9\\p{S}\\p{L}]{0,62})?)|([a-zA-Z0-9\\p{S}\\p{L}](([a-zA-Z0-9-\\p{S}\\p{L}]{0,61}[a-zA-Z0-9\\p{S}\\p{L}])?)(\\.)){1,}([a-zA-Z\\p{L}]){2,63})$ \n The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, except that it allows hostnames longer than the maximum length: ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ \n Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname was saved via validation by the incorrect left operand of the | operator." type: string - format: hostname + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ defaultHostname: description: defaultHostname is the hostname of this route prior to customization. type: string - format: hostname + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ name: description: "name is the logical name of the route to customize. It does not have to be the actual name of a route resource but it cannot be renamed. \n The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized." type: string @@ -268,6 +268,13 @@ spec: resource: description: resource of the referent. type: string + defaultPlacement: + description: "defaultPlacement is set at installation time to control which nodes will host the ingress router pods by default. The options are control-plane nodes or worker nodes. \n This field works by dictating how the Cluster Ingress Operator will consider unset replicas and nodePlacement fields in IngressController resources when creating the corresponding Deployments. \n See the documentation for the IngressController replicas and nodePlacement fields for more information. 
\n When omitted, the default value is Workers" + type: string + enum: + - ControlPlane + - Workers + - "" served: true storage: true subresources: diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml index 67c78fec90..915961e149 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml @@ -14,7 +14,7 @@ spec: listKind: NodeList plural: nodes singular: node - scope: Namespaced + scope: Cluster versions: - name: v1 schema: @@ -39,9 +39,17 @@ spec: cgroupMode: description: CgroupMode determines the cgroups version on the node type: string + enum: + - v1 + - v2 + - "" workerLatencyProfile: description: WorkerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster type: string + enum: + - Default + - MediumUpdateAverageReaction + - LowUpdateSlowReaction status: description: status holds observed values. type: object @@ -51,40 +59,50 @@ spec: type: object properties: conditions: - description: conditions describes the state of the WorkerLatencyProfile and related components (Kubelet or Controller Manager or Kube API Server) + description: conditions describes the state of the WorkerLatencyProfile and related components (Kubelet or Controller Manager or Kube API Server) whether progressing, degraded or complete type: array items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" type: object required: - lastTransitionTime - - owner + - message + - reason - status - type properties: lastTransitionTime: - description: lastTransitionTime is the time of the last update to the current status property. + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. type: string format: date-time message: - description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. - type: string - owner: - description: Owner specifies the operator that is updating this condition + description: message is a human readable message indicating details about the transition. This may be an empty string. type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. 
+ type: integer + format: int64 + minimum: 0 reason: - description: reason is the CamelCase reason for the condition's current status. + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ status: description: status of the condition, one of True, False, Unknown. type: string + enum: + - "True" + - "False" + - Unknown type: - description: type specifies the aspect reported by this condition. + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index cd907699fe..52248a68ef 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -225,7 +225,7 @@ type UpdateHistory struct { type ClusterID string // ClusterVersionCapability enumerates optional, core cluster components. -// +kubebuilder:validation:Enum=openshift-samples;baremetal +// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace type ClusterVersionCapability string const ( @@ -240,6 +240,11 @@ const ( // baremetal operator which is responsible for running the metal3 // deployment. ClusterVersionCapabilityBaremetal ClusterVersionCapability = "baremetal" + + // ClusterVersionCapabilityMarketplace manages the Marketplace operator which + // supplies Operator Lifecycle Manager (OLM) users with default catalogs of + // "optional" operators. + ClusterVersionCapabilityMarketplace ClusterVersionCapability = "marketplace" ) // ClusterVersionCapabilitySet defines sets of cluster version capabilities. @@ -269,10 +274,12 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilitySet4_11: { ClusterVersionCapabilityOpenShiftSamples, ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityMarketplace, }, ClusterVersionCapabilitySetCurrent: { ClusterVersionCapabilityOpenShiftSamples, ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityMarketplace, }, } diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 52015391d6..7c34a1ef1d 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -111,7 +111,6 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ Disabled: []string{}, }, TechPreviewNoUpgrade: newDefaultFeatures(). - with("CSIDriverAzureFile"). 
// sig-storage, fbertina, OCP specific with("CSIMigrationAWS"). // sig-storage, jsafrane, Kubernetes feature gate with("CSIMigrationGCE"). // sig-storage, fbertina, Kubernetes feature gate with("CSIMigrationAzureFile"). // sig-storage, fbertina, Kubernetes feature gate diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 30e808c919..5cfa463f04 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -705,7 +705,54 @@ type AlibabaCloudResourceTag struct { // NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. // This only includes fields that can be modified in the cluster. -type NutanixPlatformSpec struct{} +type NutanixPlatformSpec struct { + // prismCentral holds the endpoint address and port to access the Nutanix Prism Central. + // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. + // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the + // proxy spec.noProxy list. + // +kubebuilder:validation:Required + PrismCentral NutanixPrismEndpoint `json:"prismCentral"` + + // prismElements holds one or more endpoint address and port data to access the Nutanix + // Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one + // Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) + // used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) + // spread over multiple Prism Elements (clusters) of the Prism Central. + // +kubebuilder:validation:Required + // +listType=map + // +listMapKey=name + PrismElements []NutanixPrismElementEndpoint `json:"prismElements"` +} + +// NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster) +type NutanixPrismEndpoint struct { + // address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=256 + Address string `json:"address"` + + // port is the port number to access the Nutanix Prism Central or Element (cluster) + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port int32 `json:"port"` +} + +// NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster) +type NutanixPrismElementEndpoint struct { + // name is the name of the Prism Element (cluster). This value will correspond with + // the cluster field configured on other resources (eg Machines, PVCs, etc). + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=256 + Name string `json:"name"` + + // endpoint holds the endpoint address and port data of the Prism Element (cluster). + // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. + // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the + // proxy spec.noProxy list. + // +kubebuilder:validation:Required + Endpoint NutanixPrismEndpoint `json:"endpoint"` +} // NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider. 
type NutanixPlatformStatus struct { diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go index 2c6bed3cb6..c7fec9c033 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -91,7 +91,20 @@ type IngressSpec struct { type ConsumingUser string // Hostname is an alias for hostname string validation. -// +kubebuilder:validation:Format=hostname +// +// The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it +// allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric +// characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. +// ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$ +// +// The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, +// except that it allows hostnames longer than the maximum length: +// ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ +// +// Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname +// was saved via validation by the incorrect left operand of the | operator. +// +// +kubebuilder:validation:Pattern=`^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$` type Hostname string type IngressStatus struct { @@ -99,6 +112,23 @@ type IngressStatus struct { // hostnames and serving certificates can be customized by the cluster-admin. // +optional ComponentRoutes []ComponentRouteStatus `json:"componentRoutes,omitempty"` + + // defaultPlacement is set at installation time to control which + // nodes will host the ingress router pods by default. The options are + // control-plane nodes or worker nodes. + // + // This field works by dictating how the Cluster Ingress Operator will + // consider unset replicas and nodePlacement fields in IngressController + // resources when creating the corresponding Deployments. + // + // See the documentation for the IngressController replicas and nodePlacement + // fields for more information. + // + // When omitted, the default value is Workers + // + // +kubebuilder:validation:Enum:="ControlPlane";"Workers";"" + // +optional + DefaultPlacement DefaultPlacement `json:"defaultPlacement"` } // ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. @@ -209,3 +239,14 @@ type IngressList struct { Items []Ingress `json:"items"` } + +// DefaultPlacement defines the default placement of ingress router pods. +type DefaultPlacement string + +const ( + // "Workers" is for having router pods placed on worker nodes by default. + DefaultPlacementWorkers DefaultPlacement = "Workers" + + // "ControlPlane" is for having router pods placed on control-plane nodes by default. 
+ DefaultPlacementControlPlane DefaultPlacement = "ControlPlane" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go index 011bdba5f3..d927bd8944 100644 --- a/vendor/github.com/openshift/api/config/v1/types_node.go +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -1,6 +1,10 @@ package v1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:nonNamespaced @@ -10,6 +14,8 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +kubebuilder:resource:path=nodes,scope=Cluster +// +kubebuilder:subresource:status type Node struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -41,6 +47,7 @@ type NodeStatus struct { WorkerLatencyProfileStatus WorkerLatencyProfileStatus `json:"workerLatencyProfileStatus,omitempty"` } +// +kubebuilder:validation:Enum=v1;v2;"" type CgroupMode string const ( @@ -50,6 +57,7 @@ const ( CgroupModeDefault CgroupMode = CgroupModeV1 ) +// +kubebuilder:validation:Enum=Default;MediumUpdateAverageReaction;LowUpdateSlowReaction type WorkerLatencyProfileType string const ( @@ -63,28 +71,60 @@ const ( DefaultUpdateDefaultReaction WorkerLatencyProfileType = "Default" ) +const ( + // DefaultNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type + DefaultNodeStatusUpdateFrequency = 10 * time.Second + // DefaultNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type + DefaultNodeMonitorGracePeriod = 40 * time.Second + // DefaultNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type + DefaultNotReadyTolerationSeconds = 300 + // DefaultUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type + DefaultUnreachableTolerationSeconds = 300 + + // MediumNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumNodeStatusUpdateFrequency = 20 * time.Second + // MediumNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumNodeMonitorGracePeriod = 2 * time.Minute + // MediumNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumNotReadyTolerationSeconds = 60 + // MediumUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumUnreachableTolerationSeconds = 60 + + // LowNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNodeStatusUpdateFrequency = 1 * time.Minute + // LowNodeMonitorGracePeriod refers to the 
"--node-monitor-grace-period" of the Kube Controller Manager in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNodeMonitorGracePeriod = 5 * time.Minute + // LowNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNotReadyTolerationSeconds = 60 + // LowUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowUnreachableTolerationSeconds = 60 +) + // WorkerLatencyProfileStatus provides status information about the WorkerLatencyProfile rollout type WorkerLatencyProfileStatus struct { // conditions describes the state of the WorkerLatencyProfile and related components - // (Kubelet or Controller Manager or Kube API Server) + // (Kubelet or Controller Manager or Kube API Server) whether progressing, degraded or complete // +patchMergeKey=type // +patchStrategy=merge // +optional - Conditions []WorkerLatencyStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } -// WorkerLatencyStatusConditionType is an aspect of WorkerLatencyProfile state. -type WorkerLatencyStatusConditionType string - +// Different condition types to be used by KubeletOperator, Kube Controller +// Manager Operator and Kube API Server Operator const ( // Progressing indicates that the updates to component (Kubelet or Controller // Manager or Kube API Server) is actively rolling out, propagating changes to the // respective arguments. - WorkerLatencyProfileProgressing WorkerLatencyStatusConditionType = "Progressing" + KubeletProgressing = "KubeletProgressing" + KubeControllerManagerProgressing = "KubeControllerManagerProgressing" + KubeAPIServerProgressing = "KubeAPIServerProgressing" // Complete indicates whether the component (Kubelet or Controller Manager or Kube API Server) // is successfully updated the respective arguments. - WorkerLatencyProfileComplete WorkerLatencyStatusConditionType = "Complete" + KubeletComplete = "KubeletComplete" + KubeControllerManagerComplete = "KubeControllerManagerComplete" + KubeAPIServerComplete = "KubeAPIServerComplete" // Degraded indicates that the component (Kubelet or Controller Manager or Kube API Server) // does not reach the state 'Complete' over a period of time @@ -92,54 +132,11 @@ const ( // If the component enters in this state, "Default" WorkerLatencyProfileType // rollout will be initiated to restore the respective default arguments of all // components. 
- WorkerLatencyProfileDegraded WorkerLatencyStatusConditionType = "Degraded" + KubeletDegraded = "KubeletDegraded" + KubeControllerManagerDegraded = "KubeControllerManagerDegraded" + KubeAPIServerDegraded = "KubeAPIServerDegraded" ) -type WorkerLatencyStatusConditionOwner string - -const ( - // Machine Config Operator will update condition status by setting this as owner - MachineConfigOperator WorkerLatencyStatusConditionOwner = "MachineConfigOperator" - - // Kube Controller Manager Operator will update condition status by setting this as owner - KubeControllerManagerOperator WorkerLatencyStatusConditionOwner = "KubeControllerManagerOperator" - - // Kube API Server Operator will update condition status by setting this as owner - KubeAPIServerOperator WorkerLatencyStatusConditionOwner = "KubeAPIServerOperator" -) - -type WorkerLatencyStatusCondition struct { - // Owner specifies the operator that is updating this condition - // +kubebuilder:validation:Required - // +required - Owner WorkerLatencyStatusConditionOwner `json:"owner"` - - // type specifies the aspect reported by this condition. - // +kubebuilder:validation:Required - // +required - Type WorkerLatencyStatusConditionType `json:"type"` - - // status of the condition, one of True, False, Unknown. - // +kubebuilder:validation:Required - // +required - Status ConditionStatus `json:"status"` - - // lastTransitionTime is the time of the last update to the current status property. - // +kubebuilder:validation:Required - // +required - LastTransitionTime metav1.Time `json:"lastTransitionTime"` - - // reason is the CamelCase reason for the condition's current status. - // +optional - Reason string `json:"reason,omitempty"` - - // message provides additional information about the current condition. - // This is only to be consumed by humans. It may contain Line Feed - // characters (U+000A), which should be rendered as new lines. - // +optional - Message string `json:"message,omitempty"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index eaec4318ad..d5f73e01f5 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -3350,6 +3350,12 @@ func (in *NodeStatus) DeepCopy() *NodeStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NutanixPlatformSpec) DeepCopyInto(out *NutanixPlatformSpec) { *out = *in + out.PrismCentral = in.PrismCentral + if in.PrismElements != nil { + in, out := &in.PrismElements, &out.PrismElements + *out = make([]NutanixPrismElementEndpoint, len(*in)) + copy(*out, *in) + } return } @@ -3379,6 +3385,39 @@ func (in *NutanixPlatformStatus) DeepCopy() *NutanixPlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPrismElementEndpoint) DeepCopyInto(out *NutanixPrismElementEndpoint) { + *out = *in + out.Endpoint = in.Endpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismElementEndpoint. 
+func (in *NutanixPrismElementEndpoint) DeepCopy() *NutanixPrismElementEndpoint { + if in == nil { + return nil + } + out := new(NutanixPrismElementEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPrismEndpoint) DeepCopyInto(out *NutanixPrismEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismEndpoint. +func (in *NutanixPrismEndpoint) DeepCopy() *NutanixPrismEndpoint { + if in == nil { + return nil + } + out := new(NutanixPrismEndpoint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OAuth) DeepCopyInto(out *OAuth) { *out = *in @@ -3867,7 +3906,7 @@ func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { if in.Nutanix != nil { in, out := &in.Nutanix, &out.Nutanix *out = new(NutanixPlatformSpec) - **out = **in + (*in).DeepCopyInto(*out) } return } @@ -4767,7 +4806,7 @@ func (in *WorkerLatencyProfileStatus) DeepCopyInto(out *WorkerLatencyProfileStat *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]WorkerLatencyStatusCondition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -4784,20 +4823,3 @@ func (in *WorkerLatencyProfileStatus) DeepCopy() *WorkerLatencyProfileStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkerLatencyStatusCondition) DeepCopyInto(out *WorkerLatencyStatusCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerLatencyStatusCondition. -func (in *WorkerLatencyStatusCondition) DeepCopy() *WorkerLatencyStatusCondition { - if in == nil { - return nil - } - out := new(WorkerLatencyStatusCondition) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 6f894f6732..1e789e4752 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -1205,7 +1205,9 @@ func (KubevirtPlatformStatus) SwaggerDoc() map[string]string { } var map_NutanixPlatformSpec = map[string]string{ - "": "NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. This only includes fields that can be modified in the cluster.", + "": "NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. This only includes fields that can be modified in the cluster.", + "prismCentral": "prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", + "prismElements": "prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. 
Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central.", } func (NutanixPlatformSpec) SwaggerDoc() map[string]string { @@ -1222,6 +1224,26 @@ func (NutanixPlatformStatus) SwaggerDoc() map[string]string { return map_NutanixPlatformStatus } +var map_NutanixPrismElementEndpoint = map[string]string{ + "": "NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster)", + "name": "name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (eg Machines, PVCs, etc).", + "endpoint": "endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", +} + +func (NutanixPrismElementEndpoint) SwaggerDoc() map[string]string { + return map_NutanixPrismElementEndpoint +} + +var map_NutanixPrismEndpoint = map[string]string{ + "": "NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster)", + "address": "address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster)", + "port": "port is the port number to access the Nutanix Prism Central or Element (cluster)", +} + +func (NutanixPrismEndpoint) SwaggerDoc() map[string]string { + return map_NutanixPrismEndpoint +} + var map_OpenStackPlatformSpec = map[string]string{ "": "OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. This only includes fields that can be modified in the cluster.", } @@ -1412,7 +1434,8 @@ func (IngressSpec) SwaggerDoc() map[string]string { } var map_IngressStatus = map[string]string{ - "componentRoutes": "componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin.", + "componentRoutes": "componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin.", + "defaultPlacement": "defaultPlacement is set at installation time to control which nodes will host the ingress router pods by default. 
The options are control-plane nodes or worker nodes.\n\nThis field works by dictating how the Cluster Ingress Operator will consider unset replicas and nodePlacement fields in IngressController resources when creating the corresponding Deployments.\n\nSee the documentation for the IngressController replicas and nodePlacement fields for more information.\n\nWhen omitted, the default value is Workers", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -1560,26 +1583,13 @@ func (NodeStatus) SwaggerDoc() map[string]string { var map_WorkerLatencyProfileStatus = map[string]string{ "": "WorkerLatencyProfileStatus provides status information about the WorkerLatencyProfile rollout", - "conditions": "conditions describes the state of the WorkerLatencyProfile and related components (Kubelet or Controller Manager or Kube API Server)", + "conditions": "conditions describes the state of the WorkerLatencyProfile and related components (Kubelet or Controller Manager or Kube API Server) whether progressing, degraded or complete", } func (WorkerLatencyProfileStatus) SwaggerDoc() map[string]string { return map_WorkerLatencyProfileStatus } -var map_WorkerLatencyStatusCondition = map[string]string{ - "owner": "Owner specifies the operator that is updating this condition", - "type": "type specifies the aspect reported by this condition.", - "status": "status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.", - "reason": "reason is the CamelCase reason for the condition's current status.", - "message": "message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", -} - -func (WorkerLatencyStatusCondition) SwaggerDoc() map[string]string { - return map_WorkerLatencyStatusCondition -} - var map_BasicAuthIdentityProvider = map[string]string{ "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials", } diff --git a/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml b/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml index c1091da575..f91fad9476 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml +++ b/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml @@ -1380,6 +1380,107 @@ spec: type: string type: object type: array + topologySpreadConstraints: + description: topologySpreadConstraints specify how to spread matching + pods among the given topology. + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods + that match this label selector are counted to determine the + number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may + be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global minimum. + For example, in a 3-zone cluster, MaxSkew is set to 1, and + pods with the same labelSelector spread as 1/1/0: | zone1 + | zone2 | zone3 | | P | P | | - if MaxSkew is + 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + scheduling it onto zone1(zone2) would make the ActualSkew(2-0) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy + it. It''s a required field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that + have a label with this key and identical values are considered + to be in the same topology. We consider each + as a "bucket", and try to put balanced number of pods into + each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a + pod if it doesn''t satisfy the spread constraint. - DoNotSchedule + (default) tells the scheduler not to schedule it. - ScheduleAnyway + tells the scheduler to schedule the pod in any location, but + giving higher precedence to topologies that would help reduce + the skew. A constraint is considered "Unsatisfiable" for + an incoming pod if and only if every possible node assignment + for that pod would violate "MaxSkew" on some topology. For + example, in a 3-zone cluster, MaxSkew is set to 1, and pods + with the same labelSelector spread as 3/1/1: | zone1 | zone2 + | zone3 | | P P P | P | P | If WhenUnsatisfiable is + set to DoNotSchedule, incoming pod can only be scheduled to + zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on + zone2(zone3) satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make it *more* + imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array unsupportedConfigOverrides: description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. 
It only needs to be the diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types.go b/vendor/github.com/openshift/api/imageregistry/v1/types.go index 777832af03..dd9d727cf0 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/types.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/types.go @@ -107,6 +107,9 @@ type ImageRegistrySpec struct { // affinity is a group of node affinity scheduling rules for the image registry pod(s). // +optional Affinity *corev1.Affinity `json:"affinity,omitempty"` + // topologySpreadConstraints specify how to spread matching pods among the given topology. + // +optional + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` } // ImageRegistryStatus reports image registry operational status. diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go index 2f2baafd26..af4ddc31a0 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go @@ -565,6 +565,13 @@ func (in *ImageRegistrySpec) DeepCopyInto(out *ImageRegistrySpec) { *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go index 95f5ddad0e..0bfc130a30 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go @@ -206,23 +206,24 @@ func (ImageRegistryConfigStorageSwift) SwaggerDoc() map[string]string { } var map_ImageRegistrySpec = map[string]string{ - "": "ImageRegistrySpec defines the specs for the running registry.", - "managementState": "managementState indicates whether the registry instance represented by this config instance is under operator management or not. Valid values are Managed, Unmanaged, and Removed.", - "httpSecret": "httpSecret is the value needed by the registry to secure uploads, generated by default.", - "proxy": "proxy defines the proxy to be used when calling master api, upstream registries, etc.", - "storage": "storage details for configuring registry storage, e.g. 
S3 bucket coordinates.", - "readOnly": "readOnly indicates whether the registry instance should reject attempts to push new images or delete existing ones.", - "disableRedirect": "disableRedirect controls whether to route all data through the Registry, rather than redirecting to the backend.", - "requests": "requests controls how many parallel requests a given registry instance will handle before queuing additional requests.", - "defaultRoute": "defaultRoute indicates whether an external facing route for the registry should be created using the default generated hostname.", - "routes": "routes defines additional external facing routes which should be created for the registry.", - "replicas": "replicas determines the number of registry instances to run.", - "logging": "logging is deprecated, use logLevel instead.", - "resources": "resources defines the resource requests+limits for the registry pod.", - "nodeSelector": "nodeSelector defines the node selection constraints for the registry pod.", - "tolerations": "tolerations defines the tolerations for the registry pod.", - "rolloutStrategy": "rolloutStrategy defines rollout strategy for the image registry deployment.", - "affinity": "affinity is a group of node affinity scheduling rules for the image registry pod(s).", + "": "ImageRegistrySpec defines the specs for the running registry.", + "managementState": "managementState indicates whether the registry instance represented by this config instance is under operator management or not. Valid values are Managed, Unmanaged, and Removed.", + "httpSecret": "httpSecret is the value needed by the registry to secure uploads, generated by default.", + "proxy": "proxy defines the proxy to be used when calling master api, upstream registries, etc.", + "storage": "storage details for configuring registry storage, e.g. 
S3 bucket coordinates.", + "readOnly": "readOnly indicates whether the registry instance should reject attempts to push new images or delete existing ones.", + "disableRedirect": "disableRedirect controls whether to route all data through the Registry, rather than redirecting to the backend.", + "requests": "requests controls how many parallel requests a given registry instance will handle before queuing additional requests.", + "defaultRoute": "defaultRoute indicates whether an external facing route for the registry should be created using the default generated hostname.", + "routes": "routes defines additional external facing routes which should be created for the registry.", + "replicas": "replicas determines the number of registry instances to run.", + "logging": "logging is deprecated, use logLevel instead.", + "resources": "resources defines the resource requests+limits for the registry pod.", + "nodeSelector": "nodeSelector defines the node selection constraints for the registry pod.", + "tolerations": "tolerations defines the tolerations for the registry pod.", + "rolloutStrategy": "rolloutStrategy defines rollout strategy for the image registry deployment.", + "affinity": "affinity is a group of node affinity scheduling rules for the image registry pod(s).", + "topologySpreadConstraints": "topologySpreadConstraints specify how to spread matching pods among the given topology.", } func (ImageRegistrySpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index 7d789f698e..d814f10d12 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -74,6 +74,11 @@ type AWSMachineProviderConfig struct { // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances. // +optional SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"` + // MetadataServiceOptions allows users to configure instance metadata service interaction options. + // If nothing specified, default AWS IMDS settings will be applied. + // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html + // +optional + MetadataServiceOptions MetadataServiceOptions `json:"metadataServiceOptions,omitempty"` } // BlockDeviceMappingSpec describes a block device mapping @@ -160,6 +165,30 @@ type SpotMarketOptions struct { MaxPrice *string `json:"maxPrice,omitempty"` } +type MetadataServiceAuthentication string + +const ( + // MetadataServiceAuthenticationRequired enforces sending of a signed token header with any instance metadata retrieval (GET) requests. + // Enforces IMDSv2 usage. + MetadataServiceAuthenticationRequired = "Required" + // MetadataServiceAuthenticationOptional allows IMDSv1 usage along with IMDSv2 + MetadataServiceAuthenticationOptional = "Optional" +) + +// MetadataServiceOptions defines the options available to a user when configuring +// Instance Metadata Service (IMDS) Options. +type MetadataServiceOptions struct { + // Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. + // When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. 
+ // When omitted, this means the user has no opinion and the value is left to the platform to choose a good + // default, which is subject to change over time. The current default is optional. + // At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API + // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html + // +kubebuilder:validation:Enum=Required;Optional + // +optional + Authentication MetadataServiceAuthentication `json:"authentication,omitempty"` +} + // AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. // Only one of ID, ARN or Filters may be specified. Specifying more than one will result in // a validation error. @@ -296,25 +325,5 @@ type AWSMachineProviderStatus struct { // Conditions is a set of conditions associated with the Machine to indicate // errors or other status // +optional - Conditions []AWSMachineProviderCondition `json:"conditions,omitempty"` -} - -// AWSMachineProviderCondition is a condition in a AWSMachineProviderStatus. -type AWSMachineProviderCondition struct { - // Type is the type of the condition. - Type ConditionType `json:"type"` - // Status is the status of the condition. - Status corev1.ConditionStatus `json:"status"` - // LastProbeTime is the last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` - // LastTransitionTime is the last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // Reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Message is a human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go index d326a2b841..914e73478e 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go @@ -143,7 +143,7 @@ type AzureMachineProviderStatus struct { // Conditions is a set of conditions associated with the Machine to indicate // errors or other status. // +optional - Conditions []AzureMachineProviderCondition `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // VMState describes the state of an Azure virtual machine. @@ -282,8 +282,26 @@ type DataDisk struct { // +optional // +kubebuilder:validation:Enum=None;ReadOnly;ReadWrite CachingType CachingTypeOption `json:"cachingType,omitempty"` + // DeletionPolicy specifies the data disk deletion policy upon Machine deletion. + // Possible values are "Delete","Detach". + // When "Delete" is used the data disk is deleted when the Machine is deleted. + // When "Detach" is used the data disk is detached from the Machine and retained when the Machine is deleted. + // +kubebuilder:validation:Enum=Delete;Detach + // +kubebuilder:validation:Required + DeletionPolicy DiskDeletionPolicyType `json:"deletionPolicy"` } +// DiskDeletionPolicyType defines the possible values for DeletionPolicy. +type DiskDeletionPolicyType string + +// These are the valid DiskDeletionPolicyType values. 
+const ( + // DiskDeletionPolicyTypeDelete means the DiskDeletionPolicyType is "Delete". + DiskDeletionPolicyTypeDelete DiskDeletionPolicyType = "Delete" + // DiskDeletionPolicyTypeDetach means the DiskDeletionPolicyType is "Detach". + DiskDeletionPolicyTypeDetach DiskDeletionPolicyType = "Detach" +) + // CachingTypeOption defines the different values for a CachingType. type CachingTypeOption string @@ -364,33 +382,13 @@ type SecurityProfile struct { EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"` } -// AzureMachineProviderCondition is a condition in a AzureMachineProviderStatus -type AzureMachineProviderCondition struct { - // Type is the type of the condition. - Type ConditionType `json:"type"` - // Status is the status of the condition. - Status corev1.ConditionStatus `json:"status"` - // LastProbeTime is the last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime"` - // LastTransitionTime is the last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime"` - // Reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason"` - // Message is a human-readable message indicating details about last transition. - // +optional - Message string `json:"message"` -} - // AzureUltraSSDCapabilityState defines the different states of an UltraSSDCapability type AzureUltraSSDCapabilityState string // These are the valid AzureUltraSSDCapabilityState states. const ( - // "AzureUltraSSDCapabilityTrue" means the Azure UltraSSDCapability is Enabled - AzureUltraSSDCapabilityTrue AzureUltraSSDCapabilityState = "Enabled" - // "AzureUltraSSDCapabilityFalse" means the Azure UltraSSDCapability is Disabled - AzureUltraSSDCapabilityFalse AzureUltraSSDCapabilityState = "Disabled" + // "AzureUltraSSDCapabilityEnabled" means the Azure UltraSSDCapability is Enabled + AzureUltraSSDCapabilityEnabled AzureUltraSSDCapabilityState = "Enabled" + // "AzureUltraSSDCapabilityDisabled" means the Azure UltraSSDCapability is Disabled + AzureUltraSSDCapabilityDisabled AzureUltraSSDCapabilityState = "Disabled" ) diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go index 4970bd7e22..6daac65eb0 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go @@ -199,25 +199,5 @@ type GCPMachineProviderStatus struct { // Conditions is a set of conditions associated with the Machine to indicate // errors or other status // +optional - Conditions []GCPMachineProviderCondition `json:"conditions,omitempty"` -} - -// GCPMachineProviderCondition is a condition in a GCPMachineProviderStatus -type GCPMachineProviderCondition struct { - // Type is the type of the condition. - Type ConditionType `json:"type"` - // Status is the status of the condition. - Status corev1.ConditionStatus `json:"status"` - // LastProbeTime is the last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` - // LastTransitionTime is the last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // Reason is a unique, one-word, CamelCase reason for the condition's last transition. 
- // +optional - Reason string `json:"reason,omitempty"` - // Message is a human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go index 9d28632f80..2cf75675d8 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go @@ -114,26 +114,6 @@ type Workspace struct { ResourcePool string `gcfg:"resourcepool-path,omitempty" json:"resourcePool,omitempty"` } -// VSphereMachineProviderCondition is a condition in a VSphereMachineProviderStatus. -type VSphereMachineProviderCondition struct { - // Type is the type of the condition. - Type ConditionType `json:"type"` - // Status is the status of the condition. - Status corev1.ConditionStatus `json:"status"` - // LastProbeTime is the last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` - // LastTransitionTime is the last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // Reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Message is a human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} - // VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. // It contains VSphere-specific status information. // Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). @@ -149,7 +129,7 @@ type VSphereMachineProviderStatus struct { InstanceState *string `json:"instanceState,omitempty"` // Conditions is a set of conditions associated with the Machine to indicate // errors or other status - Conditions []VSphereMachineProviderCondition `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` // TaskRef is a managed object reference to a Task related to the machine. // This value is set automatically at runtime and should not be set or // modified by users. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index fdabf1112b..4af81f9d86 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -12,24 +12,6 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSMachineProviderCondition) DeepCopyInto(out *AWSMachineProviderCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderCondition. 
-func (in *AWSMachineProviderCondition) DeepCopy() *AWSMachineProviderCondition { - if in == nil { - return nil - } - out := new(AWSMachineProviderCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSMachineProviderConfig) DeepCopyInto(out *AWSMachineProviderConfig) { *out = *in @@ -92,6 +74,7 @@ func (in *AWSMachineProviderConfig) DeepCopyInto(out *AWSMachineProviderConfig) *out = new(SpotMarketOptions) (*in).DeepCopyInto(*out) } + out.MetadataServiceOptions = in.MetadataServiceOptions return } @@ -154,7 +137,7 @@ func (in *AWSMachineProviderStatus) DeepCopyInto(out *AWSMachineProviderStatus) } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]AWSMachineProviderCondition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -205,24 +188,6 @@ func (in *AWSResourceReference) DeepCopy() *AWSResourceReference { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureMachineProviderCondition) DeepCopyInto(out *AzureMachineProviderCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderCondition. -func (in *AzureMachineProviderCondition) DeepCopy() *AzureMachineProviderCondition { - if in == nil { - return nil - } - out := new(AzureMachineProviderCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AzureMachineProviderSpec) DeepCopyInto(out *AzureMachineProviderSpec) { *out = *in @@ -317,7 +282,7 @@ func (in *AzureMachineProviderStatus) DeepCopyInto(out *AzureMachineProviderStat } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]AzureMachineProviderCondition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -624,24 +589,6 @@ func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMachineProviderCondition) DeepCopyInto(out *GCPMachineProviderCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderCondition. -func (in *GCPMachineProviderCondition) DeepCopy() *GCPMachineProviderCondition { - if in == nil { - return nil - } - out := new(GCPMachineProviderCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GCPMachineProviderSpec) DeepCopyInto(out *GCPMachineProviderSpec) { *out = *in @@ -757,7 +704,7 @@ func (in *GCPMachineProviderStatus) DeepCopyInto(out *GCPMachineProviderStatus) } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]GCPMachineProviderCondition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1371,6 +1318,22 @@ func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataServiceOptions) DeepCopyInto(out *MetadataServiceOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataServiceOptions. +func (in *MetadataServiceOptions) DeepCopy() *MetadataServiceOptions { + if in == nil { + return nil + } + out := new(MetadataServiceOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkDeviceSpec) DeepCopyInto(out *NetworkDeviceSpec) { *out = *in @@ -1618,24 +1581,6 @@ func (in *UnhealthyCondition) DeepCopy() *UnhealthyCondition { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VSphereMachineProviderCondition) DeepCopyInto(out *VSphereMachineProviderCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderCondition. -func (in *VSphereMachineProviderCondition) DeepCopy() *VSphereMachineProviderCondition { - if in == nil { - return nil - } - out := new(VSphereMachineProviderCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VSphereMachineProviderSpec) DeepCopyInto(out *VSphereMachineProviderSpec) { *out = *in @@ -1694,7 +1639,7 @@ func (in *VSphereMachineProviderStatus) DeepCopyInto(out *VSphereMachineProvider } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]VSphereMachineProviderCondition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index 8d3a010f01..5fff12fcbb 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -11,38 +11,25 @@ package v1beta1 // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE -var map_AWSMachineProviderCondition = map[string]string{ - "": "AWSMachineProviderCondition is a condition in a AWSMachineProviderStatus.", - "type": "Type is the type of the condition.", - "status": "Status is the status of the condition.", - "lastProbeTime": "LastProbeTime is the last time we probed the condition.", - "lastTransitionTime": "LastTransitionTime is the last time the condition transitioned from one status to another.", - "reason": "Reason is a unique, one-word, CamelCase reason for the condition's last transition.", - "message": "Message is a human-readable message indicating details about last transition.", -} - -func (AWSMachineProviderCondition) SwaggerDoc() map[string]string { - return map_AWSMachineProviderCondition -} - var map_AWSMachineProviderConfig = map[string]string{ - "": "AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "ami": "AMI is the reference to the AMI from which to create the machine instance.", - "instanceType": "InstanceType is the type of instance to create. Example: m4.xlarge", - "tags": "Tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", - "iamInstanceProfile": "IAMInstanceProfile is a reference to an IAM role to assign to the instance", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.", - "keyName": "KeyName is the name of the KeyPair to use for SSH", - "deviceIndex": "DeviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.", - "publicIp": "PublicIP specifies whether the instance should get a public IP. If not present, it should use the default of its subnet.", - "networkInterfaceType": "NetworkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". 
Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.", - "securityGroups": "SecurityGroups is an array of references to security groups that should be applied to the instance.", - "subnet": "Subnet is a reference to the subnet to use for this instance", - "placement": "Placement specifies where to create the instance in AWS", - "loadBalancers": "LoadBalancers is the set of load balancers to which the new instance should be added once it is created.", - "blockDevices": "BlockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html", - "spotMarketOptions": "SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.", + "": "AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "ami": "AMI is the reference to the AMI from which to create the machine instance.", + "instanceType": "InstanceType is the type of instance to create. Example: m4.xlarge", + "tags": "Tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", + "iamInstanceProfile": "IAMInstanceProfile is a reference to an IAM role to assign to the instance", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.", + "keyName": "KeyName is the name of the KeyPair to use for SSH", + "deviceIndex": "DeviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.", + "publicIp": "PublicIP specifies whether the instance should get a public IP. If not present, it should use the default of its subnet.", + "networkInterfaceType": "NetworkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". 
Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.", + "securityGroups": "SecurityGroups is an array of references to security groups that should be applied to the instance.", + "subnet": "Subnet is a reference to the subnet to use for this instance", + "placement": "Placement specifies where to create the instance in AWS", + "loadBalancers": "LoadBalancers is the set of load balancers to which the new instance should be added once it is created.", + "blockDevices": "BlockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html", + "spotMarketOptions": "SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.", + "metadataServiceOptions": "MetadataServiceOptions allows users to configure instance metadata service interaction options. If nothing specified, default AWS IMDS settings will be applied. https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", } func (AWSMachineProviderConfig) SwaggerDoc() map[string]string { @@ -132,6 +119,15 @@ func (LocalAWSPlacementGroupReference) SwaggerDoc() map[string]string { return map_LocalAWSPlacementGroupReference } +var map_MetadataServiceOptions = map[string]string{ + "": "MetadataServiceOptions defines the options available to a user when configuring Instance Metadata Service (IMDS) Options.", + "authentication": "Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. When omitted, this means the user has no opinion and the value is left to the platform to choose a good default, which is subject to change over time. The current default is optional. 
At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", +} + +func (MetadataServiceOptions) SwaggerDoc() map[string]string { + return map_MetadataServiceOptions +} + var map_Placement = map[string]string{ "": "Placement indicates where to create the instance in AWS", "region": "Region is the region to use to create the instance", @@ -164,20 +160,6 @@ func (TagSpecification) SwaggerDoc() map[string]string { return map_TagSpecification } -var map_AzureMachineProviderCondition = map[string]string{ - "": "AzureMachineProviderCondition is a condition in a AzureMachineProviderStatus", - "type": "Type is the type of the condition.", - "status": "Status is the status of the condition.", - "lastProbeTime": "LastProbeTime is the last time we probed the condition.", - "lastTransitionTime": "LastTransitionTime is the last time the condition transitioned from one status to another.", - "reason": "Reason is a unique, one-word, CamelCase reason for the condition's last transition.", - "message": "Message is a human-readable message indicating details about last transition.", -} - -func (AzureMachineProviderCondition) SwaggerDoc() map[string]string { - return map_AzureMachineProviderCondition -} - var map_AzureMachineProviderSpec = map[string]string{ "": "AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. Required parameters such as location that are not specified by this configuration, will be defaulted by the actuator. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", @@ -224,12 +206,13 @@ func (AzureMachineProviderStatus) SwaggerDoc() map[string]string { } var map_DataDisk = map[string]string{ - "": "DataDisk specifies the parameters that are used to add one or more data disks to the machine. A Data Disk is a managed disk that's attached to a virtual machine to store application data. It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume. It is registered as SCSI drive and labeled with the chosen `lun`. e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`.\n\nAs the Data Disk disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted, in order for it to be usable. This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization. At this stage the previously defined `lun` is to be used as the \"device\" key for referencing the raw disk device to be initialized. Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. For further guidance and examples, please refer to the official OpenShift docs.", - "nameSuffix": "NameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. 
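The new `metadataServiceOptions` / `authentication` keys above map onto the `MetadataServiceOptions` type. A minimal sketch of opting an AWS machine into token-based (IMDSv2) metadata access; note this is an illustration, not code from the diff: the Go field names are inferred from the JSON keys in the doc map, and the `"Required"` value is an assumption based on the `HttpTokens` semantics the description references.

```go
package main

import (
	"encoding/json"
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
)

func main() {
	// Hypothetical provider spec fragment: enforce IMDSv2 (token-based) access
	// to the instance metadata service. "Required" mirrors the AWS HttpTokens
	// value; the exact constant name is not shown in this diff excerpt.
	spec := machinev1beta1.AWSMachineProviderConfig{
		InstanceType: "m4.xlarge",
		MetadataServiceOptions: machinev1beta1.MetadataServiceOptions{
			Authentication: "Required",
		},
	}

	raw, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(raw))
}
```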
The overall disk name must not exceed 80 chars in length.", - "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.", - "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a ManagedDisk with with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".", - "lun": "Lun Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.", - "cachingType": "CachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is CachingTypeNone.", + "": "DataDisk specifies the parameters that are used to add one or more data disks to the machine. A Data Disk is a managed disk that's attached to a virtual machine to store application data. It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume. It is registered as SCSI drive and labeled with the chosen `lun`. e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`.\n\nAs the Data Disk disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted, in order for it to be usable. This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization. At this stage the previously defined `lun` is to be used as the \"device\" key for referencing the raw disk device to be initialized. Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. For further guidance and examples, please refer to the official OpenShift docs.", + "nameSuffix": "NameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.", + "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.", + "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a ManagedDisk with with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".", + "lun": "Lun Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.", + "cachingType": "CachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. 
Currently the default is CachingTypeNone.", + "deletionPolicy": "DeletionPolicy specifies the data disk deletion policy upon Machine deletion. Possible values are \"Delete\",\"Detach\". When \"Delete\" is used the data disk is deleted when the Machine is deleted. When \"Detach\" is used the data disk is detached from the Machine and retained when the Machine is deleted.", } func (DataDisk) SwaggerDoc() map[string]string { @@ -365,20 +348,6 @@ func (GCPKMSKeyReference) SwaggerDoc() map[string]string { return map_GCPKMSKeyReference } -var map_GCPMachineProviderCondition = map[string]string{ - "": "GCPMachineProviderCondition is a condition in a GCPMachineProviderStatus", - "type": "Type is the type of the condition.", - "status": "Status is the status of the condition.", - "lastProbeTime": "LastProbeTime is the last time we probed the condition.", - "lastTransitionTime": "LastTransitionTime is the last time the condition transitioned from one status to another.", - "reason": "Reason is a unique, one-word, CamelCase reason for the condition's last transition.", - "message": "Message is a human-readable message indicating details about last transition.", -} - -func (GCPMachineProviderCondition) SwaggerDoc() map[string]string { - return map_GCPMachineProviderCondition -} - var map_GCPMachineProviderSpec = map[string]string{ "": "GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an GCP virtual machine. It is used by the GCP machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", @@ -687,20 +656,6 @@ func (NetworkSpec) SwaggerDoc() map[string]string { return map_NetworkSpec } -var map_VSphereMachineProviderCondition = map[string]string{ - "": "VSphereMachineProviderCondition is a condition in a VSphereMachineProviderStatus.", - "type": "Type is the type of the condition.", - "status": "Status is the status of the condition.", - "lastProbeTime": "LastProbeTime is the last time we probed the condition.", - "lastTransitionTime": "LastTransitionTime is the last time the condition transitioned from one status to another.", - "reason": "Reason is a unique, one-word, CamelCase reason for the condition's last transition.", - "message": "Message is a human-readable message indicating details about last transition.", -} - -func (VSphereMachineProviderCondition) SwaggerDoc() map[string]string { - return map_VSphereMachineProviderCondition -} - var map_VSphereMachineProviderSpec = map[string]string{ "": "VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an VSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine. 
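With the `deletionPolicy` key added to the `DataDisk` doc map, a short sketch of a data disk that is addressed as `lun0` from Ignition and deleted together with the Machine. The Go field names are inferred from the JSON keys shown above; the `"Delete"` value is quoted from the deletionPolicy description, and `cachingType`/`managedDisk` are left empty so the documented defaults apply.

```go
package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
)

func main() {
	// One data disk; its raw device will appear at /dev/disk/azure/scsi1/lun0
	// and it is removed when the Machine is deleted ("Delete" policy).
	disk := machinev1beta1.DataDisk{
		NameSuffix:     "etcd",
		DiskSizeGB:     256,
		Lun:            0,
		DeletionPolicy: "Delete",
	}
	fmt.Printf("disk <machine-name>_%s will appear at /dev/disk/azure/scsi1/lun%d\n",
		disk.NameSuffix, disk.Lun)
}
```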
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml index acb2c40587..2532678006 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml @@ -144,6 +144,28 @@ spec: description: hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. properties: + httpPort: + default: 80 + description: httpPort is the port on the host which should + be used to listen for HTTP requests. This field should be + set when port 80 is already in use. The value should not + coincide with the NodePort range of the cluster. When not + specified the value defaults to 80. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + httpsPort: + default: 443 + description: httpsPort is the port on the host which should + be used to listen for HTTPS requests. This field should + be set when port 443 is already in use. The value should + not coincide with the NodePort range of the cluster. When + not specified the value defaults to 443. + format: int32 + maximum: 65535 + minimum: 1 + type: integer protocol: description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether @@ -167,6 +189,29 @@ spec: - TCP - PROXY type: string + statsPort: + default: 1936 + description: statsPort is the port on the host where the stats + from the router are published. The value should not coincide + with the NodePort range of the cluster. If an external load + balancer is configured to forward connections to this IngressController, + the load balancer should use this port for health checks. + The load balancer can send HTTP probes on this port on a + given node, with the path /healthz/ready to determine if + the ingress controller is ready to receive traffic on the + node. For proper operation the load balancer must not forward + traffic to a node until the health check reports ready. + The load balancer should also stop forwarding requests within + a maximum of 45 seconds after /healthz/ready starts reporting + not-ready. Probing every 5 to 10 seconds, with a 5-second + timeout and with a threshold of two successful or failed + requests to become healthy or unhealthy respectively, are + well-tested values. When not specified the value defaults + to 1936. + format: int32 + maximum: 65535 + minimum: 1 + type: integer type: object loadBalancer: description: loadBalancer holds parameters for the load balancer. @@ -237,12 +282,13 @@ spec: type: description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", - "BareMetal", "GCP", "OpenStack", and "VSphere". + "BareMetal", "GCP", "Nutanix", "OpenStack", and "VSphere". enum: - AWS - Azure - BareMetal - GCP + - Nutanix - OpenStack - VSphere - IBM @@ -806,9 +852,14 @@ spec: properties: nodeSelector: description: "nodeSelector is the node selector applied to ingress - controller deployments. 
\n If unset, the default is: \n kubernetes.io/os: - linux node-role.kubernetes.io/worker: '' \n If set, the specified - selector is used and replaces the default." + controller deployments. \n If set, the specified selector is + used and replaces the default. \n If unset, the default depends + on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses + status. \n When defaultPlacement is Workers, the default is: + \n kubernetes.io/os: linux node-role.kubernetes.io/worker: + '' \n When defaultPlacement is ControlPlane, the default is: + \n kubernetes.io/os: linux node-role.kubernetes.io/master: + '' \n These defaults are subject to change." properties: matchExpressions: description: matchExpressions is a list of label selector @@ -896,8 +947,16 @@ spec: type: array type: object replicas: - description: replicas is the desired number of ingress controller - replicas. If unset, defaults to 2. + description: "replicas is the desired number of ingress controller + replicas. If unset, the default depends on the value of the defaultPlacement + field in the cluster config.openshift.io/v1/ingresses status. \n + The value of replicas is set based on the value of a chosen field + in the Infrastructure CR. If defaultPlacement is set to ControlPlane, + the chosen field will be controlPlaneTopology. If it is set to Workers + the chosen field will be infrastructureTopology. Replicas will then + be set to 1 or 2 based whether the chosen field's value is SingleReplica + or HighlyAvailable, respectively. \n These defaults are subject + to change." format: int32 type: integer routeAdmission: @@ -1126,6 +1185,24 @@ spec: format: int32 minimum: 4096 type: integer + healthCheckInterval: + description: "healthCheckInterval defines how long the router + waits between two consecutive health checks on its configured + backends. This value is applied globally as a default for all + routes, but may be overridden per-route by the route annotation + \"router.openshift.io/haproxy.health.check.interval\". \n Setting + this to less than 5s can cause excess traffic due to too frequent + TCP health checks and accompanying SYN packet storms. Alternatively, + setting this too high can result in increased latency, due to + backend servers that are no longer available, but haven't yet + been detected as such. \n An empty or zero healthCheckInterval + means no opinion and IngressController chooses a default, which + is subject to change over time. Currently the default healthCheckInterval + value is 5s. \n Currently the minimum allowed value is 1s and + the maximum allowed value is 2147483647ms (24.85 days). Both + are subject to change over time." + format: duration + type: string serverFinTimeout: description: "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to @@ -1233,6 +1310,28 @@ spec: description: hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. properties: + httpPort: + default: 80 + description: httpPort is the port on the host which should + be used to listen for HTTP requests. This field should be + set when port 80 is already in use. The value should not + coincide with the NodePort range of the cluster. When not + specified the value defaults to 80. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + httpsPort: + default: 443 + description: httpsPort is the port on the host which should + be used to listen for HTTPS requests. 
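The node selector and replica defaults now depend on the cluster's defaultPlacement rather than being fixed. A sketch of reproducing the documented ControlPlane default explicitly via the Go API, so the placement no longer floats with the cluster-wide default; `NodePlacement` and its `NodeSelector` field appear in the types_ingress.go hunk later in this diff, the label values are copied from the CRD text.

```go
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Explicitly pin the ingress controller pods to control-plane nodes using
	// the selector the CRD documents as the ControlPlane default.
	placement := operatorv1.NodePlacement{
		NodeSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{
				"kubernetes.io/os":               "linux",
				"node-role.kubernetes.io/master": "",
			},
		},
	}

	// Leaving spec.replicas unset lets the operator derive 1 or 2 from the
	// cluster topology; setting it pins the count regardless of topology.
	replicas := int32(2)
	fmt.Println(placement.NodeSelector.MatchLabels, replicas)
}
```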
This field should + be set when port 443 is already in use. The value should + not coincide with the NodePort range of the cluster. When + not specified the value defaults to 443. + format: int32 + maximum: 65535 + minimum: 1 + type: integer protocol: description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether @@ -1256,6 +1355,29 @@ spec: - TCP - PROXY type: string + statsPort: + default: 1936 + description: statsPort is the port on the host where the stats + from the router are published. The value should not coincide + with the NodePort range of the cluster. If an external load + balancer is configured to forward connections to this IngressController, + the load balancer should use this port for health checks. + The load balancer can send HTTP probes on this port on a + given node, with the path /healthz/ready to determine if + the ingress controller is ready to receive traffic on the + node. For proper operation the load balancer must not forward + traffic to a node until the health check reports ready. + The load balancer should also stop forwarding requests within + a maximum of 45 seconds after /healthz/ready starts reporting + not-ready. Probing every 5 to 10 seconds, with a 5-second + timeout and with a threshold of two successful or failed + requests to become healthy or unhealthy respectively, are + well-tested values. When not specified the value defaults + to 1936. + format: int32 + maximum: 65535 + minimum: 1 + type: integer type: object loadBalancer: description: loadBalancer holds parameters for the load balancer. @@ -1326,12 +1448,13 @@ spec: type: description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", - "BareMetal", "GCP", "OpenStack", and "VSphere". + "BareMetal", "GCP", "Nutanix", "OpenStack", and "VSphere". enum: - AWS - Azure - BareMetal - GCP + - Nutanix - OpenStack - VSphere - IBM @@ -1418,10 +1541,99 @@ spec: required: - type type: object + namespaceSelector: + description: namespaceSelector is the actual namespaceSelector in + use. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object observedGeneration: description: observedGeneration is the most recent generation observed. 
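The load balancer provider enum gains "Nutanix" here; the matching Go constant `NutanixLoadBalancerProvider` is added in types_ingress.go further down. A minimal sketch of selecting it; the `Type` field name is inferred from the `type` discriminator in the CRD and is not shown verbatim in this excerpt.

```go
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Select the newly allowed provider type for the LoadBalancer endpoint
	// publishing strategy's provider parameters.
	params := operatorv1.ProviderLoadBalancerParameters{
		Type: operatorv1.NutanixLoadBalancerProvider,
	}
	fmt.Println(params.Type) // prints "Nutanix"
}
```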
format: int64 type: integer + routeSelector: + description: routeSelector is the actual routeSelector in use. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object selector: description: selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. The number diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml index 5c088482c0..6ca60be271 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml @@ -200,9 +200,10 @@ spec: Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create - one. Instead of creating it when pod is being deployed, - Kuryr keeps a number of ports ready to be attached to pods. - By default port prepopulation is disabled. + one. It creates a number of ports when the first pod that + is configured to use the dedicated network for pods is created + in a namespace, and keeps them ready to be attached to pods. + Port prepopulation is disabled by default. type: boolean mtu: description: mtu is the MTU that Kuryr should use when creating diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml index 158a99bd7a..f743eff6be 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml @@ -176,6 +176,75 @@ spec: - RoundRobin - Sequential type: string + transportConfig: + description: "transportConfig is used to configure the transport + type, server name, and optional custom CA or CA bundle + to use when forwarding DNS requests to an upstream resolver. + \n The default value is \"\" (empty) which results in + a standard cleartext connection being used when forwarding + DNS requests to an upstream resolver." 
+ properties: + tls: + description: tls contains the additional configuration + options to use when Transport is set to "TLS". + properties: + caBundle: + description: "caBundle references a ConfigMap that + must contain either a single CA Certificate or + a CA Bundle (in the case of multiple upstreams + signed by different CAs). This allows cluster + administrators to provide their own CA or CA bundle + for validating the certificate of upstream resolvers. + \n 1. The configmap must contain a `ca-bundle.crt` + key. 2. The value must be a PEM encoded CA certificate + or CA bundle. 3. The administrator must create + this configmap in the openshift-config namespace." + properties: + name: + description: name is the metadata.name of the + referenced config map + type: string + required: + - name + type: object + serverName: + description: serverName is the upstream server to + connect to when forwarding DNS queries. This is + required when Transport is set to "TLS". ServerName + will be validated against the DNS naming conventions + in RFC 1123 and should match the TLS certificate + installed in the upstream resolver(s). + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - serverName + type: object + transport: + description: "transport allows cluster administrators + to opt-in to using a DNS-over-TLS connection between + cluster DNS and an upstream resolver(s). Configuring + TLS as the transport at this level without configuring + a CABundle will result in the system certificates + being used to verify the serving certificate of the + upstream resolver(s). \n Possible values: \"\" (empty) + - This means no explicit choice has been made and + the platform chooses the default which is subject + to change over time. The current default is \"Cleartext\". + \"Cleartext\" - Cluster admin specified cleartext + option. This results in the same functionality as + an empty value but may be useful when a cluster admin + wants to be more explicit about the transport, or + wants to switch from \"TLS\" to \"Cleartext\" explicitly. + \"TLS\" - This indicates that DNS queries should be + sent over a TLS connection. If Transport is set to + TLS, you MUST also set ServerName." + enum: + - TLS + - Cleartext + - "" + type: string + type: object upstreams: description: "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of @@ -228,6 +297,72 @@ spec: - RoundRobin - Sequential type: string + transportConfig: + description: "transportConfig is used to configure the transport + type, server name, and optional custom CA or CA bundle to use + when forwarding DNS requests to an upstream resolver. \n The + default value is \"\" (empty) which results in a standard cleartext + connection being used when forwarding DNS requests to an upstream + resolver." + properties: + tls: + description: tls contains the additional configuration options + to use when Transport is set to "TLS". + properties: + caBundle: + description: "caBundle references a ConfigMap that must + contain either a single CA Certificate or a CA Bundle + (in the case of multiple upstreams signed by different + CAs). This allows cluster administrators to provide + their own CA or CA bundle for validating the certificate + of upstream resolvers. \n 1. The configmap must contain + a `ca-bundle.crt` key. 2. The value must be a PEM encoded + CA certificate or CA bundle. 3. 
The administrator must + create this configmap in the openshift-config namespace." + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + required: + - name + type: object + serverName: + description: serverName is the upstream server to connect + to when forwarding DNS queries. This is required when + Transport is set to "TLS". ServerName will be validated + against the DNS naming conventions in RFC 1123 and should + match the TLS certificate installed in the upstream + resolver(s). + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - serverName + type: object + transport: + description: "transport allows cluster administrators to opt-in + to using a DNS-over-TLS connection between cluster DNS and + an upstream resolver(s). Configuring TLS as the transport + at this level without configuring a CABundle will result + in the system certificates being used to verify the serving + certificate of the upstream resolver(s). \n Possible values: + \"\" (empty) - This means no explicit choice has been made + and the platform chooses the default which is subject to + change over time. The current default is \"Cleartext\". + \"Cleartext\" - Cluster admin specified cleartext option. + This results in the same functionality as an empty value + but may be useful when a cluster admin wants to be more + explicit about the transport, or wants to switch from \"TLS\" + to \"Cleartext\" explicitly. \"TLS\" - This indicates that + DNS queries should be sent over a TLS connection. If Transport + is set to TLS, you MUST also set ServerName." + enum: + - TLS + - Cleartext + - "" + type: string + type: object upstreams: default: - type: SystemResolvConf diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go index c05a826cdf..086045c84e 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_dns.go +++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go @@ -1,6 +1,7 @@ package v1 import ( + v1 "github.com/openshift/api/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1 "k8s.io/api/core/v1" @@ -130,6 +131,72 @@ type Server struct { ForwardPlugin ForwardPlugin `json:"forwardPlugin"` } +// DNSTransport indicates what type of connection should be used. +// +kubebuilder:validation:Enum=TLS;Cleartext;"" +type DNSTransport string + +const ( + // TLSTransport indicates that TLS should be used for the connection. + TLSTransport DNSTransport = "TLS" + + // CleartextTransport indicates that no encryption should be used for + // the connection. + CleartextTransport DNSTransport = "Cleartext" +) + +// DNSTransportConfig groups related configuration parameters used for configuring +// forwarding to upstream resolvers that support DNS-over-TLS. +// +union +type DNSTransportConfig struct { + // transport allows cluster administrators to opt-in to using a DNS-over-TLS + // connection between cluster DNS and an upstream resolver(s). Configuring + // TLS as the transport at this level without configuring a CABundle will + // result in the system certificates being used to verify the serving + // certificate of the upstream resolver(s). + // + // Possible values: + // "" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject + // to change over time. The current default is "Cleartext". 
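The caBundle description spells out three requirements: a `ca-bundle.crt` key, PEM-encoded CA data, and the `openshift-config` namespace. A short sketch of a ConfigMap that satisfies them; the object name `dns-upstream-ca` and the certificate body are placeholders.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A CA bundle ConfigMap shaped as the CRD text requires: key ca-bundle.crt,
	// PEM content, created in the openshift-config namespace.
	cm := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "dns-upstream-ca",
			Namespace: "openshift-config",
		},
		Data: map[string]string{
			"ca-bundle.crt": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n",
		},
	}
	fmt.Println(cm.Name, len(cm.Data))
}
```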
+ // "Cleartext" - Cluster admin specified cleartext option. This results in the same functionality + // as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, + // or wants to switch from "TLS" to "Cleartext" explicitly. + // "TLS" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, + // you MUST also set ServerName. + // + // +optional + // +unionDiscriminator + Transport DNSTransport `json:"transport,omitempty"` + + // tls contains the additional configuration options to use when Transport is set to "TLS". + TLS *DNSOverTLSConfig `json:"tls,omitempty"` +} + +// DNSOverTLSConfig describes optional DNSTransportConfig fields that should be captured. +type DNSOverTLSConfig struct { + // serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is + // set to "TLS". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the + // TLS certificate installed in the upstream resolver(s). + // + // + --- + // + Inspired by the DNS1123 patterns in Kubernetes: https://github.com/kubernetes/kubernetes/blob/7c46f40bdf89a437ecdbc01df45e235b5f6d9745/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L178-L218 + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + ServerName string `json:"serverName"` + + // caBundle references a ConfigMap that must contain either a single + // CA Certificate or a CA Bundle (in the case of multiple upstreams signed + // by different CAs). This allows cluster administrators to provide their + // own CA or CA bundle for validating the certificate of upstream resolvers. + // + // 1. The configmap must contain a `ca-bundle.crt` key. + // 2. The value must be a PEM encoded CA certificate or CA bundle. + // 3. The administrator must create this configmap in the openshift-config namespace. + // + // +optional + CABundle v1.ConfigMapNameReference `json:"caBundle,omitempty"` +} + // ForwardingPolicy is the policy to use when forwarding DNS requests. // +kubebuilder:validation:Enum=Random;RoundRobin;Sequential type ForwardingPolicy string @@ -170,6 +237,15 @@ type ForwardPlugin struct { // +optional // +kubebuilder:default:="Random" Policy ForwardingPolicy `json:"policy,omitempty"` + + // transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use + // when forwarding DNS requests to an upstream resolver. + // + // The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS + // requests to an upstream resolver. + // + // +optional + TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"` } // UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the @@ -203,6 +279,15 @@ type UpstreamResolvers struct { // +optional // +kubebuilder:default="Sequential" Policy ForwardingPolicy `json:"policy,omitempty"` + + // transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use + // when forwarding DNS requests to an upstream resolver. + // + // The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS + // requests to an upstream resolver. 
+ // + // +optional + TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"` } // Upstream can either be of type SystemResolvConf, or of type Network. diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index a50b2b78d2..ae43ec8dbc 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -76,7 +76,17 @@ type IngressControllerSpec struct { HttpErrorCodePages configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"` // replicas is the desired number of ingress controller replicas. If unset, - // defaults to 2. + // the default depends on the value of the defaultPlacement field in the + // cluster config.openshift.io/v1/ingresses status. + // + // The value of replicas is set based on the value of a chosen field in the + // Infrastructure CR. If defaultPlacement is set to ControlPlane, the + // chosen field will be controlPlaneTopology. If it is set to Workers the + // chosen field will be infrastructureTopology. Replicas will then be set to 1 + // or 2 based whether the chosen field's value is SingleReplica or + // HighlyAvailable, respectively. + // + // These defaults are subject to change. // // +optional Replicas *int32 `json:"replicas,omitempty"` @@ -292,12 +302,22 @@ type NodePlacement struct { // nodeSelector is the node selector applied to ingress controller // deployments. // - // If unset, the default is: + // If set, the specified selector is used and replaces the default. + // + // If unset, the default depends on the value of the defaultPlacement + // field in the cluster config.openshift.io/v1/ingresses status. + // + // When defaultPlacement is Workers, the default is: // // kubernetes.io/os: linux // node-role.kubernetes.io/worker: '' // - // If set, the specified selector is used and replaces the default. + // When defaultPlacement is ControlPlane, the default is: + // + // kubernetes.io/os: linux + // node-role.kubernetes.io/master: '' + // + // These defaults are subject to change. // // +optional NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` @@ -371,8 +391,8 @@ type LoadBalancerStrategy struct { // +union type ProviderLoadBalancerParameters struct { // type is the underlying infrastructure provider for the load balancer. - // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "OpenStack", - // and "VSphere". + // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Nutanix", + // "OpenStack", and "VSphere". // // +unionDiscriminator // +kubebuilder:validation:Required @@ -399,10 +419,10 @@ type ProviderLoadBalancerParameters struct { } // LoadBalancerProviderType is the underlying infrastructure provider for the -// load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", +// load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Nutanix", // "OpenStack", and "VSphere". 
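With `DNSTransportConfig`, `DNSOverTLSConfig`, and the `TransportConfig` fields on `ForwardPlugin` and `UpstreamResolvers` in place, a sketch of enabling DNS-over-TLS for the default "." upstreams. The type, field, and constant names all come from the types_dns.go hunk above; the server name and the `dns-upstream-ca` ConfigMap reference are placeholders (the latter matching the earlier ConfigMap sketch).

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Forward default ('.') queries over TLS and validate the upstream's
	// serving certificate against a custom CA bundle instead of the system
	// certificates.
	upstreams := operatorv1.UpstreamResolvers{
		TransportConfig: operatorv1.DNSTransportConfig{
			Transport: operatorv1.TLSTransport,
			TLS: &operatorv1.DNSOverTLSConfig{
				ServerName: "dns.example.com",
				CABundle:   configv1.ConfigMapNameReference{Name: "dns-upstream-ca"},
			},
		},
	}
	fmt.Println(upstreams.TransportConfig.Transport) // prints "TLS"
}
```

The same `DNSTransportConfig` value can be set on a per-zone `ForwardPlugin`, since both types now carry the identical `transportConfig` field.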
// -// +kubebuilder:validation:Enum=AWS;Azure;BareMetal;GCP;OpenStack;VSphere;IBM +// +kubebuilder:validation:Enum=AWS;Azure;BareMetal;GCP;Nutanix;OpenStack;VSphere;IBM type LoadBalancerProviderType string const ( @@ -414,6 +434,7 @@ const ( IBMLoadBalancerProvider LoadBalancerProviderType = "IBM" BareMetalLoadBalancerProvider LoadBalancerProviderType = "BareMetal" AlibabaCloudLoadBalancerProvider LoadBalancerProviderType = "AlibabaCloud" + NutanixLoadBalancerProvider LoadBalancerProviderType = "Nutanix" ) // AWSLoadBalancerParameters provides configuration settings that are @@ -536,6 +557,46 @@ type HostNetworkStrategy struct { // +kubebuilder:validation:Optional // +optional Protocol IngressControllerProtocol `json:"protocol,omitempty"` + + // httpPort is the port on the host which should be used to listen for + // HTTP requests. This field should be set when port 80 is already in use. + // The value should not coincide with the NodePort range of the cluster. + // When not specified the value defaults to 80. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:default=80 + HTTPPort int32 `json:"httpPort"` + + // httpsPort is the port on the host which should be used to listen for + // HTTPS requests. This field should be set when port 443 is already in use. + // The value should not coincide with the NodePort range of the cluster. + // When not specified the value defaults to 443. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:default=443 + HTTPSPort int32 `json:"httpsPort"` + + // statsPort is the port on the host where the stats from the router are + // published. The value should not coincide with the NodePort range of the + // cluster. If an external load balancer is configured to forward connections + // to this IngressController, the load balancer should use this port for + // health checks. The load balancer can send HTTP probes on this port on a + // given node, with the path /healthz/ready to determine if the ingress + // controller is ready to receive traffic on the node. For proper operation + // the load balancer must not forward traffic to a node until the health + // check reports ready. The load balancer should also stop forwarding requests + // within a maximum of 45 seconds after /healthz/ready starts reporting + // not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with + // a threshold of two successful or failed requests to become healthy or + // unhealthy respectively, are well-tested values. When not specified the + // value defaults to 1936. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:default=1936 + StatsPort int32 `json:"statsPort"` } // PrivateStrategy holds parameters for the Private endpoint publishing @@ -1306,6 +1367,28 @@ type IngressControllerTuningOptions struct { // +kubebuilder:validation:Format=duration // +optional TLSInspectDelay *metav1.Duration `json:"tlsInspectDelay,omitempty"` + + // healthCheckInterval defines how long the router waits between two consecutive + // health checks on its configured backends. This value is applied globally as + // a default for all routes, but may be overridden per-route by the route annotation + // "router.openshift.io/haproxy.health.check.interval". 
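The three new int32 fields land directly on `HostNetworkStrategy`. A sketch of moving the router off ports 80/443 when those host ports are already taken; field names are taken from the hunk above, `"TCP"` is one of the documented protocol enum values, and the port numbers are illustrative (they must stay within 1-65535 and outside the cluster's NodePort range).

```go
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Serve HTTP/HTTPS on alternate host ports and keep the stats/health
	// endpoint on the documented 1936 default, which external load balancers
	// should probe at /healthz/ready.
	hostNetwork := operatorv1.HostNetworkStrategy{
		Protocol:  "TCP",
		HTTPPort:  8080,
		HTTPSPort: 8443,
		StatsPort: 1936,
	}
	fmt.Printf("http=%d https=%d stats=%d\n",
		hostNetwork.HTTPPort, hostNetwork.HTTPSPort, hostNetwork.StatsPort)
}
```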
+ // + // Setting this to less than 5s can cause excess traffic due to too frequent + // TCP health checks and accompanying SYN packet storms. Alternatively, setting + // this too high can result in increased latency, due to backend servers that are no + // longer available, but haven't yet been detected as such. + // + // An empty or zero healthCheckInterval means no opinion and IngressController chooses + // a default, which is subject to change over time. + // Currently the default healthCheckInterval value is 5s. + // + // Currently the minimum allowed value is 1s and the maximum allowed value is + // 2147483647ms (24.85 days). Both are subject to change over time. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Format=duration + // +optional + HealthCheckInterval *metav1.Duration `json:"healthCheckInterval,omitempty"` } // HTTPEmptyRequestsPolicy indicates how HTTP connections for which no request @@ -1397,6 +1480,14 @@ type IngressControllerStatus struct { // observedGeneration is the most recent generation observed. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // namespaceSelector is the actual namespaceSelector in use. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // routeSelector is the actual routeSelector in use. + // +optional + RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index b3e397ab0b..e3bd64b0ea 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -343,9 +343,9 @@ type KuryrConfig struct { // enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port // pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact - // that it takes a significant amount of time to create one. Instead of creating it when - // pod is being deployed, Kuryr keeps a number of ports ready to be attached to pods. By - // default port prepopulation is disabled. + // that it takes a significant amount of time to create one. It creates a number of ports when + // the first pod that is configured to use the dedicated network for pods is created in a namespace, + // and keeps them ready to be attached to pods. Port prepopulation is disabled by default. // +optional EnablePortPoolsPrepopulation bool `json:"enablePortPoolsPrepopulation,omitempty"` diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index 23859af6d2..8b68d29533 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -918,6 +918,23 @@ func (in *DNSNodePlacement) DeepCopy() *DNSNodePlacement { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOverTLSConfig) DeepCopyInto(out *DNSOverTLSConfig) { + *out = *in + out.CABundle = in.CABundle + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOverTLSConfig. 
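`HealthCheckInterval` is added to `IngressControllerTuningOptions` as a `*metav1.Duration`. A sketch of relaxing the global backend health check from the documented 5s default to 10s, within the stated 1s to 2147483647ms bounds; individual routes can still override it with the `router.openshift.io/haproxy.health.check.interval` annotation mentioned above.

```go
package main

import (
	"fmt"
	"time"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A 10s interval reduces health-check traffic at the cost of slower
	// detection of unavailable backends; leaving the field nil keeps the
	// operator-chosen default (currently 5s).
	tuning := operatorv1.IngressControllerTuningOptions{
		HealthCheckInterval: &metav1.Duration{Duration: 10 * time.Second},
	}
	fmt.Println(tuning.HealthCheckInterval.Duration)
}
```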
+func (in *DNSOverTLSConfig) DeepCopy() *DNSOverTLSConfig { + if in == nil { + return nil + } + out := new(DNSOverTLSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { *out = *in @@ -966,6 +983,27 @@ func (in *DNSStatus) DeepCopy() *DNSStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSTransportConfig) DeepCopyInto(out *DNSTransportConfig) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(DNSOverTLSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSTransportConfig. +func (in *DNSTransportConfig) DeepCopy() *DNSTransportConfig { + if in == nil { + return nil + } + out := new(DNSTransportConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DefaultNetworkDefinition) DeepCopyInto(out *DefaultNetworkDefinition) { *out = *in @@ -1235,6 +1273,7 @@ func (in *ForwardPlugin) DeepCopyInto(out *ForwardPlugin) { *out = make([]string, len(*in)) copy(*out, *in) } + in.TransportConfig.DeepCopyInto(&out.TransportConfig) return } @@ -1703,6 +1742,16 @@ func (in *IngressControllerStatus) DeepCopyInto(out *IngressControllerStatus) { *out = new(configv1.TLSProfileSpec) (*in).DeepCopyInto(*out) } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RouteSelector != nil { + in, out := &in.RouteSelector, &out.RouteSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } return } @@ -1749,6 +1798,11 @@ func (in *IngressControllerTuningOptions) DeepCopyInto(out *IngressControllerTun *out = new(metav1.Duration) **out = **in } + if in.HealthCheckInterval != nil { + in, out := &in.HealthCheckInterval, &out.HealthCheckInterval + *out = new(metav1.Duration) + **out = **in + } return } @@ -3743,6 +3797,7 @@ func (in *UpstreamResolvers) DeepCopyInto(out *UpstreamResolvers) { *out = make([]Upstream, len(*in)) copy(*out, *in) } + in.TransportConfig.DeepCopyInto(&out.TransportConfig) return } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index e42c685eb9..11e7d1562f 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -421,6 +421,16 @@ func (DNSNodePlacement) SwaggerDoc() map[string]string { return map_DNSNodePlacement } +var map_DNSOverTLSConfig = map[string]string{ + "": "DNSOverTLSConfig describes optional DNSTransportConfig fields that should be captured.", + "serverName": "serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is set to \"TLS\". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the TLS certificate installed in the upstream resolver(s).", + "caBundle": "caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle (in the case of multiple upstreams signed by different CAs). 
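The deepcopy additions above cover the new `NamespaceSelector` and `RouteSelector` status fields. A sketch of how a consumer might turn one of them into a working `labels.Selector` via apimachinery's `LabelSelectorAsSelector`; the populated status object here is hand-built for illustration rather than fetched from a cluster.

```go
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Hand-built status for illustration; in practice this comes from a
	// fetched IngressController object.
	status := operatorv1.IngressControllerStatus{
		NamespaceSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"environment": "production"},
		},
	}

	if status.NamespaceSelector != nil {
		sel, err := metav1.LabelSelectorAsSelector(status.NamespaceSelector)
		if err != nil {
			panic(err)
		}
		fmt.Println(sel.Matches(labels.Set{"environment": "production"})) // true
	}
}
```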
This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers.\n\n1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace.", +} + +func (DNSOverTLSConfig) SwaggerDoc() map[string]string { + return map_DNSOverTLSConfig +} + var map_DNSSpec = map[string]string{ "": "DNSSpec is the specification of the desired behavior of the DNS.", "servers": "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server.\n\nFor example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\".\n\nIf this field is nil, no servers are created.", @@ -446,10 +456,21 @@ func (DNSStatus) SwaggerDoc() map[string]string { return map_DNSStatus } +var map_DNSTransportConfig = map[string]string{ + "": "DNSTransportConfig groups related configuration parameters used for configuring forwarding to upstream resolvers that support DNS-over-TLS.", + "transport": "transport allows cluster administrators to opt-in to using a DNS-over-TLS connection between cluster DNS and an upstream resolver(s). Configuring TLS as the transport at this level without configuring a CABundle will result in the system certificates being used to verify the serving certificate of the upstream resolver(s).\n\nPossible values: \"\" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject to change over time. The current default is \"Cleartext\". \"Cleartext\" - Cluster admin specified cleartext option. This results in the same functionality as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, or wants to switch from \"TLS\" to \"Cleartext\" explicitly. \"TLS\" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, you MUST also set ServerName.", + "tls": "tls contains the additional configuration options to use when Transport is set to \"TLS\".", +} + +func (DNSTransportConfig) SwaggerDoc() map[string]string { + return map_DNSTransportConfig +} + var map_ForwardPlugin = map[string]string{ - "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", - "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", - "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. 
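These generated `SwaggerDoc()` maps are plain `map[string]string` values keyed by JSON field name, with the empty key holding the type-level description. A short sketch of reading the docs for the new transport configuration at runtime, using the `DNSTransportConfig` map defined just above.

```go
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// The empty key is the type description; the other keys are the JSON
	// field names documented in the generated map.
	docs := operatorv1.DNSTransportConfig{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["transport"])
	fmt.Println(docs["tls"])
}
```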
* \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Random\"", + "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", + "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", + "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Random\"", + "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", } func (ForwardPlugin) SwaggerDoc() map[string]string { @@ -479,9 +500,10 @@ func (Upstream) SwaggerDoc() map[string]string { } var map_UpstreamResolvers = map[string]string{ - "": "UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the specific case of the default (\".\") server. It defers from ForwardPlugin in the default values it accepts: * At least one upstream should be specified. * the default policy is Sequential", - "upstreams": "Upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default", - "policy": "Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"", + "": "UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the specific case of the default (\".\") server. It defers from ForwardPlugin in the default values it accepts: * At least one upstream should be specified. * the default policy is Sequential", + "upstreams": "Upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. 
When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default", + "policy": "Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"", + "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", } func (UpstreamResolvers) SwaggerDoc() map[string]string { @@ -596,8 +618,11 @@ func (HTTPCompressionPolicy) SwaggerDoc() map[string]string { } var map_HostNetworkStrategy = map[string]string{ - "": "HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing strategy.", - "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", + "": "HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. 
Note that the default is subject to change.", + "httpPort": "httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. The value should not coincide with the NodePort range of the cluster. When not specified the value defaults to 80.", + "httpsPort": "httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When not specified the value defaults to 443.", + "statsPort": "statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node. For proper operation the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, are well-tested values. When not specified the value defaults to 1936.", } func (HostNetworkStrategy) SwaggerDoc() map[string]string { @@ -696,7 +721,7 @@ var map_IngressControllerSpec = map[string]string{ "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-.http\", where is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.", - "replicas": "replicas is the desired number of ingress controller replicas. If unset, defaults to 2.", + "replicas": "replicas is the desired number of ingress controller replicas. 
If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers, the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based on whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.", "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller.
This is useful for implementing shards.\n\nIf unset, the default is no filtering.", @@ -726,6 +751,8 @@ var map_IngressControllerStatus = map[string]string{ "conditions": "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.", "tlsProfile": "tlsProfile is the TLS connection configuration that is in effect.", "observedGeneration": "observedGeneration is the most recent generation observed.", + "namespaceSelector": "namespaceSelector is the actual namespaceSelector in use.", + "routeSelector": "routeSelector is the actual routeSelector in use.", } func (IngressControllerStatus) SwaggerDoc() map[string]string { @@ -743,6 +770,7 @@ var map_IngressControllerTuningOptions = map[string]string{ "serverFinTimeout": "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection.\n\nIf unset, the default timeout is 1s", "tunnelTimeout": "tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle.\n\nIf unset, the default timeout is 1h", "tlsInspectDelay": "tlsInspectDelay defines how long the router can hold data to find a matching route.\n\nSetting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used.\n\nIf unset, the default inspect delay is 5s", + "healthCheckInterval": "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\".\n\nSetting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such.\n\nAn empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s.\n\nCurrently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). 
Both are subject to change over time.", } func (IngressControllerTuningOptions) SwaggerDoc() map[string]string { @@ -772,7 +800,7 @@ func (LoggingDestination) SwaggerDoc() map[string]string { var map_NodePlacement = map[string]string{ "": "NodePlacement describes node scheduling configuration for an ingress controller.", - "nodeSelector": "nodeSelector is the node selector applied to ingress controller deployments.\n\nIf unset, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/worker: ''\n\nIf set, the specified selector is used and replaces the default.", + "nodeSelector": "nodeSelector is the node selector applied to ingress controller deployments.\n\nIf set, the specified selector is used and replaces the default.\n\nIf unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nWhen defaultPlacement is Workers, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/worker: ''\n\nWhen defaultPlacement is ControlPlane, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/master: ''\n\nThese defaults are subject to change.", "tolerations": "tolerations is a list of tolerations applied to ingress controller deployments.\n\nThe default is an empty list.\n\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", } @@ -799,7 +827,7 @@ func (PrivateStrategy) SwaggerDoc() map[string]string { var map_ProviderLoadBalancerParameters = map[string]string{ "": "ProviderLoadBalancerParameters holds desired load balancer information specific to the underlying infrastructure provider.", - "type": "type is the underlying infrastructure provider for the load balancer. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"OpenStack\", and \"VSphere\".", + "type": "type is the underlying infrastructure provider for the load balancer. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Nutanix\", \"OpenStack\", and \"VSphere\".", "aws": "aws provides configuration settings that are specific to AWS load balancers.\n\nIf empty, defaults will be applied. See specific aws fields for details about their defaults.", "gcp": "gcp provides configuration settings that are specific to GCP load balancers.\n\nIf empty, defaults will be applied. See specific gcp fields for details about their defaults.", } @@ -977,7 +1005,7 @@ var map_KuryrConfig = map[string]string{ "daemonProbesPort": "The port kuryr-daemon will listen for readiness and liveness requests.", "controllerProbesPort": "The port kuryr-controller will listen for readiness and liveness requests.", "openStackServiceNetwork": "openStackServiceNetwork contains the CIDR of network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and second for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectivly preventing conflicts. 
If not set cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1.", - "enablePortPoolsPrepopulation": "enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create one. Instead of creating it when pod is being deployed, Kuryr keeps a number of ports ready to be attached to pods. By default port prepopulation is disabled.", + "enablePortPoolsPrepopulation": "enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create one. It creates a number of ports when the first pod that is configured to use the dedicated network for pods is created in a namespace, and keeps them ready to be attached to pods. Port prepopulation is disabled by default.", "poolMaxPorts": "poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking and this is the default value. For more information about port pools see enablePortPoolsPrepopulation setting.", "poolMinPorts": "poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to pool. The default is 1. For more information about port pools see enablePortPoolsPrepopulation setting.", "poolBatchPorts": "poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see enablePortPoolsPrepopulation setting.", diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index a3c021d3f8..cf66309c4a 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -21,9 +21,9 @@ // explicitly to each function that needs it. The Context should be the first // parameter, typically named ctx: // -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } // // Do not pass a nil Context, even if a function permits it. Pass context.TODO // if you are unsure about which Context to use. 
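For reference, a minimal runnable sketch (not part of the vendored change) of the convention the reflowed context doc comments describe: ctx is the first parameter, it is never nil, and cancel is deferred as soon as a timeout context is created. DoSomething and the placeholder work inside it are taken from the comment's own example names.

package main

import (
	"context"
	"fmt"
	"time"
)

// DoSomething follows the documented convention: ctx comes first and is
// consulted for cancellation while the (placeholder) work runs.
func DoSomething(ctx context.Context, arg string) error {
	select {
	case <-time.After(50 * time.Millisecond): // stand-in for real work
		fmt.Println("handled", arg)
		return nil
	case <-ctx.Done():
		return ctx.Err() // canceled or deadline exceeded
	}
}

func main() {
	// cancel is deferred immediately so resources are released as soon as
	// the operation completes, mirroring the slowOperationWithTimeout
	// example in the surrounding doc comments.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	if err := DoSomething(ctx, "example"); err != nil {
		fmt.Println("error:", err)
	}
}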
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index 344bd14334..0a54bdbcc6 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -54,11 +54,11 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 5270db5db7..7b6b685114 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -264,11 +264,11 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) { // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index c79aa73f28..6e071e8524 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -173,13 +173,15 @@ func tokenEqual(t1, t2 string) bool { // isLWS reports whether b is linear white space, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// LWS = [CRLF] 1*( SP | HT ) +// +// LWS = [CRLF] 1*( SP | HT ) func isLWS(b byte) bool { return b == ' ' || b == '\t' } // isCTL reports whether b is a control byte, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// CTL = +// +// CTL = func isCTL(b byte) bool { const del = 0x7f // a CTL return b < ' ' || b == del @@ -189,12 +191,13 @@ func isCTL(b byte) bool { // HTTP/2 imposes the additional restriction that uppercase ASCII // letters are not allowed. // -// RFC 7230 says: -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// token = 1*tchar -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." 
/ -// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// RFC 7230 says: +// +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -267,27 +270,28 @@ var validHostByte = [256]bool{ // ValidHeaderFieldValue reports whether v is a valid "field-value" according to // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : // -// message-header = field-name ":" [ field-value ] -// field-value = *( field-content | LWS ) -// field-content = +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = // // http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : // -// TEXT = -// LWS = [CRLF] 1*( SP | HT ) -// CTL = +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = // // RFC 7230 says: -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" +// +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" // // http2 further says: "Similarly, HTTP/2 allows header field values // that are not valid. While most of the values that can be encoded diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index 652bc11a02..c936843eaf 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -16,6 +16,12 @@ import ( // ClientConnPool manages a pool of HTTP/2 client connections. type ClientConnPool interface { + // GetClientConn returns a specific HTTP/2 connection (usually + // a TLS-TCP connection) to an HTTP/2 server. On success, the + // returned ClientConn accounts for the upcoming RoundTrip + // call, so the caller should not omit it. If the caller needs + // to, ClientConn.RoundTrip can be called with a bogus + // new(http.Request) to release the stream reservation. GetClientConn(req *http.Request, addr string) (*ClientConn, error) MarkDead(*ClientConn) } @@ -42,7 +48,7 @@ type clientConnPool struct { conns map[string][]*ClientConn // key is host:port dialing map[string]*dialCall // currently in-flight dials keys map[*ClientConn][]string - addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls + addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls } func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { @@ -54,28 +60,8 @@ const ( noDialOnMiss = false ) -// shouldTraceGetConn reports whether getClientConn should call any -// ClientTrace.GetConn hook associated with the http.Request. -// -// This complexity is needed to avoid double calls of the GetConn hook -// during the back-and-forth between net/http and x/net/http2 (when the -// net/http.Transport is upgraded to also speak http2), as well as support -// the case where x/net/http2 is being used directly. 
-func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { - // If our Transport wasn't made via ConfigureTransport, always - // trace the GetConn hook if provided, because that means the - // http2 package is being used directly and it's the one - // dialing, as opposed to net/http. - if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { - return true - } - // Otherwise, only use the GetConn hook if this connection has - // been used previously for other requests. For fresh - // connections, the net/http package does the dialing. - return !st.freshConn -} - func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + // TODO(dneil): Dial a new connection when t.DisableKeepAlives is set? if isConnectionCloseRequest(req) && dialOnMiss { // It gets its own connection. traceGetConn(req, addr) @@ -89,10 +75,14 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis for { p.mu.Lock() for _, cc := range p.conns[addr] { - if st := cc.idleState(); st.canTakeNewRequest { - if p.shouldTraceGetConn(st) { + if cc.ReserveNewRequest() { + // When a connection is presented to us by the net/http package, + // the GetConn hook has already been called. + // Don't call it a second time here. + if !cc.getConnCalled { traceGetConn(req, addr) } + cc.getConnCalled = false p.mu.Unlock() return cc, nil } @@ -108,7 +98,13 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis if shouldRetryDial(call, req) { continue } - return call.res, call.err + cc, err := call.res, call.err + if err != nil { + return nil, err + } + if cc.ReserveNewRequest() { + return cc, nil + } } } @@ -205,6 +201,7 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { if err != nil { c.err = err } else { + cc.getConnCalled = true // already called by the net/http package p.addConnLocked(key, cc) } delete(p.addConnCalls, key) diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go index 71f2c46317..f2067dabc5 100644 --- a/vendor/golang.org/x/net/http2/errors.go +++ b/vendor/golang.org/x/net/http2/errors.go @@ -53,6 +53,13 @@ func (e ErrCode) String() string { return fmt.Sprintf("unknown error code 0x%x", uint32(e)) } +func (e ErrCode) stringToken() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e)) +} + // ConnectionError is an error that results in the termination of the // entire connection. type ConnectionError ErrCode @@ -67,6 +74,11 @@ type StreamError struct { Cause error // optional additional detail } +// errFromPeer is a sentinel error value for StreamError.Cause to +// indicate that the StreamError was sent from the peer over the wire +// and wasn't locally generated in the Transport. 
+var errFromPeer = errors.New("received from peer") + func streamError(id uint32, code ErrCode) StreamError { return StreamError{StreamID: id, Code: code} } @@ -124,7 +136,7 @@ func (e headerFieldNameError) Error() string { type headerFieldValueError string func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) + return fmt.Sprintf("invalid header field value for %q", string(e)) } var ( diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 514c126c5f..0178647ee0 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{ // a frameParser parses a frame given its FrameHeader and payload // bytes. The length of payload will always equal fh.Length (which // might be 0). -type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) +type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) var frameParsers = map[FrameType]frameParser{ FrameData: parseDataFrame, @@ -267,6 +267,11 @@ type Framer struct { lastFrame Frame errDetail error + // countError is a non-nil func that's called on a frame parse + // error with some unique error path token. It's initialized + // from Transport.CountError or Server.CountError. + countError func(errToken string) + // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 @@ -426,6 +431,7 @@ func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ w: w, r: r, + countError: func(string) {}, logReads: logFrameReads, logWrites: logFrameWrites, debugReadLoggerf: log.Printf, @@ -500,7 +506,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { if _, err := io.ReadFull(fr.r, payload); err != nil { return nil, err } - f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) if err != nil { if ce, ok := err.(connError); ok { return nil, fr.connError(ce.Code, ce.Reason) @@ -588,13 +594,14 @@ func (f *DataFrame) Data() []byte { return f.data } -func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { +func parseDataFrame(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) { if fh.StreamID == 0 { // DATA frames MUST be associated with a stream. If a // DATA frame is received whose stream identifier // field is 0x0, the recipient MUST respond with a // connection error (Section 5.4.1) of type // PROTOCOL_ERROR. + countError("frame_data_stream_0") return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} } f := fc.getDataFrame() @@ -605,6 +612,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro var err error payload, padSize, err = readByte(payload) if err != nil { + countError("frame_data_pad_byte_short") return nil, err } } @@ -613,6 +621,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro // length of the frame payload, the recipient MUST // treat this as a connection error. 
// Filed: https://github.com/http2/http2-spec/issues/610 + countError("frame_data_pad_too_big") return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} } f.data = payload[:len(payload)-int(padSize)] @@ -695,7 +704,7 @@ type SettingsFrame struct { p []byte } -func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseSettingsFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { // When this (ACK 0x1) bit is set, the payload of the // SETTINGS frame MUST be empty. Receipt of a @@ -703,6 +712,7 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) // field value other than 0 MUST be treated as a // connection error (Section 5.4.1) of type // FRAME_SIZE_ERROR. + countError("frame_settings_ack_with_length") return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID != 0 { @@ -713,14 +723,17 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) // field is anything other than 0x0, the endpoint MUST // respond with a connection error (Section 5.4.1) of // type PROTOCOL_ERROR. + countError("frame_settings_has_stream") return nil, ConnectionError(ErrCodeProtocol) } if len(p)%6 != 0 { + countError("frame_settings_mod_6") // Expecting even number of 6 byte settings. return nil, ConnectionError(ErrCodeFrameSize) } f := &SettingsFrame{FrameHeader: fh, p: p} if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { + countError("frame_settings_window_size_too_big") // Values above the maximum flow control window size of 2^31 - 1 MUST // be treated as a connection error (Section 5.4.1) of type // FLOW_CONTROL_ERROR. @@ -832,11 +845,13 @@ type PingFrame struct { func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } -func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { +func parsePingFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) { if len(payload) != 8 { + countError("frame_ping_length") return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID != 0 { + countError("frame_ping_has_stream") return nil, ConnectionError(ErrCodeProtocol) } f := &PingFrame{FrameHeader: fh} @@ -872,11 +887,13 @@ func (f *GoAwayFrame) DebugData() []byte { return f.debugData } -func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { if fh.StreamID != 0 { + countError("frame_goaway_has_stream") return nil, ConnectionError(ErrCodeProtocol) } if len(p) < 8 { + countError("frame_goaway_short") return nil, ConnectionError(ErrCodeFrameSize) } return &GoAwayFrame{ @@ -912,7 +929,7 @@ func (f *UnknownFrame) Payload() []byte { return f.p } -func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { return &UnknownFrame{fh, p}, nil } @@ -923,8 +940,9 @@ type WindowUpdateFrame struct { Increment uint32 // never read with high bit set } -func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { if len(p) != 4 { + countError("frame_windowupdate_bad_len") return nil, ConnectionError(ErrCodeFrameSize) } inc := binary.BigEndian.Uint32(p[:4]) & 
0x7fffffff // mask off high reserved bit @@ -936,8 +954,10 @@ func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, err // control window MUST be treated as a connection // error (Section 5.4.1). if fh.StreamID == 0 { + countError("frame_windowupdate_zero_inc_conn") return nil, ConnectionError(ErrCodeProtocol) } + countError("frame_windowupdate_zero_inc_stream") return nil, streamError(fh.StreamID, ErrCodeProtocol) } return &WindowUpdateFrame{ @@ -988,7 +1008,7 @@ func (f *HeadersFrame) HasPriority() bool { return f.FrameHeader.Flags.Has(FlagHeadersPriority) } -func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { +func parseHeadersFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) { hf := &HeadersFrame{ FrameHeader: fh, } @@ -997,11 +1017,13 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err er // is received whose stream identifier field is 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR. + countError("frame_headers_zero_stream") return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} } var padLength uint8 if fh.Flags.Has(FlagHeadersPadded) { if p, padLength, err = readByte(p); err != nil { + countError("frame_headers_pad_short") return } } @@ -1009,16 +1031,19 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err er var v uint32 p, v, err = readUint32(p) if err != nil { + countError("frame_headers_prio_short") return nil, err } hf.Priority.StreamDep = v & 0x7fffffff hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set p, hf.Priority.Weight, err = readByte(p) if err != nil { + countError("frame_headers_prio_weight_short") return nil, err } } - if len(p)-int(padLength) <= 0 { + if len(p)-int(padLength) < 0 { + countError("frame_headers_pad_too_big") return nil, streamError(fh.StreamID, ErrCodeProtocol) } hf.headerFragBuf = p[:len(p)-int(padLength)] @@ -1125,11 +1150,13 @@ func (p PriorityParam) IsZero() bool { return p == PriorityParam{} } -func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { +func parsePriorityFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) { if fh.StreamID == 0 { + countError("frame_priority_zero_stream") return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } if len(payload) != 5 { + countError("frame_priority_bad_length") return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} } v := binary.BigEndian.Uint32(payload[:4]) @@ -1172,11 +1199,13 @@ type RSTStreamFrame struct { ErrCode ErrCode } -func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { if len(p) != 4 { + countError("frame_rststream_bad_len") return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID == 0 { + countError("frame_rststream_zero_stream") return nil, ConnectionError(ErrCodeProtocol) } return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil @@ -1202,8 +1231,9 @@ type ContinuationFrame struct { headerFragBuf []byte } -func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { +func parseContinuationFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) { if fh.StreamID == 0 { + 
countError("frame_continuation_zero_stream") return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} } return &ContinuationFrame{fh, p}, nil @@ -1252,7 +1282,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) } -func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { +func parsePushPromise(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) { pp := &PushPromiseFrame{ FrameHeader: fh, } @@ -1263,6 +1293,7 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err err // with. If the stream identifier field specifies the value // 0x0, a recipient MUST respond with a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. + countError("frame_pushpromise_zero_stream") return nil, ConnectionError(ErrCodeProtocol) } // The PUSH_PROMISE frame includes optional padding. @@ -1270,18 +1301,21 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err err var padLength uint8 if fh.Flags.Has(FlagPushPromisePadded) { if p, padLength, err = readByte(p); err != nil { + countError("frame_pushpromise_pad_short") return } } p, pp.PromiseID, err = readUint32(p) if err != nil { + countError("frame_pushpromise_promiseid_short") return } pp.PromiseID = pp.PromiseID & (1<<31 - 1) if int(padLength) > len(p) { // like the DATA frame, error out if padding is longer than the body. + countError("frame_pushpromise_pad_too_big") return nil, ConnectionError(ErrCodeProtocol) } pp.headerFragBuf = p[:len(p)-int(padLength)] @@ -1498,7 +1532,8 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } if !httpguts.ValidHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) + // Don't include the value in the error, because it may be sensitive. + invalid = headerFieldValueError(hf.Name) } isPseudo := strings.HasPrefix(hf.Name, ":") if isPseudo { diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go new file mode 100644 index 0000000000..aca4b2b31a --- /dev/null +++ b/vendor/golang.org/x/net/http2/go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.18 +// +build go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return tc.NetConn() +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index a1ab2f0567..fe0b84ccd4 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -140,25 +140,29 @@ func buildRootHuffmanNode() { panic("unexpected size") } lazyRootHuffmanNode = newInternalNode() - for i, code := range huffmanCodes { - addDecoderNode(byte(i), code, huffmanCodeLen[i]) - } -} + // allocate a leaf node for each of the 256 symbols + leaves := new([256]node) + + for sym, code := range huffmanCodes { + codeLen := huffmanCodeLen[sym] + + cur := lazyRootHuffmanNode + for codeLen > 8 { + codeLen -= 8 + i := uint8(code >> codeLen) + if cur.children[i] == nil { + cur.children[i] = newInternalNode() + } + cur = cur.children[i] + } + shift := 8 - codeLen + start, end := int(uint8(code< 8 { - codeLen -= 8 - i := uint8(code >> codeLen) - if cur.children[i] == nil { - cur.children[i] = newInternalNode() + leaves[sym].sym = byte(sym) + leaves[sym].codeLen = codeLen + for i := start; i < start+end; i++ { + cur.children[i] = &leaves[sym] } - cur = cur.children[i] - } - shift := 8 - codeLen - start, end := int(uint8(code< 0 { // Check whether the client has flow control quota. if st.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) + return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl)) } st.inflow.take(int32(f.Length)) @@ -1710,7 +1727,7 @@ func (sc *serverConn) processData(f *DataFrame) error { wrote, err := st.body.Write(data) if err != nil { sc.sendWindowUpdate(nil, int(f.Length)-wrote) - return streamError(id, ErrCodeStreamClosed) + return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed)) } if wrote != len(data) { panic("internal error: bad Writer") @@ -1796,7 +1813,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // stream identifier MUST respond with a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. if id%2 != 1 { - return ConnectionError(ErrCodeProtocol) + return sc.countError("headers_even", ConnectionError(ErrCodeProtocol)) } // A HEADERS frame can be used to create a new stream or // send a trailer for an open one. If we already have a stream @@ -1813,7 +1830,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // this state, it MUST respond with a stream error (Section 5.4.2) of // type STREAM_CLOSED. if st.state == stateHalfClosedRemote { - return streamError(id, ErrCodeStreamClosed) + return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed)) } return st.processTrailerHeaders(f) } @@ -1824,7 +1841,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // receives an unexpected stream identifier MUST respond with // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. if id <= sc.maxClientStreamID { - return ConnectionError(ErrCodeProtocol) + return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol)) } sc.maxClientStreamID = id @@ -1841,14 +1858,14 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { if sc.curClientStreams+1 > sc.advMaxStreams { if sc.unackedSettings == 0 { // They should know better. 
- return streamError(id, ErrCodeProtocol) + return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol)) } // Assume it's a network race, where they just haven't // received our last SETTINGS update. But actually // this can't happen yet, because we don't yet provide // a way for users to adjust server parameters at // runtime. - return streamError(id, ErrCodeRefusedStream) + return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream)) } initialState := stateOpen @@ -1858,7 +1875,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { st := sc.newStream(id, 0, initialState) if f.HasPriority() { - if err := checkPriority(f.StreamID, f.Priority); err != nil { + if err := sc.checkPriority(f.StreamID, f.Priority); err != nil { return err } sc.writeSched.AdjustStream(st.id, f.Priority) @@ -1902,15 +1919,15 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) + return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol)) } st.gotTrailerHeader = true if !f.StreamEnded() { - return streamError(st.id, ErrCodeProtocol) + return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol)) } if len(f.PseudoFields()) > 0 { - return streamError(st.id, ErrCodeProtocol) + return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol)) } if st.trailer != nil { for _, hf := range f.RegularFields() { @@ -1919,7 +1936,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { // TODO: send more details to the peer somehow. But http2 has // no way to send debug data at a stream level. Discuss with // HTTP folk. - return streamError(st.id, ErrCodeProtocol) + return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol)) } st.trailer[key] = append(st.trailer[key], hf.Value) } @@ -1928,13 +1945,13 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { return nil } -func checkPriority(streamID uint32, p PriorityParam) error { +func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error { if streamID == p.StreamDep { // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." // Section 5.3.3 says that a stream can depend on one of its dependencies, // so it's only self-dependencies that are forbidden. 
- return streamError(streamID, ErrCodeProtocol) + return sc.countError("priority", streamError(streamID, ErrCodeProtocol)) } return nil } @@ -1943,7 +1960,7 @@ func (sc *serverConn) processPriority(f *PriorityFrame) error { if sc.inGoAway { return nil } - if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil { return err } sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) @@ -2000,7 +2017,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res isConnect := rp.method == "CONNECT" if isConnect { if rp.path != "" || rp.scheme != "" || rp.authority == "" { - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: @@ -2013,13 +2030,13 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } bodyOpen := !f.StreamEnded() if rp.method == "HEAD" && bodyOpen { // HEAD requests can't have bodies - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol)) } rp.header = make(http.Header) @@ -2102,7 +2119,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var err error url_, err = url.ParseRequestURI(rp.path) if err != nil { - return nil, nil, streamError(st.id, ErrCodeProtocol) + return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) } requestURI = rp.path } @@ -2529,8 +2546,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { // prior to the headers being written. 
If the set of trailers is fixed // or known before the header is written, the normal Go trailers mechanism // is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +// +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers const TrailerPrefix = "Trailer:" // promoteUndeclaredTrailers permits http.Handlers to set trailers @@ -2985,3 +3003,31 @@ func h1ServerKeepAlivesDisabled(hs *http.Server) bool { } return false } + +func (sc *serverConn) countError(name string, err error) error { + if sc == nil || sc.srv == nil { + return err + } + f := sc.srv.CountError + if f == nil { + return err + } + var typ string + var code ErrCode + switch e := err.(type) { + case ConnectionError: + typ = "conn" + code = ErrCode(e) + case StreamError: + typ = "stream" + code = ErrCode(e.Code) + default: + return err + } + codeStr := errCodeName[code] + if codeStr == "" { + codeStr = strconv.Itoa(int(code)) + } + f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name)) + return err +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index b261beb1d0..9180ba3d5c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -24,6 +24,7 @@ import ( "net/http" "net/http/httptrace" "net/textproto" + "os" "sort" "strconv" "strings" @@ -51,6 +52,15 @@ const ( transportDefaultStreamMinRefresh = 4 << 10 defaultUserAgent = "Go-http-client/2.0" + + // initialMaxConcurrentStreams is a connection's maxConcurrentStreams until + // it has received the server's initial SETTINGS frame, which corresponds with the + // spec's minimum recommended value. + initialMaxConcurrentStreams = 100 + + // defaultMaxConcurrentStreams is a connection's default maxConcurrentStreams + // if the server doesn't include one in its initial SETTINGS frame. + defaultMaxConcurrentStreams = 1000 ) // Transport is an HTTP/2 Transport. @@ -121,6 +131,17 @@ type Transport struct { // Defaults to 15s. PingTimeout time.Duration + // WriteByteTimeout is the timeout after which the connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + WriteByteTimeout time.Duration + + // CountError, if non-nil, is called on HTTP/2 transport errors. + // It's intended to increment a metric for monitoring, such + // as an expvar or Prometheus metric. + // The errType consists of only ASCII word characters. + CountError func(errType string) + // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). @@ -227,11 +248,12 @@ func (t *Transport) initConnPool() { // ClientConn is the state of a single HTTP/2 client connection to an // HTTP/2 server.
type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic - singleUse bool // whether being used for a single http.Request + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + reused uint32 // whether conn is being reused; atomic + singleUse bool // whether being used for a single http.Request + getConnCalled bool // used by clientConnPool // readLoop goroutine fields: readerDone chan struct{} // closed on error @@ -244,86 +266,94 @@ type ClientConn struct { cond *sync.Cond // hold mu; broadcast on flow/closed changes flow flow // our conn-level flow control quota (cs.flow is per stream) inflow flow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool + seenSettings bool // true if we've seen a settings frame, false otherwise wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received goAwayDebug string // goAway frame's debug data, retained as a string streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip nextStreamID uint32 pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams pings map[[8]byte]chan struct{} // in flight ping data to notification channel - bw *bufio.Writer br *bufio.Reader - fr *Framer lastActive time.Time lastIdle time.Time // time last idle - // Settings from peer: (also guarded by mu) + // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 peerMaxHeaderListSize uint64 initialWindowSize uint32 + // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. + // Write to reqHeaderMu to lock it, read from it to unlock. + // Lock reqmu BEFORE mu or wmu. + reqHeaderMu chan struct{} + + // wmu is held while writing. + // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. + // Only acquire both at the same time when changing peer settings. + wmu sync.Mutex + bw *bufio.Writer + fr *Framer + werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder - - wmu sync.Mutex // held while writing; acquire AFTER mu if holding both - werr error // first write error that has occurred } // clientStream is the state for a single HTTP/2 stream. One of these // is created for each Transport.RoundTrip call. type clientStream struct { - cc *ClientConn - req *http.Request + cc *ClientConn + + // Fields of Request that we may access even after the response body is closed. 
+ ctx context.Context + reqCancel <-chan struct{} + trace *httptrace.ClientTrace // or nil ID uint32 - resc chan resAndError bufPipe pipe // buffered pipe with the flow-controlled response payload - startedWrite bool // started request body write; guarded by cc.mu requestedGzip bool - on100 func() // optional code to run if get a 100 continue response + isHead bool + + abortOnce sync.Once + abort chan struct{} // closed to signal stream should end immediately + abortErr error // set if abort is closed + + peerClosed chan struct{} // closed when the peer sends an END_STREAM flag + donec chan struct{} // closed after the stream is in the closed state + on100 chan struct{} // buffered; written to if a 100 is received + + respHeaderRecv chan struct{} // closed when headers are received + res *http.Response // set if respHeaderRecv is closed flow flow // guarded by cc.mu inflow flow // guarded by cc.mu bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read readErr error // sticky read error; owned by transportResponseBody.Read - stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu - didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu - peerReset chan struct{} // closed on peer reset - resetErr error // populated before peerReset is closed + reqBody io.ReadCloser + reqBodyContentLength int64 // -1 means unknown + reqBodyClosed bool // body has been closed; guarded by cc.mu - done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + // owned by writeRequest: + sentEndStream bool // sent an END_STREAM flag to the peer + sentHeaders bool // owned by clientConnReadLoop: firstByte bool // got the first response byte pastHeaders bool // got first MetaHeadersFrame (actual headers) pastTrailers bool // got optional second MetaHeadersFrame (trailers) num1xx uint8 // number of 1xx responses seen + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer } -// awaitRequestCancel waits for the user to cancel a request or for the done -// channel to be signaled. A non-nil error is returned only if the request was -// canceled. -func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { - ctx := req.Context() - if req.Cancel == nil && ctx.Done() == nil { - return nil - } - select { - case <-req.Cancel: - return errRequestCanceled - case <-ctx.Done(): - return ctx.Err() - case <-done: - return nil - } -} - var got1xxFuncForTests func(int, textproto.MIMEHeader) error // get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, @@ -335,78 +365,65 @@ func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error return traceGot1xxResponseFunc(cs.trace) } -// awaitRequestCancel waits for the user to cancel a request, its context to -// expire, or for the request to be done (any way it might be removed from the -// cc.streams map: peer reset, successful completion, TCP connection breakage, -// etc). If the request is canceled, then cs will be canceled and closed. 
-func (cs *clientStream) awaitRequestCancel(req *http.Request) { - if err := awaitRequestCancel(req, cs.done); err != nil { - cs.cancelStream() - cs.bufPipe.CloseWithError(err) - } +func (cs *clientStream) abortStream(err error) { + cs.cc.mu.Lock() + defer cs.cc.mu.Unlock() + cs.abortStreamLocked(err) } -func (cs *clientStream) cancelStream() { - cc := cs.cc - cc.mu.Lock() - didReset := cs.didReset - cs.didReset = true - cc.mu.Unlock() - - if !didReset { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - cc.forgetStreamID(cs.ID) +func (cs *clientStream) abortStreamLocked(err error) { + cs.abortOnce.Do(func() { + cs.abortErr = err + close(cs.abort) + }) + if cs.reqBody != nil && !cs.reqBodyClosed { + cs.reqBody.Close() + cs.reqBodyClosed = true } -} - -// checkResetOrDone reports any error sent in a RST_STREAM frame by the -// server, or errStreamClosed if the stream is complete. -func (cs *clientStream) checkResetOrDone() error { - select { - case <-cs.peerReset: - return cs.resetErr - case <-cs.done: - return errStreamClosed - default: - return nil + // TODO(dneil): Clean up tests where cs.cc.cond is nil. + if cs.cc.cond != nil { + // Wake up writeRequestBody if it is waiting on flow control. + cs.cc.cond.Broadcast() } } -func (cs *clientStream) getStartedWrite() bool { +func (cs *clientStream) abortRequestBodyWrite() { cc := cs.cc cc.mu.Lock() defer cc.mu.Unlock() - return cs.startedWrite -} - -func (cs *clientStream) abortRequestBodyWrite(err error) { - if err == nil { - panic("nil error") - } - cc := cs.cc - cc.mu.Lock() - if cs.stopReqBody == nil { - cs.stopReqBody = err - if cs.req.Body != nil { - cs.req.Body.Close() - } + if cs.reqBody != nil && !cs.reqBodyClosed { + cs.reqBody.Close() + cs.reqBodyClosed = true cc.cond.Broadcast() } - cc.mu.Unlock() } type stickyErrWriter struct { - w io.Writer - err *error + conn net.Conn + timeout time.Duration + err *error } func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - n, err = sew.w.Write(p) - *sew.err = err - return + for { + if sew.timeout != 0 { + sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) + } + nn, err := sew.conn.Write(p[n:]) + n += nn + if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { + // Keep extending the deadline so long as we're making progress. + continue + } + if sew.timeout != 0 { + sew.conn.SetWriteDeadline(time.Time{}) + } + *sew.err = err + return n, err + } } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -479,20 +496,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) traceGotConn(req, cc, reused) - res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) + res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { - if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { + if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. if retry == 0 { + t.vlogf("RoundTrip retrying after failure: %v", err) continue } backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) select { case <-time.After(time.Second * time.Duration(backoff)): + t.vlogf("RoundTrip retrying after failure: %v", err) continue case <-req.Context().Done(): - return nil, req.Context().Err() + err = req.Context().Err() } } } @@ -523,7 +542,7 @@ var ( // response headers. It is always called with a non-nil error. 
// It returns either a request to retry (either the same request, or a // modified clone), or an error if the request can't be replayed. -func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { +func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { if !canRetryError(err) { return nil, err } @@ -536,7 +555,6 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt // If the request body can be reset back to its original // state via the optional req.GetBody, do that. if req.GetBody != nil { - // TODO: consider a req.Body.Close here? or audit that all caller paths do? body, err := req.GetBody() if err != nil { return nil, err @@ -548,10 +566,8 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt // The Request.Body can't reset back to the beginning, but we // don't seem to have started to read from it yet, so reuse - // the request directly. The "afterBodyWrite" means the - // bodyWrite process has started, which becomes true before - // the first Read. - if !afterBodyWrite { + // the request directly. + if err == errClientConnUnusable { return req, nil } @@ -563,6 +579,10 @@ func canRetryError(err error) bool { return true } if se, ok := err.(StreamError); ok { + if se.Code == ErrCodeProtocol && se.Cause == errFromPeer { + // See golang/go#47635, golang/go#42777 + return true + } return se.Code == ErrCodeRefusedStream } return false @@ -637,14 +657,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro tconn: c, readerDone: make(chan struct{}), nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d @@ -659,9 +680,16 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. - cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.bw = bufio.NewWriter(stickyErrWriter{ + conn: c, + timeout: t.WriteByteTimeout, + err: &cc.werr, + }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + if t.CountError != nil { + cc.fr.countError = t.CountError + } cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() @@ -706,14 +734,23 @@ func (cc *ClientConn) healthCheck() { // trigger the healthCheck again if there is no frame received. 
ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() + cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) if err != nil { + cc.vlogf("http2: Transport health check failure: %v", err) cc.closeForLostPing() - cc.t.connPool().MarkDead(cc) - return + } else { + cc.vlogf("http2: Transport health check success") } } +// SetDoNotReuse marks cc as not reusable for future HTTP requests. +func (cc *ClientConn) SetDoNotReuse() { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.doNotReuse = true +} + func (cc *ClientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() @@ -731,27 +768,94 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { last := f.LastStreamID for streamID, cs := range cc.streams { if streamID > last { - select { - case cs.resc <- resAndError{err: errClientConnGotGoAway}: - default: - } + cs.abortStreamLocked(errClientConnGotGoAway) } } } // CanTakeNewRequest reports whether the connection can take a new request, // meaning it has not been closed or received or sent a GOAWAY. +// +// If the caller is going to immediately make a new request on this +// connection, use ReserveNewRequest instead. func (cc *ClientConn) CanTakeNewRequest() bool { cc.mu.Lock() defer cc.mu.Unlock() return cc.canTakeNewRequestLocked() } +// ReserveNewRequest is like CanTakeNewRequest but also reserves a +// concurrent stream in cc. The reservation is decremented on the +// next call to RoundTrip. +func (cc *ClientConn) ReserveNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + if st := cc.idleStateLocked(); !st.canTakeNewRequest { + return false + } + cc.streamsReserved++ + return true +} + +// ClientConnState describes the state of a ClientConn. +type ClientConnState struct { + // Closed is whether the connection is closed. + Closed bool + + // Closing is whether the connection is in the process of + // closing. It may be closing due to shutdown, being a + // single-use connection, being marked as DoNotReuse, or + // having received a GOAWAY frame. + Closing bool + + // StreamsActive is how many streams are active. + StreamsActive int + + // StreamsReserved is how many streams have been reserved via + // ClientConn.ReserveNewRequest. + StreamsReserved int + + // StreamsPending is how many requests have been sent in excess + // of the peer's advertised MaxConcurrentStreams setting and + // are waiting for other streams to complete. + StreamsPending int + + // MaxConcurrentStreams is how many concurrent streams the + // peer advertised as acceptable. Zero means no SETTINGS + // frame has been received yet. + MaxConcurrentStreams uint32 + + // LastIdle, if non-zero, is when the connection last + // transitioned to idle state. + LastIdle time.Time +} + +// State returns a snapshot of cc's state. +func (cc *ClientConn) State() ClientConnState { + cc.wmu.Lock() + maxConcurrent := cc.maxConcurrentStreams + if !cc.seenSettings { + maxConcurrent = 0 + } + cc.wmu.Unlock() + + cc.mu.Lock() + defer cc.mu.Unlock() + return ClientConnState{ + Closed: cc.closed, + Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, + StreamsActive: len(cc.streams), + StreamsReserved: cc.streamsReserved, + StreamsPending: cc.pendingRequests, + LastIdle: cc.lastIdle, + MaxConcurrentStreams: maxConcurrent, + } +} + // clientConnIdleState describes the suitability of a client // connection to initiate a new RoundTrip request. 
type clientConnIdleState struct { canTakeNewRequest bool - freshConn bool // whether it's unused by any previous request } func (cc *ClientConn) idleState() clientConnIdleState { @@ -772,13 +876,13 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) + maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && + !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() - st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest return } @@ -807,9 +911,27 @@ func (cc *ClientConn) onIdleTimeout() { cc.closeIfIdle() } +func (cc *ClientConn) closeConn() error { + t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) + defer t.Stop() + return cc.tconn.Close() +} + +// A tls.Conn.Close can hang for a long time if the peer is unresponsive. +// Try to shut it down more aggressively. +func (cc *ClientConn) forceCloseConn() { + tc, ok := cc.tconn.(*tls.Conn) + if !ok { + return + } + if nc := tlsUnderlyingConn(tc); nc != nil { + nc.Close() + } +} + func (cc *ClientConn) closeIfIdle() { cc.mu.Lock() - if len(cc.streams) > 0 { + if len(cc.streams) > 0 || cc.streamsReserved > 0 { cc.mu.Unlock() return } @@ -821,18 +943,24 @@ func (cc *ClientConn) closeIfIdle() { if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) } - cc.tconn.Close() + cc.closeConn() +} + +func (cc *ClientConn) isDoNotReuseAndIdle() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.doNotReuse && len(cc.streams) == 0 } var shutdownEnterWaitStateHook = func() {} -// Shutdown gracefully close the client connection, waiting for running streams to complete. +// Shutdown gracefully closes the client connection, waiting for running streams to complete. 
func (cc *ClientConn) Shutdown(ctx context.Context) error { if err := cc.sendGoAway(); err != nil { return err } // Wait for all in-flight streams to complete or connection to close - done := make(chan error, 1) + done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { cc.mu.Lock() @@ -840,7 +968,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { for { if len(cc.streams) == 0 || cc.closed { cc.closed = true - done <- cc.tconn.Close() + close(done) break } if cancelled { @@ -851,8 +979,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { }() shutdownEnterWaitStateHook() select { - case err := <-done: - return err + case <-done: + return cc.closeConn() case <-ctx.Done(): cc.mu.Lock() // Free the goroutine above @@ -865,15 +993,18 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { func (cc *ClientConn) sendGoAway() error { cc.mu.Lock() - defer cc.mu.Unlock() - cc.wmu.Lock() - defer cc.wmu.Unlock() - if cc.closing { + closing := cc.closing + cc.closing = true + maxStreamID := cc.nextStreamID + cc.mu.Unlock() + if closing { // GOAWAY sent already return nil } + + cc.wmu.Lock() + defer cc.wmu.Unlock() // Send a graceful shutdown frame to server - maxStreamID := cc.nextStreamID if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { return err } @@ -881,7 +1012,6 @@ func (cc *ClientConn) sendGoAway() error { return err } // Prevent new requests - cc.closing = true return nil } @@ -889,18 +1019,13 @@ func (cc *ClientConn) sendGoAway() error { // err is sent to streams. func (cc *ClientConn) closeForError(err error) error { cc.mu.Lock() - defer cc.cond.Broadcast() - defer cc.mu.Unlock() - for id, cs := range cc.streams { - select { - case cs.resc <- resAndError{err: err}: - default: - } - cs.bufPipe.CloseWithError(err) - delete(cc.streams, id) - } cc.closed = true - return cc.tconn.Close() + for _, cs := range cc.streams { + cs.abortStreamLocked(err) + } + cc.cond.Broadcast() + cc.mu.Unlock() + return cc.closeConn() } // Close closes the client connection immediately. @@ -914,6 +1039,9 @@ func (cc *ClientConn) Close() error { // closes the client connection immediately. In-flight requests are interrupted. 
func (cc *ClientConn) closeForLostPing() error { err := errors.New("http2: client connection lost") + if f := cc.t.CountError; f != nil { + f("conn_close_lost_ping") + } return cc.closeForError(err) } @@ -978,41 +1106,158 @@ func actualContentLength(req *http.Request) int64 { return -1 } +func (cc *ClientConn) decrStreamReservations() { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.decrStreamReservationsLocked() +} + +func (cc *ClientConn) decrStreamReservationsLocked() { + if cc.streamsReserved > 0 { + cc.streamsReserved-- + } +} + func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { - resp, _, err := cc.roundTrip(req) - return resp, err + ctx := req.Context() + cs := &clientStream{ + cc: cc, + ctx: ctx, + reqCancel: req.Cancel, + isHead: req.Method == "HEAD", + reqBody: req.Body, + reqBodyContentLength: actualContentLength(req), + trace: httptrace.ContextClientTrace(ctx), + peerClosed: make(chan struct{}), + abort: make(chan struct{}), + respHeaderRecv: make(chan struct{}), + donec: make(chan struct{}), + } + go cs.doRequest(req) + + waitDone := func() error { + select { + case <-cs.donec: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cs.reqCancel: + return errRequestCanceled + } + } + + handleResponseHeaders := func() (*http.Response, error) { + res := cs.res + if res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + cs.abortRequestBodyWrite() + } + res.Request = req + res.TLS = cc.tlsState + if res.Body == noBody && actualContentLength(req) == 0 { + // If there isn't a request or response body still being + // written, then wait for the stream to be closed before + // RoundTrip returns. + if err := waitDone(); err != nil { + return nil, err + } + } + return res, nil + } + + for { + select { + case <-cs.respHeaderRecv: + return handleResponseHeaders() + case <-cs.abort: + select { + case <-cs.respHeaderRecv: + // If both cs.respHeaderRecv and cs.abort are signaling, + // pick respHeaderRecv. The server probably wrote the + // response and immediately reset the stream. + // golang.org/issue/49645 + return handleResponseHeaders() + default: + waitDone() + return nil, cs.abortErr + } + case <-ctx.Done(): + err := ctx.Err() + cs.abortStream(err) + return nil, err + case <-cs.reqCancel: + cs.abortStream(errRequestCanceled) + return nil, errRequestCanceled + } + } } -func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { +// doRequest runs for the duration of the request lifetime. +// +// It sends the request and performs post-request cleanup (closing Request.Body, etc.). +func (cs *clientStream) doRequest(req *http.Request) { + err := cs.writeRequest(req) + cs.cleanupWriteRequest(err) +} + +// writeRequest sends a request. +// +// It returns nil after the request is written, the response read, +// and the request stream is half-closed by the peer. +// +// It returns non-nil if the request ends otherwise. +// If the returned error is StreamError, the error Code may be used in resetting the stream. 
+func (cs *clientStream) writeRequest(req *http.Request) (err error) { + cc := cs.cc + ctx := cs.ctx + if err := checkConnHeaders(req); err != nil { - return nil, false, err - } - if cc.idleTimer != nil { - cc.idleTimer.Stop() + return err } - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return nil, false, err + // Acquire the new-request lock by writing to reqHeaderMu. + // This lock guards the critical section covering allocating a new stream ID + // (requires mu) and creating the stream (requires wmu). + if cc.reqHeaderMu == nil { + panic("RoundTrip on uninitialized ClientConn") // for tests + } + select { + case cc.reqHeaderMu <- struct{}{}: + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() } - hasTrailers := trailers != "" cc.mu.Lock() - if err := cc.awaitOpenSlotForRequest(req); err != nil { + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + cc.decrStreamReservationsLocked() + if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil { cc.mu.Unlock() - return nil, false, err + <-cc.reqHeaderMu + return err } - - body := req.Body - contentLen := actualContentLength(req) - hasBody := contentLen != 0 + cc.addStreamLocked(cs) // assigns stream ID + if isConnectionCloseRequest(req) { + cc.doNotReuse = true + } + cc.mu.Unlock() // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - var requestedGzip bool if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" && - req.Method != "HEAD" { + !cs.isHead { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. // See: https://zlib.net/zlib_faq.html#faq39 @@ -1025,183 +1270,224 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf // We don't request gzip if the request is for a range, since // auto-decoding a portion of a gzipped document will just fail // anyway. See https://golang.org/issue/8923 - requestedGzip = true + cs.requestedGzip = true } - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is - // sent by writeRequestBody below, along with any Trailers, - // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + continueTimeout := cc.t.expectContinueTimeout() + if continueTimeout != 0 { + if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") { + continueTimeout = 0 + } else { + cs.on100 = make(chan struct{}, 1) + } + } + + // Past this point (where we send request headers), it is possible for + // RoundTrip to return successfully. Since the RoundTrip contract permits + // the caller to "mutate or reuse" the Request after closing the Response's Body, + // we must take care when referencing the Request from here on. 
+ err = cs.encodeAndWriteHeaders(req) + <-cc.reqHeaderMu if err != nil { - cc.mu.Unlock() - return nil, false, err + return err } - cs := cc.newStream() - cs.req = req - cs.trace = httptrace.ContextClientTrace(req.Context()) - cs.requestedGzip = requestedGzip - bodyWriter := cc.t.getBodyWriterState(cs, body) - cs.on100 = bodyWriter.on100 + hasBody := cs.reqBodyContentLength != 0 + if !hasBody { + cs.sentEndStream = true + } else { + if continueTimeout != 0 { + traceWait100Continue(cs.trace) + timer := time.NewTimer(continueTimeout) + select { + case <-timer.C: + err = nil + case <-cs.on100: + err = nil + case <-cs.abort: + err = cs.abortErr + case <-ctx.Done(): + err = ctx.Err() + case <-cs.reqCancel: + err = errRequestCanceled + } + timer.Stop() + if err != nil { + traceWroteRequest(cs.trace, err) + return err + } + } - defer func() { - cc.wmu.Lock() - werr := cc.werr - cc.wmu.Unlock() - if werr != nil { - cc.Close() + if err = cs.writeRequestBody(req); err != nil { + if err != errStopReqBodyWrite { + traceWroteRequest(cs.trace, err) + return err + } + } else { + cs.sentEndStream = true } - }() + } + + traceWroteRequest(cs.trace, err) + + var respHeaderTimer <-chan time.Time + var respHeaderRecv chan struct{} + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + respHeaderRecv = cs.respHeaderRecv + } + // Wait until the peer half-closes its end of the stream, + // or until the request is aborted (via context, error, or otherwise), + // whichever comes first. + for { + select { + case <-cs.peerClosed: + return nil + case <-respHeaderTimer: + return errTimeout + case <-respHeaderRecv: + respHeaderRecv = nil + respHeaderTimer = nil // keep waiting for END_STREAM + case <-cs.abort: + return cs.abortErr + case <-ctx.Done(): + return ctx.Err() + case <-cs.reqCancel: + return errRequestCanceled + } + } +} + +func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { + cc := cs.cc + ctx := cs.ctx cc.wmu.Lock() + defer cc.wmu.Unlock() + + // If the request was canceled while waiting for cc.mu, just quit. + select { + case <-cs.abort: + return cs.abortErr + case <-ctx.Done(): + return ctx.Err() + case <-cs.reqCancel: + return errRequestCanceled + default: + } + + // Encode headers. + // + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return err + } + hasTrailers := trailers != "" + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + if err != nil { + return err + } + + // Write the request. endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) - cc.wmu.Unlock() + cs.sentHeaders = true + err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) traceWroteHeaders(cs.trace) - cc.mu.Unlock() + return err +} - if werr != nil { - if hasBody { - req.Body.Close() // per RoundTripper contract - bodyWriter.cancel() - } - cc.forgetStreamID(cs.ID) - // Don't bother sending a RST_STREAM (our write already failed; - // no need to keep writing) - traceWroteRequest(cs.trace, werr) - return nil, false, werr - } +// cleanupWriteRequest performs post-request tasks. 
+// +// If err (the result of writeRequest) is non-nil and the stream is not closed, +// cleanupWriteRequest will send a reset to the peer. +func (cs *clientStream) cleanupWriteRequest(err error) { + cc := cs.cc - var respHeaderTimer <-chan time.Time - if hasBody { - bodyWriter.scheduleBodyWrite() - } else { - traceWroteRequest(cs.trace, nil) - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } + if cs.ID == 0 { + // We were canceled before creating the stream, so return our reservation. + cc.decrStreamReservations() } - readLoopResCh := cs.resc - bodyWritten := false - ctx := req.Context() + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cc.mu.Lock() + bodyClosed := cs.reqBodyClosed + cs.reqBodyClosed = true + cc.mu.Unlock() + if !bodyClosed && cs.reqBody != nil { + cs.reqBody.Close() + } - handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - if hasBody && !bodyWritten { - <-bodyWriter.resc + if err != nil && cs.sentEndStream { + // If the connection is closed immediately after the response is read, + // we may be aborted before finishing up here. If the stream was closed + // cleanly on both sides, there is no error. + select { + case <-cs.peerClosed: + err = nil + default: + } + } + if err != nil { + cs.abortStream(err) // possibly redundant, but harmless + if cs.sentHeaders { + if se, ok := err.(StreamError); ok { + if se.Cause != errFromPeer { + cc.writeStreamReset(cs.ID, se.Code, err) + } + } else { + cc.writeStreamReset(cs.ID, ErrCodeCancel, err) } } - if re.err != nil { - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), re.err + cs.bufPipe.CloseWithError(err) // no-op if already closed + } else { + if cs.sentHeaders && !cs.sentEndStream { + cc.writeStreamReset(cs.ID, ErrCodeNo, nil) } - res.Request = req - res.TLS = cc.tlsState - return res, false, nil + cs.bufPipe.CloseWithError(errRequestCanceled) } - - handleError := func(err error) (*http.Response, bool, error) { - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - <-bodyWriter.resc - } + if cs.ID != 0 { cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), err } - for { - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - case <-respHeaderTimer: - return handleError(errTimeout) - case <-ctx.Done(): - return handleError(ctx.Err()) - case <-req.Cancel: - return handleError(errRequestCanceled) - case <-cs.peerReset: - // processResetStream already removed the - // stream from the streams map; no need for - // forgetStreamID. 
- return nil, cs.getStartedWrite(), cs.resetErr - case err := <-bodyWriter.resc: - bodyWritten = true - // Prefer the read loop's response, if available. Issue 16102. - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - default: - } - if err != nil { - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), err - } - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } + cc.wmu.Lock() + werr := cc.werr + cc.wmu.Unlock() + if werr != nil { + cc.Close() } + + close(cs.donec) } -// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// awaitOpenSlotForStream waits until len(streams) < maxConcurrentStreams. // Must hold cc.mu. -func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { - var waitingForConn chan struct{} - var waitingForConnErr error // guarded by cc.mu +func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { - if waitingForConn != nil { - close(waitingForConn) - } return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { - if waitingForConn != nil { - close(waitingForConn) - } + if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { return nil } - // Unfortunately, we cannot wait on a condition variable and channel at - // the same time, so instead, we spin up a goroutine to check if the - // request is canceled while we wait for a slot to open in the connection. - if waitingForConn == nil { - waitingForConn = make(chan struct{}) - go func() { - if err := awaitRequestCancel(req, waitingForConn); err != nil { - cc.mu.Lock() - waitingForConnErr = err - cc.cond.Broadcast() - cc.mu.Unlock() - } - }() - } cc.pendingRequests++ cc.cond.Wait() cc.pendingRequests-- - if waitingForConnErr != nil { - return waitingForConnErr + select { + case <-cs.abort: + return cs.abortErr + default: } } } @@ -1228,10 +1514,6 @@ func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize cc.fr.WriteContinuation(streamID, endHeaders, chunk) } } - // TODO(bradfitz): this Flush could potentially block (as - // could the WriteHeaders call(s) above), which means they - // wouldn't respond to Request.Cancel being readable. That's - // rare, but this should probably be in a goroutine. 
cc.bw.Flush() return cc.werr } @@ -1258,7 +1540,7 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { if n > max { n = max } - if cl := actualContentLength(cs.req); cl != -1 && cl+1 < n { + if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n { // Add an extra byte past the declared content-length to // give the caller's Request.Body io.Reader a chance to // give us more bytes than they declared, so we can catch it @@ -1273,31 +1555,13 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { var bufPool sync.Pool // of *[]byte -func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { +func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { cc := cs.cc + body := cs.reqBody sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - defer func() { - traceWroteRequest(cs.trace, err) - // TODO: write h12Compare test showing whether - // Request.Body is closed by the Transport, - // and in multiple cases: server replies <=299 and >299 - // while still writing request body - var cerr error - cc.mu.Lock() - if cs.stopReqBody == nil { - cs.stopReqBody = errStopReqBodyWrite - cerr = bodyCloser.Close() - } - cc.mu.Unlock() - if err == nil { - err = cerr - } - }() - - req := cs.req hasTrailers := req.Trailer != nil - remainLen := actualContentLength(req) + remainLen := cs.reqBodyContentLength hasContentLen := remainLen != -1 cc.mu.Lock() @@ -1335,29 +1599,29 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( } if remainLen < 0 { err = errReqBodyTooLong - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) return err } } - if err == io.EOF { - sawEOF = true - err = nil - } else if err != nil { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) - return err + if err != nil { + cc.mu.Lock() + bodyClosed := cs.reqBodyClosed + cc.mu.Unlock() + switch { + case bodyClosed: + return errStopReqBodyWrite + case err == io.EOF: + sawEOF = true + err = nil + default: + return err + } } remain := buf[:n] for len(remain) > 0 && err == nil { var allowed int32 allowed, err = cs.awaitFlowControl(len(remain)) - switch { - case err == errStopReqBodyWrite: - return err - case err == errStopReqBodyWriteAndCancel: - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - return err - case err != nil: + if err != nil { return err } cc.wmu.Lock() @@ -1388,21 +1652,27 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( return nil } + // Since the RoundTrip contract permits the caller to "mutate or reuse" + // a request after the Response's Body is closed, verify that this hasn't + // happened before accessing the trailers. + cc.mu.Lock() + trailer := req.Trailer + err = cs.abortErr + cc.mu.Unlock() + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() var trls []byte - if hasTrailers { - cc.mu.Lock() - trls, err = cc.encodeTrailers(req) - cc.mu.Unlock() + if len(trailer) > 0 { + trls, err = cc.encodeTrailers(trailer) if err != nil { - cc.writeStreamReset(cs.ID, ErrCodeInternal, err) - cc.forgetStreamID(cs.ID) return err } } - cc.wmu.Lock() - defer cc.wmu.Unlock() - // Two ways to send END_STREAM: either with trailers, or // with an empty DATA frame. if len(trls) > 0 { @@ -1422,17 +1692,24 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( // if the stream is dead. 
func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { cc := cs.cc + ctx := cs.ctx cc.mu.Lock() defer cc.mu.Unlock() for { if cc.closed { return 0, errClientConnClosed } - if cs.stopReqBody != nil { - return 0, cs.stopReqBody + if cs.reqBodyClosed { + return 0, errStopReqBodyWrite } - if err := cs.checkResetOrDone(); err != nil { - return 0, err + select { + case <-cs.abort: + return 0, cs.abortErr + case <-ctx.Done(): + return 0, ctx.Err() + case <-cs.reqCancel: + return 0, errRequestCanceled + default: } if a := cs.flow.available(); a > 0 { take := a @@ -1450,9 +1727,14 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -// requires cc.mu be held. +var errNilRequestURL = errors.New("http2: Request.URI is nil") + +// requires cc.wmu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() + if req.URL == nil { + return nil, errNilRequestURL + } host := req.Host if host == "" { @@ -1488,7 +1770,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } for _, v := range vv { if !httpguts.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + // Don't include the value in the error, because it may be sensitive. + return nil, fmt.Errorf("invalid HTTP header value for header %q", k) } } } @@ -1638,12 +1921,12 @@ func shouldSendReqContentLength(method string, contentLength int64) bool { } } -// requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { +// requires cc.wmu be held. +func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { cc.hbuf.Reset() hlSize := uint64(0) - for k, vv := range req.Trailer { + for k, vv := range trailer { for _, v := range vv { hf := hpack.HeaderField{Name: k, Value: v} hlSize += uint64(hf.Size()) @@ -1653,7 +1936,7 @@ func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { return nil, errRequestHeaderListSize } - for k, vv := range req.Trailer { + for k, vv := range trailer { lowKey, ascii := asciiToLower(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header @@ -1683,51 +1966,51 @@ type resAndError struct { } // requires cc.mu be held. -func (cc *ClientConn) newStream() *clientStream { - cs := &clientStream{ - cc: cc, - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), - peerReset: make(chan struct{}), - done: make(chan struct{}), - } +func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) cs.inflow.add(transportDefaultStreamFlow) cs.inflow.setConnFlow(&cc.inflow) + cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs - return cs + if cs.ID == 0 { + panic("assigned stream ID 0") + } } func (cc *ClientConn) forgetStreamID(id uint32) { - cc.streamByID(id, true) -} - -func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { cc.mu.Lock() - defer cc.mu.Unlock() - cs := cc.streams[id] - if andRemove && cs != nil && !cc.closed { - cc.lastActive = time.Now() - delete(cc.streams, id) - if len(cc.streams) == 0 && cc.idleTimer != nil { - cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() - } - close(cs.done) - // Wake up checkResetOrDone via clientStream.awaitFlowControl and - // wake up RoundTrip if there is a pending request. 
- cc.cond.Broadcast() + slen := len(cc.streams) + delete(cc.streams, id) + if len(cc.streams) != slen-1 { + panic("forgetting unknown stream id") + } + cc.lastActive = time.Now() + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + cc.lastIdle = time.Now() + } + // Wake up writeRequestBody via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() + if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) + } + cc.closed = true + defer cc.closeConn() } - return cs + + cc.mu.Unlock() } // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { - _ incomparable - cc *ClientConn - closeWhenIdle bool + _ incomparable + cc *ClientConn } // readLoop runs in its own goroutine and reads and dispatches frames. @@ -1765,8 +2048,8 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) + cc.t.connPool().MarkDead(cc) + defer cc.closeConn() defer close(cc.readerDone) if cc.idleTimer != nil { @@ -1787,23 +2070,49 @@ func (rl *clientConnReadLoop) cleanup() { } else if err == io.EOF { err = io.ErrUnexpectedEOF } + cc.closed = true for _, cs := range cc.streams { - cs.bufPipe.CloseWithError(err) // no-op if already closed select { - case cs.resc <- resAndError{err: err}: + case <-cs.peerClosed: + // The server closed the stream before closing the conn, + // so no need to interrupt it. default: + cs.abortStreamLocked(err) } - close(cs.done) } - cc.closed = true cc.cond.Broadcast() cc.mu.Unlock() } +// countReadFrameError calls Transport.CountError with a string +// representing err. 
+func (cc *ClientConn) countReadFrameError(err error) { + f := cc.t.CountError + if f == nil || err == nil { + return + } + if ce, ok := err.(ConnectionError); ok { + errCode := ErrCode(ce) + f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken())) + return + } + if errors.Is(err, io.EOF) { + f("read_frame_eof") + return + } + if errors.Is(err, io.ErrUnexpectedEOF) { + f("read_frame_unexpected_eof") + return + } + if errors.Is(err, ErrFrameTooLarge) { + f("read_frame_too_large") + return + } + f("read_frame_other") +} + func (rl *clientConnReadLoop) run() error { cc := rl.cc - rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a HEADERS reply gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout var t *time.Timer @@ -1820,9 +2129,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, false); cs != nil { - cs.cc.writeStreamReset(cs.ID, se.Code, err) - cs.cc.forgetStreamID(cs.ID) + if cs := rl.streamByID(se.StreamID); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -1830,6 +2137,7 @@ func (rl *clientConnReadLoop) run() error { } continue } else if err != nil { + cc.countReadFrameError(err) return err } if VerboseLogs { @@ -1842,22 +2150,16 @@ func (rl *clientConnReadLoop) run() error { } gotSettings = true } - maybeIdle := false // whether frame might transition us to idle switch f := f.(type) { case *MetaHeadersFrame: err = rl.processHeaders(f) - maybeIdle = true - gotReply = true case *DataFrame: err = rl.processData(f) - maybeIdle = true case *GoAwayFrame: err = rl.processGoAway(f) - maybeIdle = true case *RSTStreamFrame: err = rl.processResetStream(f) - maybeIdle = true case *SettingsFrame: err = rl.processSettings(f) case *PushPromiseFrame: @@ -1875,38 +2177,24 @@ func (rl *clientConnReadLoop) run() error { } return err } - if rl.closeWhenIdle && gotReply && maybeIdle { - cc.closeIfIdle() - } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) + cs := rl.streamByID(f.StreamID) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this // was just something we canceled, ignore it. return nil } - if f.StreamEnded() { - // Issue 20521: If the stream has ended, streamByID() causes - // clientStream.done to be closed, which causes the request's bodyWriter - // to be closed with an errStreamClosed, which may be received by - // clientConn.RoundTrip before the result of processing these headers. - // Deferring stream closure allows the header processing to occur first. - // clientConn.RoundTrip may still receive the bodyWriter error first, but - // the fix for issue 16102 prioritises any response. - // - // Issue 22413: If there is no request body, we should close the - // stream before writing to cs.resc so that the stream is closed - // immediately once RoundTrip returns. 
- if cs.req.Body != nil { - defer cc.forgetStreamID(f.StreamID) - } else { - cc.forgetStreamID(f.StreamID) - } + if cs.readClosed { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + Cause: errors.New("protocol error: headers after END_STREAM"), + }) + return nil } if !cs.firstByte { if cs.trace != nil { @@ -1930,9 +2218,11 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { return err } // Any other error type is a stream error. - cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) - cc.forgetStreamID(cs.ID) - cs.resc <- resAndError{err: err} + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + Cause: err, + }) return nil // return nil from process* funcs to keep conn alive } if res == nil { @@ -1940,7 +2230,11 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { return nil } cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} + cs.res = res + close(cs.respHeaderRecv) + if f.StreamEnded() { + rl.endStream(cs) + } return nil } @@ -2002,6 +2296,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra } if statusCode >= 100 && statusCode <= 199 { + if f.StreamEnded() { + return nil, errors.New("1xx informational response with END_STREAM flag") + } cs.num1xx++ const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http if cs.num1xx > max1xxResponses { @@ -2014,42 +2311,49 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra } if statusCode == 100 { traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire + select { + case cs.on100 <- struct{}{}: + default: } } cs.pastHeaders = false // do it all again return nil, nil } - streamEnded := f.StreamEnded() - isHead := cs.req.Method == "HEAD" - if !streamEnded || isHead { - res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil { - res.ContentLength = int64(cl) - } else { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } else if len(clens) > 1 { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil { + res.ContentLength = int64(cl) + } else { // TODO: care? unlike http/1, it won't mess up our framing, so it's // more safe smuggling-wise to ignore. } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. 
+ } else if f.StreamEnded() && !cs.isHead { + res.ContentLength = 0 } - if streamEnded || isHead { + if cs.isHead { res.Body = noBody return res, nil } - cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + if f.StreamEnded() { + if res.ContentLength > 0 { + res.Body = missingBody{} + } else { + res.Body = noBody + } + return res, nil + } + + cs.bufPipe.setBuffer(&dataBuffer{expected: res.ContentLength}) cs.bytesRemain = res.ContentLength res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(cs.req) - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + if cs.requestedGzip && asciiEqualFold(res.Header.Get("Content-Encoding"), "gzip") { res.Header.Del("Content-Encoding") res.Header.Del("Content-Length") res.ContentLength = -1 @@ -2088,8 +2392,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr } // transportResponseBody is the concrete type of Transport.RoundTrip's -// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. -// On Close it sends RST_STREAM if EOF wasn't already seen. +// Response.Body. It is an io.ReadCloser. type transportResponseBody struct { cs *clientStream } @@ -2107,7 +2410,7 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { n = int(cs.bytesRemain) if err == nil { err = errors.New("net/http: server replied with more than declared Content-Length; truncated") - cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + cs.abortStream(err) } cs.readErr = err return int(cs.bytesRemain), err @@ -2125,8 +2428,6 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { } cc.mu.Lock() - defer cc.mu.Unlock() - var connAdd, streamAdd int32 // Check the conn-level first, before the stream-level. if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { @@ -2143,6 +2444,8 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { cs.inflow.add(streamAdd) } } + cc.mu.Unlock() + if connAdd != 0 || streamAdd != 0 { cc.wmu.Lock() defer cc.wmu.Unlock() @@ -2163,34 +2466,45 @@ func (b transportResponseBody) Close() error { cs := b.cs cc := cs.cc - serverSentStreamEnd := cs.bufPipe.Err() == io.EOF unread := cs.bufPipe.Len() - - if unread > 0 || !serverSentStreamEnd { + if unread > 0 { cc.mu.Lock() - cc.wmu.Lock() - if !serverSentStreamEnd { - cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) - cs.didReset = true - } // Return connection-level flow control. if unread > 0 { cc.inflow.add(int32(unread)) + } + cc.mu.Unlock() + + // TODO(dneil): Acquiring this mutex can block indefinitely. + // Move flow control return to a goroutine? + cc.wmu.Lock() + // Return connection-level flow control. + if unread > 0 { cc.fr.WriteWindowUpdate(0, uint32(unread)) } cc.bw.Flush() cc.wmu.Unlock() - cc.mu.Unlock() } cs.bufPipe.BreakWithError(errClosedResponseBody) - cc.forgetStreamID(cs.ID) + cs.abortStream(errClosedResponseBody) + + select { + case <-cs.donec: + case <-cs.ctx.Done(): + // See golang/go#49366: The net/http package can cancel the + // request context after the response body is fully read. + // Don't treat this as an error. 
+ return nil + case <-cs.reqCancel: + return errRequestCanceled + } return nil } func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) + cs := rl.streamByID(f.StreamID) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2219,6 +2533,14 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } return nil } + if cs.readClosed { + cc.logf("protocol error: received DATA after END_STREAM") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } if !cs.firstByte { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ @@ -2228,7 +2550,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { return nil } if f.Length > 0 { - if cs.req.Method == "HEAD" && len(data) > 0 { + if cs.isHead && len(data) > 0 { cc.logf("protocol error: received DATA on a HEAD request") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2250,30 +2572,39 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { if pad := int(f.Length) - len(data); pad > 0 { refund += pad } - // Return len(data) now if the stream is already closed, - // since data will never be read. - didReset := cs.didReset - if didReset { - refund += len(data) + + didReset := false + var err error + if len(data) > 0 { + if _, err = cs.bufPipe.Write(data); err != nil { + // Return len(data) now if the stream is already closed, + // since data will never be read. + didReset = true + refund += len(data) + } } + if refund > 0 { cc.inflow.add(int32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + } + } + cc.mu.Unlock() + + if refund > 0 { cc.wmu.Lock() cc.fr.WriteWindowUpdate(0, uint32(refund)) if !didReset { - cs.inflow.add(int32(refund)) cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) } cc.bw.Flush() cc.wmu.Unlock() } - cc.mu.Unlock() - if len(data) > 0 && !didReset { - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err - } + if err != nil { + rl.endStreamError(cs, err) + return nil } } @@ -2286,24 +2617,32 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { func (rl *clientConnReadLoop) endStream(cs *clientStream) { // TODO: check that any declared content-length matches, like // server.go's (*stream).endStream method. - rl.endStreamError(cs, nil) + if !cs.readClosed { + cs.readClosed = true + // Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a + // race condition: The caller can read io.EOF from Response.Body + // and close the body before we close cs.peerClosed, causing + // cleanupWriteRequest to send a RST_STREAM. 
+ rl.cc.mu.Lock() + defer rl.cc.mu.Unlock() + cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers) + close(cs.peerClosed) + } } func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { - var code func() - if err == nil { - err = io.EOF - code = cs.copyTrailers - } - if isConnectionCloseRequest(cs.req) { - rl.closeWhenIdle = true - } - cs.bufPipe.closeWithErrorAndCode(err, code) + cs.readAborted = true + cs.abortStream(err) +} - select { - case cs.resc <- resAndError{err: err}: - default: +func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { + rl.cc.mu.Lock() + defer rl.cc.mu.Unlock() + cs := rl.cc.streams[id] + if cs != nil && !cs.readAborted { + return cs } + return nil } func (cs *clientStream) copyTrailers() { @@ -2322,12 +2661,33 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { if f.ErrCode != 0 { // TODO: deal with GOAWAY more. particularly the error code cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + if fn := cc.t.CountError; fn != nil { + fn("recv_goaway_" + f.ErrCode.stringToken()) + } + } cc.setGoAway(f) return nil } func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + // Locking both mu and wmu here allows frame encoding to read settings with only wmu held. + // Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless. + cc.wmu.Lock() + defer cc.wmu.Unlock() + + if err := rl.processSettingsNoWrite(f); err != nil { + return err + } + if !f.IsAck() { + cc.fr.WriteSettingsAck() + cc.bw.Flush() + } + return nil +} + +func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc := rl.cc cc.mu.Lock() defer cc.mu.Unlock() @@ -2340,12 +2700,14 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { return ConnectionError(ErrCodeProtocol) } + var seenMaxConcurrentStreams bool err := f.ForeachSetting(func(s Setting) error { switch s.ID { case SettingMaxFrameSize: cc.maxFrameSize = s.Val case SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val + seenMaxConcurrentStreams = true case SettingMaxHeaderListSize: cc.peerMaxHeaderListSize = uint64(s.Val) case SettingInitialWindowSize: @@ -2377,17 +2739,23 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { return err } - cc.wmu.Lock() - defer cc.wmu.Unlock() + if !cc.seenSettings { + if !seenMaxConcurrentStreams { + // This was the servers initial SETTINGS frame and it + // didn't contain a MAX_CONCURRENT_STREAMS field so + // increase the number of concurrent streams this + // connection can establish to our default. + cc.maxConcurrentStreams = defaultMaxConcurrentStreams + } + cc.seenSettings = true + } - cc.fr.WriteSettingsAck() - cc.bw.Flush() - return cc.werr + return nil } func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := cc.streamByID(f.StreamID, false) + cs := rl.streamByID(f.StreamID) if f.StreamID != 0 && cs == nil { return nil } @@ -2407,24 +2775,22 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.cc.streamByID(f.StreamID, true) + cs := rl.streamByID(f.StreamID) if cs == nil { - // TODO: return error if server tries to RST_STEAM an idle stream + // TODO: return error if server tries to RST_STREAM an idle stream return nil } - select { - case <-cs.peerReset: - // Already reset. - // This is the only goroutine - // which closes this, so there - // isn't a race. 
- default: - err := streamError(cs.ID, f.ErrCode) - cs.resetErr = err - close(cs.peerReset) - cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + serr := streamError(cs.ID, f.ErrCode) + serr.Cause = errFromPeer + if f.ErrCode == ErrCodeProtocol { + rl.cc.SetDoNotReuse() } + if fn := cs.cc.t.CountError; fn != nil { + fn("recv_rststream_" + f.ErrCode.stringToken()) + } + cs.abortStream(serr) + + cs.bufPipe.CloseWithError(serr) return nil } @@ -2446,19 +2812,24 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - cc.wmu.Lock() - if err := cc.fr.WritePing(false, p); err != nil { - cc.wmu.Unlock() - return err - } - if err := cc.bw.Flush(); err != nil { - cc.wmu.Unlock() - return err - } - cc.wmu.Unlock() + errc := make(chan error, 1) + go func() { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(false, p); err != nil { + errc <- err + return + } + if err := cc.bw.Flush(); err != nil { + errc <- err + return + } + }() select { case <-c: return nil + case err := <-errc: + return err case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -2535,6 +2906,11 @@ func (t *Transport) logf(format string, args ...interface{}) { var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) +type missingBody struct{} + +func (missingBody) Close() error { return nil } +func (missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF } + func strSliceContains(ss []string, s string) bool { for _, v := range ss { if v == s { @@ -2580,87 +2956,6 @@ type errorReader struct{ err error } func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } -// bodyWriterState encapsulates various state around the Transport's writing -// of the request body, particularly regarding doing delayed writes of the body -// when the request contains "Expect: 100-continue". -type bodyWriterState struct { - cs *clientStream - timer *time.Timer // if non-nil, we're doing a delayed write - fnonce *sync.Once // to call fn with - fn func() // the code to run in the goroutine, writing the body - resc chan error // result of fn's execution - delay time.Duration // how long we should delay a delayed write for -} - -func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { - s.cs = cs - if body == nil { - return - } - resc := make(chan error, 1) - s.resc = resc - s.fn = func() { - cs.cc.mu.Lock() - cs.startedWrite = true - cs.cc.mu.Unlock() - resc <- cs.writeRequestBody(body, cs.req.Body) - } - s.delay = t.expectContinueTimeout() - if s.delay == 0 || - !httpguts.HeaderValuesContainsToken( - cs.req.Header["Expect"], - "100-continue") { - return - } - s.fnonce = new(sync.Once) - - // Arm the timer with a very large duration, which we'll - // intentionally lower later. It has to be large now because - // we need a handle to it before writing the headers, but the - // s.delay value is defined to not start until after the - // request headers were written. - const hugeDuration = 365 * 24 * time.Hour - s.timer = time.AfterFunc(hugeDuration, func() { - s.fnonce.Do(s.fn) - }) - return -} - -func (s bodyWriterState) cancel() { - if s.timer != nil { - if s.timer.Stop() { - s.resc <- nil - } - } -} - -func (s bodyWriterState) on100() { - if s.timer == nil { - // If we didn't do a delayed write, ignore the server's - // bogus 100 continue response. 
- return - } - s.timer.Stop() - go func() { s.fnonce.Do(s.fn) }() -} - -// scheduleBodyWrite starts writing the body, either immediately (in -// the common case) or after the delay timeout. It should not be -// called until after the headers have been written. -func (s bodyWriterState) scheduleBodyWrite() { - if s.timer == nil { - // We're not doing a delayed write (see - // getBodyWriterState), so just start the writing - // goroutine immediately. - go s.fn() - return - } - traceWait100Continue(s.cs.trace) - if s.timer.Stop() { - s.timer.Reset(s.delay) - } -} - // isConnectionCloseRequest reports whether req should use its own // connection for a single request and then close the connection. func isConnectionCloseRequest(req *http.Request) bool { diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index f24d2b1e7d..c7cd001739 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -32,7 +32,8 @@ type WriteScheduler interface { // Pop dequeues the next frame to write. Returns false if no frames can // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. No frames should be discarded except by CloseStream. + // order they are Push'd, except RST_STREAM frames. No frames should be + // discarded except by CloseStream. Pop() (wr FrameWriteRequest, ok bool) } @@ -52,6 +53,7 @@ type FrameWriteRequest struct { // stream is the stream on which this frame will be written. // nil for non-stream frames like PING and SETTINGS. + // nil for RST_STREAM streams, which use the StreamError.StreamID field instead. stream *stream // done, if non-nil, must be a buffered channel with space for diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go index 9a7b9e581c..f2e55e05ce 100644 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -45,11 +45,11 @@ func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityP } func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { - id := wr.StreamID() - if id == 0 { + if wr.isControl() { ws.zero.push(wr) return } + id := wr.StreamID() q, ok := ws.sq[id] if !ok { q = ws.queuePool.get() @@ -59,7 +59,7 @@ func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { } func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { - // Control frames first. + // Control and RST_STREAM frames first. if !ws.zero.empty() { return ws.zero.shift(), true } diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go new file mode 100644 index 0000000000..c5c4338dbe --- /dev/null +++ b/vendor/golang.org/x/net/idna/go118.go @@ -0,0 +1,14 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package idna + +// Transitional processing is disabled by default in Go 1.18. 
+// https://golang.org/issue/47510 +const transitionalLookup = false diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 5208ba6cb8..64ccf85feb 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -59,10 +59,10 @@ type Option func(*options) // Transitional sets a Profile to use the Transitional mapping as defined in UTS // #46. This will cause, for example, "ß" to be mapped to "ss". Using the // transitional mapping provides a compromise between IDNA2003 and IDNA2008 -// compatibility. It is used by most browsers when resolving domain names. This +// compatibility. It is used by some browsers when resolving domain names. This // option is only meaningful if combined with MapForLookup. func Transitional(transitional bool) Option { - return func(o *options) { o.transitional = true } + return func(o *options) { o.transitional = transitional } } // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts @@ -284,7 +284,7 @@ var ( punycode = &Profile{} lookup = &Profile{options{ - transitional: true, + transitional: transitionalLookup, useSTD3Rules: true, checkHyphens: true, checkJoiners: true, diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index 55f718f127..aae6aac872 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -58,10 +58,10 @@ type Option func(*options) // Transitional sets a Profile to use the Transitional mapping as defined in UTS // #46. This will cause, for example, "ß" to be mapped to "ss". Using the // transitional mapping provides a compromise between IDNA2003 and IDNA2008 -// compatibility. It is used by most browsers when resolving domain names. This +// compatibility. It is used by some browsers when resolving domain names. This // option is only meaningful if combined with MapForLookup. func Transitional(transitional bool) Option { - return func(o *options) { o.transitional = true } + return func(o *options) { o.transitional = transitional } } // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go new file mode 100644 index 0000000000..3aaccab1c5 --- /dev/null +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -0,0 +1,12 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.18 +// +build !go1.18 + +package idna + +const transitionalLookup = true diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go index 02c7d59af3..e8e3ac11a9 100644 --- a/vendor/golang.org/x/net/idna/punycode.go +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -49,6 +49,7 @@ func decode(encoded string) (string, error) { } } i, n, bias := int32(0), initialN, initialBias + overflow := false for pos < len(encoded) { oldI, w := i, int32(1) for k := base; ; k += base { @@ -60,29 +61,32 @@ func decode(encoded string) (string, error) { return "", punyError(encoded) } pos++ - i += digit * w - if i < 0 { + i, overflow = madd(i, digit, w) + if overflow { return "", punyError(encoded) } t := k - bias - if t < tmin { + if k <= bias { t = tmin - } else if t > tmax { + } else if k >= bias+tmax { t = tmax } if digit < t { break } - w *= base - t - if w >= math.MaxInt32/base { + w, overflow = madd(0, w, base-t) + if overflow { return "", punyError(encoded) } } + if len(output) >= 1024 { + return "", punyError(encoded) + } x := int32(len(output) + 1) bias = adapt(i-oldI, x, oldI == 0) n += i / x i %= x - if n > utf8.MaxRune || len(output) >= 1024 { + if n < 0 || n > utf8.MaxRune { return "", punyError(encoded) } output = append(output, 0) @@ -115,6 +119,7 @@ func encode(prefix, s string) (string, error) { if b > 0 { output = append(output, '-') } + overflow := false for remaining != 0 { m := int32(0x7fffffff) for _, r := range s { @@ -122,8 +127,8 @@ func encode(prefix, s string) (string, error) { m = r } } - delta += (m - n) * (h + 1) - if delta < 0 { + delta, overflow = madd(delta, m-n, h+1) + if overflow { return "", punyError(s) } n = m @@ -141,9 +146,9 @@ func encode(prefix, s string) (string, error) { q := delta for k := base; ; k += base { t := k - bias - if t < tmin { + if k <= bias { t = tmin - } else if t > tmax { + } else if k >= bias+tmax { t = tmax } if q < t { @@ -164,6 +169,15 @@ func encode(prefix, s string) (string, error) { return string(output), nil } +// madd computes a + (b * c), detecting overflow. +func madd(a, b, c int32) (next int32, overflow bool) { + p := int64(b) * int64(c) + if p > math.MaxInt32-int64(a) { + return 0, true + } + return a + int32(p), false +} + func decodeDigit(x byte) (digit int32, ok bool) { switch { case '0' <= x && x <= '9': diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go index 7a8cf889b5..9c070a44b3 100644 --- a/vendor/golang.org/x/net/idna/trieval.go +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -17,23 +17,23 @@ package idna // // The per-rune values have the following format: // -// if mapped { -// if inlinedXOR { -// 15..13 inline XOR marker -// 12..11 unused -// 10..3 inline XOR mask -// } else { -// 15..3 index into xor or mapping table -// } -// } else { -// 15..14 unused -// 13 mayNeedNorm -// 12..11 attributes -// 10..8 joining type -// 7..3 category type -// } -// 2 use xor pattern -// 1..0 mapped category +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category // // See the definitions below for a more detailed description of the various // bits. 
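Note on the x/net/idna hunks above: Transitional(transitional bool) previously ignored its argument and always enabled transitional mapping; it now honors the flag, and the lookup profile keys off the new transitionalLookup constant (false from Go 1.18 onward, true before). The punycode codec likewise replaces its ad-hoc "i += digit * w; if i < 0" wraparound checks with the madd helper. Below is a minimal standalone sketch of that overflow-guard pattern, mirroring the vendored helper; the main driver is illustrative only and is not part of the diff.

package main

import (
	"fmt"
	"math"
)

// madd computes a + (b * c) in int32 and reports overflow instead of
// silently wrapping; the int64 intermediate keeps the product exact.
func madd(a, b, c int32) (next int32, overflow bool) {
	p := int64(b) * int64(c)
	if p > math.MaxInt32-int64(a) {
		return 0, true
	}
	return a + int32(p), false
}

func main() {
	if v, overflow := madd(1, 2, 3); !overflow {
		fmt.Println("1 + 2*3 =", v) // prints 7
	}
	if _, overflow := madd(math.MaxInt32, 1, 1); overflow {
		fmt.Println("math.MaxInt32 + 1*1 overflows int32") // detected, no wraparound
	}
}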
diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go index 87ae9d2a33..c9b69937a0 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.5 // +build go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index c07c798bc5..98bf56b732 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.5 // +build !go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race.go b/vendor/golang.org/x/sys/plan9/race.go index 42edd93ef9..62377d2ff9 100644 --- a/vendor/golang.org/x/sys/plan9/race.go +++ b/vendor/golang.org/x/sys/plan9/race.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 && race // +build plan9,race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race0.go b/vendor/golang.org/x/sys/plan9/race0.go index c89cf8fc0d..f8da30876d 100644 --- a/vendor/golang.org/x/sys/plan9/race0.go +++ b/vendor/golang.org/x/sys/plan9/race0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 && !race // +build plan9,!race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/str.go b/vendor/golang.org/x/sys/plan9/str.go index 4f7f9ad7c8..55fa8d025e 100644 --- a/vendor/golang.org/x/sys/plan9/str.go +++ b/vendor/golang.org/x/sys/plan9/str.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 // +build plan9 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go index e7363a2f54..602473cba3 100644 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 // +build plan9 // Package plan9 contains an interface to the low-level operating system diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go index 84e1471481..723b1f4002 100644 --- a/vendor/golang.org/x/sys/plan9/syscall_plan9.go +++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go @@ -132,8 +132,10 @@ func Pipe(p []int) (err error) { } var pp [2]int32 err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return } diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go index 6819bc2094..3f40b9bd74 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -plan9 -tags plan9,386 syscall_plan9.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build plan9 && 386 // +build plan9,386 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go index 418abbbfc7..0e6a96aa4f 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -plan9 -tags plan9,amd64 syscall_plan9.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build plan9 && amd64 // +build plan9,amd64 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go index 3e8a1a58ca..244c501b77 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -plan9 -tags plan9,arm syscall_plan9.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build plan9 && arm // +build plan9,arm package plan9 diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 474efad0e0..7d3c060e12 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -149,7 +149,7 @@ To add a constant, add the header that includes it to the appropriate variable. Then, edit the regex (if necessary) to match the desired constant. Avoid making the regex too broad to avoid matching unintended constants. -### mkmerge.go +### internal/mkmerge This program is used to extract duplicate const, func, and type declarations from the generated architecture-specific files listed below, and merge these diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 396aadf86d..ee73623489 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." 
&& /bin/pwd):/build generate:$GOOS exit fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index a74ef58f8c..a47b035f9a 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -239,6 +239,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -260,6 +261,7 @@ struct ltchars { #include #include #include +#include #include #include @@ -520,7 +522,7 @@ ccflags="$@" $2 ~ /^HW_MACHINE$/ || $2 ~ /^SYSCTL_VERS/ || $2 !~ "MNT_BITS" && - $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^(MS|MNT|MOUNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || @@ -605,6 +607,7 @@ ccflags="$@" $2 ~ /^MTD/ || $2 ~ /^OTP/ || $2 ~ /^MEM/ || + $2 ~ /^WG/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go index 326fb04a52..5f63147e06 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go @@ -67,9 +67,7 @@ func ParseOrigDstAddr(m *SocketControlMessage) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case m.Header.Level == SOL_IPV6 && m.Header.Type == IPV6_ORIGDSTADDR: @@ -78,9 +76,7 @@ func ParseOrigDstAddr(m *SocketControlMessage) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil default: diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index d8efb715ff..4f55c8d999 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -70,9 +70,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -85,9 +83,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -261,9 +257,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -272,9 +266,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT @@ -385,6 +377,11 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys fcntl(fd int, cmd int, arg int) (val int, err 
error) +//sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range +func Fsync(fd int) error { + return fsyncRange(fd, O_SYNC, 0, 0) +} + /* * Direct access */ @@ -401,7 +398,6 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Fdatasync(fd int) (err error) -//sys Fsync(fd int) (err error) // readdir_r //sysnb Getpgid(pid int) (pgid int, err error) @@ -523,8 +519,10 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 95ac3946b5..0ce4523261 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -163,9 +163,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -179,9 +177,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -210,9 +206,7 @@ func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Nlen = sa.Nlen sa.raw.Alen = sa.Alen sa.raw.Slen = sa.Slen - for i := 0; i < len(sa.raw.Data); i++ { - sa.raw.Data[i] = sa.Data[i] - } + sa.raw.Data = sa.Data return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil } @@ -228,9 +222,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa.Nlen = pp.Nlen sa.Alen = pp.Alen sa.Slen = pp.Slen - for i := 0; i < len(sa.Data); i++ { - sa.Data[i] = pp.Data[i] - } + sa.Data = pp.Data return sa, nil case AF_UNIX: @@ -262,9 +254,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -273,9 +263,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return anyToSockaddrGOOS(fd, rsa) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index a8c13317d7..0eaab91314 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -159,8 +159,10 @@ func Pipe(p []int) (err error) { } var x [2]int32 err = pipe(&x) - p[0] = int(x[0]) - p[1] = int(x[1]) + if err == nil { + p[0] = int(x[0]) + p[1] = int(x[1]) + } return } @@ -430,8 +432,25 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } -func SysctlKinfoProcSlice(name string) ([]KinfoProc, error) { - mib, err := sysctlmib(name) +func SysctlKinfoProc(name string, args ...int) 
(*KinfoProc, error) { + mib, err := sysctlmib(name, args...) + if err != nil { + return nil, err + } + + var kinfo KinfoProc + n := uintptr(SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&kinfo)), &n, nil, 0); err != nil { + return nil, err + } + if n != SizeofKinfoProc { + return nil, EIO + } + return &kinfo, nil +} + +func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { + mib, err := sysctlmib(name, args...) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 5af108a503..2e37c3167f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -101,7 +101,10 @@ func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL } - p[0], p[1], err = pipe() + r, w, err := pipe() + if err == nil { + p[0], p[1] = r, w + } return } @@ -114,7 +117,10 @@ func Pipe2(p []int, flags int) (err error) { var pp [2]_C_int // pipe2 on dragonfly takes an fds array as an argument, but still // returns the file descriptors. - p[0], p[1], err = pipe2(&pp, flags) + r, w, err := pipe2(&pp, flags) + if err == nil { + p[0], p[1] = r, w + } return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 18c392cf36..2f650ae665 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -110,8 +110,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index fff38a84c9..f432b0684b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -131,8 +131,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } @@ -372,9 +374,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -387,9 +387,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -438,9 +436,7 @@ func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Hatype = sa.Hatype sa.raw.Pkttype = sa.Pkttype sa.raw.Halen = sa.Halen - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil } @@ -855,12 +851,10 @@ func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Addr == nil { return nil, 0, EINVAL } - sa.raw.Family = AF_TIPC sa.raw.Scope = int8(sa.Scope) sa.raw.Addrtype = sa.Addr.tipcAddrtype() sa.raw.Addr = sa.Addr.tipcAddr() - return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil } @@ -874,9 +868,7 @@ type SockaddrL2TPIP struct 
{ func (sa *SockaddrL2TPIP) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_INET sa.raw.Conn_id = sa.ConnId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP, nil } @@ -892,9 +884,7 @@ func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_INET6 sa.raw.Conn_id = sa.ConnId sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil } @@ -990,9 +980,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa.Hatype = pp.Hatype sa.Pkttype = pp.Pkttype sa.Halen = pp.Halen - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_UNIX: @@ -1031,18 +1019,14 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { pp := (*RawSockaddrL2TPIP)(unsafe.Pointer(rsa)) sa := new(SockaddrL2TPIP) sa.ConnId = pp.Conn_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil default: pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } @@ -1058,9 +1042,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrL2TPIP6) sa.ConnId = pp.Conn_id sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil default: pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) @@ -1068,9 +1050,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } @@ -1797,6 +1777,16 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri return mount(source, target, fstype, flags, datap) } +//sys mountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr, size uintptr) (err error) = SYS_MOUNT_SETATTR + +// MountSetattr is a wrapper for mount_setattr(2). +// https://man7.org/linux/man-pages/man2/mount_setattr.2.html +// +// Requires kernel >= 5.12. 
+func MountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr) error { + return mountSetattr(dirfd, pathname, flags, attr, unsafe.Sizeof(*attr)) +} + func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 853d5f0f43..696fed496f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -110,14 +110,8 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -//sysnb pipe() (fd1 int, fd2 int, err error) - func Pipe(p []int) (err error) { - if len(p) != 2 { - return EINVAL - } - p[0], p[1], err = pipe() - return + return Pipe2(p, 0) } //sysnb pipe2(p *[2]_C_int, flags int) (err error) @@ -128,8 +122,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 22b5503850..11b1d419da 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -87,8 +87,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index d2a6495c7e..5c813921e8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -66,8 +66,10 @@ func Pipe(p []int) (err error) { if n != 0 { return err } - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return nil } @@ -79,8 +81,10 @@ func Pipe2(p []int, flags int) error { } var pp [2]_C_int err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return err } @@ -92,9 +96,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } @@ -107,9 +109,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } @@ -417,9 +417,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -428,9 +426,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return 
sa, nil } return nil, EAFNOSUPPORT diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 1ffd8bfcfb..f8616f454e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -67,9 +67,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -83,9 +81,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -144,9 +140,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -155,9 +149,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, EAFNOSUPPORT @@ -587,8 +579,10 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + if err == nil { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } return } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 78d4b85ece..bcc45d1085 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. 
//go:build linux // +build linux @@ -116,6 +116,7 @@ const ( ARPHRD_LAPB = 0x204 ARPHRD_LOCALTLK = 0x305 ARPHRD_LOOPBACK = 0x304 + ARPHRD_MCTP = 0x122 ARPHRD_METRICOM = 0x17 ARPHRD_NETLINK = 0x338 ARPHRD_NETROM = 0x0 @@ -472,6 +473,7 @@ const ( DM_DEV_WAIT = 0xc138fd08 DM_DIR = "mapper" DM_GET_TARGET_VERSION = 0xc138fd11 + DM_IMA_MEASUREMENT_FLAG = 0x80000 DM_INACTIVE_PRESENT_FLAG = 0x40 DM_INTERNAL_SUSPEND_FLAG = 0x40000 DM_IOCTL = 0xfd @@ -716,6 +718,7 @@ const ( ETH_P_LOOPBACK = 0x9000 ETH_P_MACSEC = 0x88e5 ETH_P_MAP = 0xf9 + ETH_P_MCTP = 0xfa ETH_P_MOBITEX = 0x15 ETH_P_MPLS_MC = 0x8848 ETH_P_MPLS_UC = 0x8847 @@ -751,6 +754,21 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EV_ABS = 0x3 + EV_CNT = 0x20 + EV_FF = 0x15 + EV_FF_STATUS = 0x17 + EV_KEY = 0x1 + EV_LED = 0x11 + EV_MAX = 0x1f + EV_MSC = 0x4 + EV_PWR = 0x16 + EV_REL = 0x2 + EV_REP = 0x14 + EV_SND = 0x12 + EV_SW = 0x5 + EV_SYN = 0x0 + EV_VERSION = 0x10001 EXABYTE_ENABLE_NEST = 0xf0 EXT2_SUPER_MAGIC = 0xef53 EXT3_SUPER_MAGIC = 0xef53 @@ -789,9 +807,11 @@ const ( FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EPIDFD = -0x2 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_PIDFD = 0x4 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -811,6 +831,7 @@ const ( FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 + FAN_NOPIDFD = -0x1 FAN_ONDIR = 0x40000000 FAN_OPEN = 0x20 FAN_OPEN_EXEC = 0x1000 @@ -821,6 +842,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 + FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1454,6 +1476,18 @@ const ( MNT_FORCE = 0x1 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 + MOUNT_ATTR_IDMAP = 0x100000 + MOUNT_ATTR_NOATIME = 0x10 + MOUNT_ATTR_NODEV = 0x4 + MOUNT_ATTR_NODIRATIME = 0x80 + MOUNT_ATTR_NOEXEC = 0x8 + MOUNT_ATTR_NOSUID = 0x2 + MOUNT_ATTR_NOSYMFOLLOW = 0x200000 + MOUNT_ATTR_RDONLY = 0x1 + MOUNT_ATTR_RELATIME = 0x0 + MOUNT_ATTR_SIZE_VER0 = 0x20 + MOUNT_ATTR_STRICTATIME = 0x20 + MOUNT_ATTR__ATIME = 0x70 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1997,6 +2031,7 @@ const ( PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 + PR_SPEC_L1D_FLUSH = 0x2 PR_SPEC_NOT_AFFECTED = 0x0 PR_SPEC_PRCTL = 0x1 PR_SPEC_STORE_BYPASS = 0x0 @@ -2432,12 +2467,15 @@ const ( SMART_WRITE_THRESHOLDS = 0xd7 SMB_SUPER_MAGIC = 0x517b SOCKFS_MAGIC = 0x534f434b + SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 + SOCK_RCVBUF_LOCK = 0x2 SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 + SOCK_SNDBUF_LOCK = 0x1 SOL_AAL = 0x109 SOL_ALG = 0x117 SOL_ATM = 0x108 @@ -2788,6 +2826,13 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 + WGALLOWEDIP_A_MAX = 0x3 + WGDEVICE_A_MAX = 0x8 + WGPEER_A_MAX = 0xa + WG_CMD_MAX = 0x1 + WG_GENL_NAME = "wireguard" + WG_GENL_VERSION = 0x1 + WG_KEY_LEN = 0x20 WIN_ACKMEDIACHANGE = 0xdb WIN_CHECKPOWERMODE1 = 0xe5 WIN_CHECKPOWERMODE2 = 0x98 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 697811a460..3ca40ca7f0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -5,7 +5,7 @@ // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 7d8d93bfc4..ead332091a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -5,7 +5,7 @@ // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go package unix @@ -294,6 +294,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index f707d50894..39bdc94558 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -5,7 +5,7 @@ // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -300,6 +300,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 3a67a9c852..9aec987db1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -5,7 +5,7 @@ // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go package unix @@ -290,6 +290,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index a7ccef56c5..a8bba9491e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -5,7 +5,7 @@ // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index f7b7cec910..ee9e7e2020 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -5,7 +5,7 @@ // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 4fcacf9584..ba4b288a3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -5,7 +5,7 @@ // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 6f6c223a2c..bc93afc367 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -5,7 +5,7 @@ // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -293,6 +293,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 59e522bcf4..9295e69478 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -5,7 +5,7 @@ // +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -348,6 +348,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index d4264a0f73..1fa081c9a6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -5,7 +5,7 @@ // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -352,6 +352,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 21cbec1dd3..74b3211494 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -5,7 +5,7 @@ // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -352,6 +352,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 9b05bf12fc..c91c8ac5b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -5,7 +5,7 @@ // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -281,6 +281,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index bd82ace09a..b66bf22289 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -5,7 +5,7 @@ // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go package unix @@ -356,6 +356,7 @@ const ( SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 SO_BUSY_POLL = 0x2e SO_BUSY_POLL_BUDGET = 0x46 SO_CNX_ADVICE = 0x35 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1f8bded56b..f7fb149b0c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -5,7 +5,7 @@ // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go package unix @@ -347,6 +347,7 @@ const ( SO_BPF_EXTENSIONS = 0x32 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0x400 + SO_BUF_LOCK = 0x51 SO_BUSY_POLL = 0x30 SO_BUSY_POLL_BUDGET = 0x49 SO_CNX_ADVICE = 0x37 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 91a23cc728..85e0cc3866 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -17,6 +17,7 @@ int getdirent(int, uintptr_t, size_t); int wait4(int, uintptr_t, int, uintptr_t); int ioctl(int, int, uintptr_t); int fcntl(uintptr_t, int, uintptr_t); +int fsync_range(int, int, long long, long long); int acct(uintptr_t); int chdir(uintptr_t); int chroot(uintptr_t); @@ -29,7 +30,6 @@ int fchmod(int, unsigned int); int fchmodat(int, uintptr_t, unsigned int, int); int fchownat(int, uintptr_t, int, int, int); int fdatasync(int); -int fsync(int); int getpgid(int); int getpgrp(); int getpid(); @@ -255,6 +255,16 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsyncRange(fd int, how int, start int64, length int64) (err error) { + r0, er := C.fsync_range(C.int(fd), C.int(how), C.longlong(start), C.longlong(length)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.acct(C.uintptr_t(_p0)) @@ -379,16 +389,6 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - r0, er := C.fsync(C.int(fd)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getpgid(pid int) (pgid int, err error) { r0, er := C.getpgid(C.int(pid)) pgid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 33c2609b8b..f1d4a73b08 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -135,6 +135,16 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsyncRange(fd int, how int, start int64, length int64) (err error) { + _, e1 := callfsync_range(fd, how, start, length) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -283,16 +293,6 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, e1 := callfsync(fd) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getpgid(pid int) (pgid int, err error) { r0, e1 := callgetpgid(pid) pgid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 8b737fa971..2caa5adf95 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -18,6 +18,7 @@ import ( //go:cgo_import_dynamic 
libc_wait4 wait4 "libc.a/shr_64.o" //go:cgo_import_dynamic libc_ioctl ioctl "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fcntl fcntl "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fsync_range fsync_range "libc.a/shr_64.o" //go:cgo_import_dynamic libc_acct acct "libc.a/shr_64.o" //go:cgo_import_dynamic libc_chdir chdir "libc.a/shr_64.o" //go:cgo_import_dynamic libc_chroot chroot "libc.a/shr_64.o" @@ -30,7 +31,6 @@ import ( //go:cgo_import_dynamic libc_fchmodat fchmodat "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fchownat fchownat "libc.a/shr_64.o" //go:cgo_import_dynamic libc_fdatasync fdatasync "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_fsync fsync "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpgid getpgid "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpgrp getpgrp "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getpid getpid "libc.a/shr_64.o" @@ -136,6 +136,7 @@ import ( //go:linkname libc_wait4 libc_wait4 //go:linkname libc_ioctl libc_ioctl //go:linkname libc_fcntl libc_fcntl +//go:linkname libc_fsync_range libc_fsync_range //go:linkname libc_acct libc_acct //go:linkname libc_chdir libc_chdir //go:linkname libc_chroot libc_chroot @@ -148,7 +149,6 @@ import ( //go:linkname libc_fchmodat libc_fchmodat //go:linkname libc_fchownat libc_fchownat //go:linkname libc_fdatasync libc_fdatasync -//go:linkname libc_fsync libc_fsync //go:linkname libc_getpgid libc_getpgid //go:linkname libc_getpgrp libc_getpgrp //go:linkname libc_getpid libc_getpid @@ -257,6 +257,7 @@ var ( libc_wait4, libc_ioctl, libc_fcntl, + libc_fsync_range, libc_acct, libc_chdir, libc_chroot, @@ -269,7 +270,6 @@ var ( libc_fchmodat, libc_fchownat, libc_fdatasync, - libc_fsync, libc_getpgid, libc_getpgrp, libc_getpid, @@ -430,6 +430,13 @@ func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callfsync_range(fd int, how int, start int64, length int64) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fsync_range)), 4, uintptr(fd), uintptr(how), uintptr(start), uintptr(length), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callacct(_p0 uintptr) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_acct)), 1, _p0, 0, 0, 0, 0, 0) return @@ -514,13 +521,6 @@ func callfdatasync(fd int) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callfsync(fd int) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func callgetpgid(pid int) (r1 uintptr, e1 Errno) { r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 3c260917ed..944a714b1a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -16,6 +16,7 @@ int getdirent(int, uintptr_t, size_t); int wait4(int, uintptr_t, int, uintptr_t); int ioctl(int, int, uintptr_t); int fcntl(uintptr_t, int, uintptr_t); +int fsync_range(int, int, long long, long long); int acct(uintptr_t); int chdir(uintptr_t); int chroot(uintptr_t); @@ -28,7 +29,6 @@ int fchmod(int, unsigned int); int fchmodat(int, uintptr_t, unsigned int, int); int fchownat(int, uintptr_t, 
int, int, int); int fdatasync(int); -int fsync(int); int getpgid(int); int getpgrp(); int getpid(); @@ -199,6 +199,14 @@ func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callfsync_range(fd int, how int, start int64, length int64) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fsync_range(C.int(fd), C.int(how), C.longlong(start), C.longlong(length))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callacct(_p0 uintptr) (r1 uintptr, e1 Errno) { r1 = uintptr(C.acct(C.uintptr_t(_p0))) e1 = syscall.GetErrno() @@ -295,14 +303,6 @@ func callfdatasync(fd int) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callfsync(fd int) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.fsync(C.int(fd))) - e1 = syscall.GetErrno() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func callgetpgid(pid int) (r1 uintptr, e1 Errno) { r1 = uintptr(C.getpgid(C.int(pid))) e1 = syscall.GetErrno() diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 4f5da1f54f..93edda4c49 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. //go:build linux // +build linux @@ -409,6 +409,21 @@ func mount(source string, target string, fstype string, flags uintptr, data *byt // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr, size uintptr) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT_SETATTR, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 4726ab30a8..51d0c0742b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index fe71456dbc..df2efb6db3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - 
} - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 0b5b2f0143..c8536c2c9f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index bfca28648f..8b981bfc2e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -351,18 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (fd1 int, fd2 int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - fd1 = int(r0) - fd2 = int(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index aa7ce85d15..31847d2305 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -444,4 +444,5 @@ const ( SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index b830326386..3503cbbde3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -366,4 +366,5 @@ const ( SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index d75f65a0aa..5ecd24bf68 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -7,6 +7,7 @@ package unix const ( + SYS_SYSCALL_MASK = 0 SYS_RESTART_SYSCALL = 0 SYS_EXIT = 1 SYS_FORK = 2 @@ -407,4 +408,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 8b02f09e9b..7e5c94cc7f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -311,4 +311,5 @@ const ( SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_MEMFD_SECRET = 447 + SYS_PROCESS_MRELEASE = 448 ) 
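Note on the x/sys/unix hunks above: the Pipe/Pipe2 wrappers now write into the caller's fd slice only when the underlying syscall succeeds, so the slice is left untouched on failure instead of being overwritten with zero or indeterminate values, and the sockaddr conversions switch from byte-by-byte loops to direct array assignment (sa.Addr = pp.Addr), which is equivalent because Go arrays copy by value. A small caller-side sketch of the Pipe behavior follows; it assumes a Unix platform and the sentinel values are illustrative only.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Sentinels: with the updated wrappers these survive a failed call
	// rather than being clobbered before the error is reported.
	p := []int{-1, -1}
	if err := unix.Pipe(p); err != nil {
		fmt.Println("pipe failed:", err, "fds untouched:", p)
		return
	}
	defer unix.Close(p[0])
	defer unix.Close(p[1])
	fmt.Println("read fd:", p[0], "write fd:", p[1])
}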
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 026695abb1..e1e2a2bf59 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -428,4 +428,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 4444 SYS_LANDLOCK_ADD_RULE = 4445 SYS_LANDLOCK_RESTRICT_SELF = 4446 + SYS_PROCESS_MRELEASE = 4448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 7320ba9583..7651915a3a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -358,4 +358,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 5444 SYS_LANDLOCK_ADD_RULE = 5445 SYS_LANDLOCK_RESTRICT_SELF = 5446 + SYS_PROCESS_MRELEASE = 5448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 45082dd67f..a26a2c050b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -358,4 +358,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 5444 SYS_LANDLOCK_ADD_RULE = 5445 SYS_LANDLOCK_RESTRICT_SELF = 5446 + SYS_PROCESS_MRELEASE = 5448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 570a857a56..fda9a6a991 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -428,4 +428,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 4444 SYS_LANDLOCK_ADD_RULE = 4445 SYS_LANDLOCK_RESTRICT_SELF = 4446 + SYS_PROCESS_MRELEASE = 4448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 638498d62e..e8496150d4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -435,4 +435,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 702beebfef..5ee0678a36 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -407,4 +407,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index bfc87ea444..29c0f9a39e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -407,4 +407,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a390e147d3..5c9a9a3b61 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -309,4 +309,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 3e791e6cd2..913f50f98b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -372,4 +372,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 78802a5cf7..0de03a7227 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -386,4 +386,5 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 7efe5ccba3..885842c0eb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -641,13 +641,13 @@ type Eproc struct { Tdev int32 Tpgid int32 Tsess uintptr - Wmesg [8]int8 + Wmesg [8]byte Xsize int32 Xrssize int16 Xccount int16 Xswrss int16 Flag int32 - Login [12]int8 + Login [12]byte Spare [4]int32 _ [4]byte } @@ -688,7 +688,7 @@ type ExternProc struct { P_priority uint8 P_usrpri uint8 P_nice int8 - P_comm [17]int8 + P_comm [17]byte P_pgrp uintptr P_addr uintptr P_xstat uint16 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index b23a2efe81..b23c02337d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -641,13 +641,13 @@ type Eproc struct { Tdev int32 Tpgid int32 Tsess uintptr - Wmesg [8]int8 + Wmesg [8]byte Xsize int32 Xrssize int16 Xccount int16 Xswrss int16 Flag int32 - Login [12]int8 + Login [12]byte Spare [4]int32 _ [4]byte } @@ -688,7 +688,7 @@ type ExternProc struct { P_priority uint8 P_usrpri uint8 P_nice int8 - P_comm [17]int8 + P_comm [17]byte P_pgrp uintptr P_addr uintptr P_xstat uint16 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 249ecfcd4c..f6f0d79c42 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,4 +1,4 @@ -// Code generated by mkmerge.go; DO NOT EDIT. +// Code generated by mkmerge; DO NOT EDIT. 
//go:build linux // +build linux @@ -743,6 +743,8 @@ const ( AT_STATX_FORCE_SYNC = 0x2000 AT_STATX_DONT_SYNC = 0x4000 + AT_RECURSIVE = 0x8000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 @@ -865,6 +867,7 @@ const ( CTRL_CMD_NEWMCAST_GRP = 0x7 CTRL_CMD_DELMCAST_GRP = 0x8 CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_CMD_GETPOLICY = 0xa CTRL_ATTR_UNSPEC = 0x0 CTRL_ATTR_FAMILY_ID = 0x1 CTRL_ATTR_FAMILY_NAME = 0x2 @@ -873,12 +876,19 @@ const ( CTRL_ATTR_MAXATTR = 0x5 CTRL_ATTR_OPS = 0x6 CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_POLICY = 0x8 + CTRL_ATTR_OP_POLICY = 0x9 + CTRL_ATTR_OP = 0xa CTRL_ATTR_OP_UNSPEC = 0x0 CTRL_ATTR_OP_ID = 0x1 CTRL_ATTR_OP_FLAGS = 0x2 CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 + CTRL_ATTR_POLICY_UNSPEC = 0x0 + CTRL_ATTR_POLICY_DO = 0x1 + CTRL_ATTR_POLICY_DUMP = 0x2 + CTRL_ATTR_POLICY_DUMP_MAX = 0x2 ) const ( @@ -3264,7 +3274,8 @@ const ( LWTUNNEL_ENCAP_BPF = 0x6 LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7 LWTUNNEL_ENCAP_RPL = 0x8 - LWTUNNEL_ENCAP_MAX = 0x8 + LWTUNNEL_ENCAP_IOAM6 = 0x9 + LWTUNNEL_ENCAP_MAX = 0x9 MPLS_IPTUNNEL_UNSPEC = 0x0 MPLS_IPTUNNEL_DST = 0x1 @@ -3617,7 +3628,9 @@ const ( ETHTOOL_A_COALESCE_TX_USECS_HIGH = 0x15 ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 0x16 ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 - ETHTOOL_A_COALESCE_MAX = 0x17 + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 + ETHTOOL_A_COALESCE_MAX = 0x19 ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3956,3 +3969,77 @@ const ( SHM_RDONLY = 0x1000 SHM_RND = 0x2000 ) + +type MountAttr struct { + Attr_set uint64 + Attr_clr uint64 + Propagation uint64 + Userns_fd uint64 +} + +const ( + WG_CMD_GET_DEVICE = 0x0 + WG_CMD_SET_DEVICE = 0x1 + WGDEVICE_F_REPLACE_PEERS = 0x1 + WGDEVICE_A_UNSPEC = 0x0 + WGDEVICE_A_IFINDEX = 0x1 + WGDEVICE_A_IFNAME = 0x2 + WGDEVICE_A_PRIVATE_KEY = 0x3 + WGDEVICE_A_PUBLIC_KEY = 0x4 + WGDEVICE_A_FLAGS = 0x5 + WGDEVICE_A_LISTEN_PORT = 0x6 + WGDEVICE_A_FWMARK = 0x7 + WGDEVICE_A_PEERS = 0x8 + WGPEER_F_REMOVE_ME = 0x1 + WGPEER_F_REPLACE_ALLOWEDIPS = 0x2 + WGPEER_F_UPDATE_ONLY = 0x4 + WGPEER_A_UNSPEC = 0x0 + WGPEER_A_PUBLIC_KEY = 0x1 + WGPEER_A_PRESHARED_KEY = 0x2 + WGPEER_A_FLAGS = 0x3 + WGPEER_A_ENDPOINT = 0x4 + WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL = 0x5 + WGPEER_A_LAST_HANDSHAKE_TIME = 0x6 + WGPEER_A_RX_BYTES = 0x7 + WGPEER_A_TX_BYTES = 0x8 + WGPEER_A_ALLOWEDIPS = 0x9 + WGPEER_A_PROTOCOL_VERSION = 0xa + WGALLOWEDIP_A_UNSPEC = 0x0 + WGALLOWEDIP_A_FAMILY = 0x1 + WGALLOWEDIP_A_IPADDR = 0x2 + WGALLOWEDIP_A_CIDR_MASK = 0x3 +) + +const ( + NL_ATTR_TYPE_INVALID = 0x0 + NL_ATTR_TYPE_FLAG = 0x1 + NL_ATTR_TYPE_U8 = 0x2 + NL_ATTR_TYPE_U16 = 0x3 + NL_ATTR_TYPE_U32 = 0x4 + NL_ATTR_TYPE_U64 = 0x5 + NL_ATTR_TYPE_S8 = 0x6 + NL_ATTR_TYPE_S16 = 0x7 + NL_ATTR_TYPE_S32 = 0x8 + NL_ATTR_TYPE_S64 = 0x9 + NL_ATTR_TYPE_BINARY = 0xa + NL_ATTR_TYPE_STRING = 0xb + NL_ATTR_TYPE_NUL_STRING = 0xc + NL_ATTR_TYPE_NESTED = 0xd + NL_ATTR_TYPE_NESTED_ARRAY = 0xe + NL_ATTR_TYPE_BITFIELD32 = 0xf + + NL_POLICY_TYPE_ATTR_UNSPEC = 0x0 + NL_POLICY_TYPE_ATTR_TYPE = 0x1 + NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 0x2 + NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 0x3 + NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 0x4 + NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 0x5 + NL_POLICY_TYPE_ATTR_MIN_LENGTH = 0x6 + NL_POLICY_TYPE_ATTR_MAX_LENGTH = 0x7 + NL_POLICY_TYPE_ATTR_POLICY_IDX = 0x8 + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 0x9 + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 0xa + NL_POLICY_TYPE_ATTR_PAD = 0xb + NL_POLICY_TYPE_ATTR_MASK = 0xc + 
NL_POLICY_TYPE_ATTR_MAX = 0xc +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index eeeb9aa39a..bea2549455 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index d30e1155cc..b8c8f28943 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 69d0297520..4db4430163 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 28a0455bc9..3ebcad8a88 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 64a845483d..3eb33e48ab 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index a1b7dee412..79a9446725 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 936fa6a266..8f4b107cad 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 5dd546fbf0..e4eb217981 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 947b32e434..d5b21f0f7d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 2a606151b0..5188d142b9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index d0d735d02c..de4dd4c736 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 95e3d6d06f..dccbf9b060 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cccf1ef26a..6358806106 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 44fcbe4e9a..765edc13ff 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 2a8b1e6f73..baf5fe6504 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -564,12 +564,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index b1759cf705..e21ae8ecfa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -564,12 +564,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index e807de2065..f190651cd9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -565,12 +565,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index ff3aecaee4..84747c582c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -558,12 +558,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 9ecda69174..ac5c8b6370 100644 --- 
a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -558,12 +558,11 @@ type Uvmexp struct { Kmapent int32 } -const SizeofClockinfo = 0x14 +const SizeofClockinfo = 0x10 type Clockinfo struct { - Hz int32 - Tick int32 - Tickadj int32 - Stathz int32 - Profhz int32 + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 } diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 7a11e83b7e..855698bb28 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -9,8 +9,6 @@ package windows import ( errorspkg "errors" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) // EscapeArg rewrites command line argument s as prescribed @@ -147,8 +145,12 @@ func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListCo } return nil, err } + alloc, err := LocalAlloc(LMEM_FIXED, uint32(size)) + if err != nil { + return nil, err + } // size is guaranteed to be ≥1 by InitializeProcThreadAttributeList. - al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(&make([]byte, size)[0]))} + al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(alloc))} err = initializeProcThreadAttributeList(al.data, maxAttrCount, 0, &size) if err != nil { return nil, err @@ -157,36 +159,17 @@ func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListCo } // Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute. -// Note that the value passed to this function will be copied into memory -// allocated by LocalAlloc, the contents of which should not contain any -// Go-managed pointers, even if the passed value itself is a Go-managed -// pointer. func (al *ProcThreadAttributeListContainer) Update(attribute uintptr, value unsafe.Pointer, size uintptr) error { - alloc, err := LocalAlloc(LMEM_FIXED, uint32(size)) - if err != nil { - return err - } - var src, dst []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&src)) - hdr.Data = value - hdr.Cap = int(size) - hdr.Len = int(size) - hdr = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) - hdr.Data = unsafe.Pointer(alloc) - hdr.Cap = int(size) - hdr.Len = int(size) - copy(dst, src) - al.heapAllocations = append(al.heapAllocations, alloc) - return updateProcThreadAttribute(al.data, 0, attribute, unsafe.Pointer(alloc), size, nil, nil) + al.pointers = append(al.pointers, value) + return updateProcThreadAttribute(al.data, 0, attribute, value, size, nil, nil) } // Delete frees ProcThreadAttributeList's resources. func (al *ProcThreadAttributeListContainer) Delete() { deleteProcThreadAttributeList(al.data) - for i := range al.heapAllocations { - LocalFree(Handle(al.heapAllocations[i])) - } - al.heapAllocations = nil + LocalFree(Handle(unsafe.Pointer(al.data))) + al.data = nil + al.pointers = nil } // List returns the actual ProcThreadAttributeList to be passed to StartupInfoEx. 
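Aside (not part of the diff): the exec_windows.go hunk above changes how ProcThreadAttributeListContainer manages memory: the attribute list itself is now LocalAlloc'd up front, Update records the caller's pointer (the container keeps a reference to it until Delete) instead of copying the value into its own allocation, and Delete frees the list. The sketch below shows how a caller is expected to drive it after this change; it is illustrative only, not code from this repository, and the handle plumbing and function name are assumptions.

//go:build windows

package procattr

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

// buildStartupInfo prepares a StartupInfoEx that reparents a child process
// under the given parent handle. The returned cleanup calls Delete and must
// run only after CreateProcess has consumed the attribute list.
func buildStartupInfo(parent windows.Handle) (*windows.StartupInfoEx, func(), error) {
	al, err := windows.NewProcThreadAttributeList(1)
	if err != nil {
		return nil, nil, err
	}
	// Update stores &parent directly; the container keeps it referenced until Delete.
	err = al.Update(windows.PROC_THREAD_ATTRIBUTE_PARENT_PROCESS,
		unsafe.Pointer(&parent), unsafe.Sizeof(parent))
	if err != nil {
		al.Delete()
		return nil, nil, err
	}
	si := &windows.StartupInfoEx{}
	si.Cb = uint32(unsafe.Sizeof(*si))
	si.ProcThreadAttributeList = al.List()
	return si, al.Delete, nil
}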
diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 6102910989..8563f79c57 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -7,4 +7,4 @@ package windows -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go setupapi_windows.go diff --git a/vendor/golang.org/x/sys/windows/setupapi_windows.go b/vendor/golang.org/x/sys/windows/setupapi_windows.go new file mode 100644 index 0000000000..14027da3f3 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/setupapi_windows.go @@ -0,0 +1,1425 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "encoding/binary" + "errors" + "fmt" + "runtime" + "strings" + "syscall" + "unsafe" +) + +// This file contains functions that wrap SetupAPI.dll and CfgMgr32.dll, +// core system functions for managing hardware devices, drivers, and the PnP tree. +// Information about these APIs can be found at: +// https://docs.microsoft.com/en-us/windows-hardware/drivers/install/setupapi +// https://docs.microsoft.com/en-us/windows/win32/devinst/cfgmgr32- + +const ( + ERROR_EXPECTED_SECTION_NAME Errno = 0x20000000 | 0xC0000000 | 0 + ERROR_BAD_SECTION_NAME_LINE Errno = 0x20000000 | 0xC0000000 | 1 + ERROR_SECTION_NAME_TOO_LONG Errno = 0x20000000 | 0xC0000000 | 2 + ERROR_GENERAL_SYNTAX Errno = 0x20000000 | 0xC0000000 | 3 + ERROR_WRONG_INF_STYLE Errno = 0x20000000 | 0xC0000000 | 0x100 + ERROR_SECTION_NOT_FOUND Errno = 0x20000000 | 0xC0000000 | 0x101 + ERROR_LINE_NOT_FOUND Errno = 0x20000000 | 0xC0000000 | 0x102 + ERROR_NO_BACKUP Errno = 0x20000000 | 0xC0000000 | 0x103 + ERROR_NO_ASSOCIATED_CLASS Errno = 0x20000000 | 0xC0000000 | 0x200 + ERROR_CLASS_MISMATCH Errno = 0x20000000 | 0xC0000000 | 0x201 + ERROR_DUPLICATE_FOUND Errno = 0x20000000 | 0xC0000000 | 0x202 + ERROR_NO_DRIVER_SELECTED Errno = 0x20000000 | 0xC0000000 | 0x203 + ERROR_KEY_DOES_NOT_EXIST Errno = 0x20000000 | 0xC0000000 | 0x204 + ERROR_INVALID_DEVINST_NAME Errno = 0x20000000 | 0xC0000000 | 0x205 + ERROR_INVALID_CLASS Errno = 0x20000000 | 0xC0000000 | 0x206 + ERROR_DEVINST_ALREADY_EXISTS Errno = 0x20000000 | 0xC0000000 | 0x207 + ERROR_DEVINFO_NOT_REGISTERED Errno = 0x20000000 | 0xC0000000 | 0x208 + ERROR_INVALID_REG_PROPERTY Errno = 0x20000000 | 0xC0000000 | 0x209 + ERROR_NO_INF Errno = 0x20000000 | 0xC0000000 | 0x20A + ERROR_NO_SUCH_DEVINST Errno = 0x20000000 | 0xC0000000 | 0x20B + ERROR_CANT_LOAD_CLASS_ICON Errno = 0x20000000 | 0xC0000000 | 0x20C + ERROR_INVALID_CLASS_INSTALLER Errno = 0x20000000 | 0xC0000000 | 0x20D + ERROR_DI_DO_DEFAULT Errno = 0x20000000 | 0xC0000000 | 0x20E + ERROR_DI_NOFILECOPY Errno = 0x20000000 | 0xC0000000 | 0x20F + ERROR_INVALID_HWPROFILE Errno = 0x20000000 | 0xC0000000 | 0x210 + ERROR_NO_DEVICE_SELECTED Errno = 0x20000000 | 0xC0000000 | 0x211 + ERROR_DEVINFO_LIST_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x212 + ERROR_DEVINFO_DATA_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x213 + ERROR_DI_BAD_PATH Errno = 0x20000000 | 0xC0000000 | 0x214 + ERROR_NO_CLASSINSTALL_PARAMS Errno = 0x20000000 | 0xC0000000 | 0x215 + ERROR_FILEQUEUE_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x216 + 
ERROR_BAD_SERVICE_INSTALLSECT Errno = 0x20000000 | 0xC0000000 | 0x217 + ERROR_NO_CLASS_DRIVER_LIST Errno = 0x20000000 | 0xC0000000 | 0x218 + ERROR_NO_ASSOCIATED_SERVICE Errno = 0x20000000 | 0xC0000000 | 0x219 + ERROR_NO_DEFAULT_DEVICE_INTERFACE Errno = 0x20000000 | 0xC0000000 | 0x21A + ERROR_DEVICE_INTERFACE_ACTIVE Errno = 0x20000000 | 0xC0000000 | 0x21B + ERROR_DEVICE_INTERFACE_REMOVED Errno = 0x20000000 | 0xC0000000 | 0x21C + ERROR_BAD_INTERFACE_INSTALLSECT Errno = 0x20000000 | 0xC0000000 | 0x21D + ERROR_NO_SUCH_INTERFACE_CLASS Errno = 0x20000000 | 0xC0000000 | 0x21E + ERROR_INVALID_REFERENCE_STRING Errno = 0x20000000 | 0xC0000000 | 0x21F + ERROR_INVALID_MACHINENAME Errno = 0x20000000 | 0xC0000000 | 0x220 + ERROR_REMOTE_COMM_FAILURE Errno = 0x20000000 | 0xC0000000 | 0x221 + ERROR_MACHINE_UNAVAILABLE Errno = 0x20000000 | 0xC0000000 | 0x222 + ERROR_NO_CONFIGMGR_SERVICES Errno = 0x20000000 | 0xC0000000 | 0x223 + ERROR_INVALID_PROPPAGE_PROVIDER Errno = 0x20000000 | 0xC0000000 | 0x224 + ERROR_NO_SUCH_DEVICE_INTERFACE Errno = 0x20000000 | 0xC0000000 | 0x225 + ERROR_DI_POSTPROCESSING_REQUIRED Errno = 0x20000000 | 0xC0000000 | 0x226 + ERROR_INVALID_COINSTALLER Errno = 0x20000000 | 0xC0000000 | 0x227 + ERROR_NO_COMPAT_DRIVERS Errno = 0x20000000 | 0xC0000000 | 0x228 + ERROR_NO_DEVICE_ICON Errno = 0x20000000 | 0xC0000000 | 0x229 + ERROR_INVALID_INF_LOGCONFIG Errno = 0x20000000 | 0xC0000000 | 0x22A + ERROR_DI_DONT_INSTALL Errno = 0x20000000 | 0xC0000000 | 0x22B + ERROR_INVALID_FILTER_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22C + ERROR_NON_WINDOWS_NT_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22D + ERROR_NON_WINDOWS_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22E + ERROR_NO_CATALOG_FOR_OEM_INF Errno = 0x20000000 | 0xC0000000 | 0x22F + ERROR_DEVINSTALL_QUEUE_NONNATIVE Errno = 0x20000000 | 0xC0000000 | 0x230 + ERROR_NOT_DISABLEABLE Errno = 0x20000000 | 0xC0000000 | 0x231 + ERROR_CANT_REMOVE_DEVINST Errno = 0x20000000 | 0xC0000000 | 0x232 + ERROR_INVALID_TARGET Errno = 0x20000000 | 0xC0000000 | 0x233 + ERROR_DRIVER_NONNATIVE Errno = 0x20000000 | 0xC0000000 | 0x234 + ERROR_IN_WOW64 Errno = 0x20000000 | 0xC0000000 | 0x235 + ERROR_SET_SYSTEM_RESTORE_POINT Errno = 0x20000000 | 0xC0000000 | 0x236 + ERROR_SCE_DISABLED Errno = 0x20000000 | 0xC0000000 | 0x238 + ERROR_UNKNOWN_EXCEPTION Errno = 0x20000000 | 0xC0000000 | 0x239 + ERROR_PNP_REGISTRY_ERROR Errno = 0x20000000 | 0xC0000000 | 0x23A + ERROR_REMOTE_REQUEST_UNSUPPORTED Errno = 0x20000000 | 0xC0000000 | 0x23B + ERROR_NOT_AN_INSTALLED_OEM_INF Errno = 0x20000000 | 0xC0000000 | 0x23C + ERROR_INF_IN_USE_BY_DEVICES Errno = 0x20000000 | 0xC0000000 | 0x23D + ERROR_DI_FUNCTION_OBSOLETE Errno = 0x20000000 | 0xC0000000 | 0x23E + ERROR_NO_AUTHENTICODE_CATALOG Errno = 0x20000000 | 0xC0000000 | 0x23F + ERROR_AUTHENTICODE_DISALLOWED Errno = 0x20000000 | 0xC0000000 | 0x240 + ERROR_AUTHENTICODE_TRUSTED_PUBLISHER Errno = 0x20000000 | 0xC0000000 | 0x241 + ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED Errno = 0x20000000 | 0xC0000000 | 0x242 + ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED Errno = 0x20000000 | 0xC0000000 | 0x243 + ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH Errno = 0x20000000 | 0xC0000000 | 0x244 + ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE Errno = 0x20000000 | 0xC0000000 | 0x245 + ERROR_DEVICE_INSTALLER_NOT_READY Errno = 0x20000000 | 0xC0000000 | 0x246 + ERROR_DRIVER_STORE_ADD_FAILED Errno = 0x20000000 | 0xC0000000 | 0x247 + ERROR_DEVICE_INSTALL_BLOCKED Errno = 0x20000000 | 0xC0000000 | 0x248 + ERROR_DRIVER_INSTALL_BLOCKED Errno = 0x20000000 | 0xC0000000 | 0x249 + 
ERROR_WRONG_INF_TYPE Errno = 0x20000000 | 0xC0000000 | 0x24A + ERROR_FILE_HASH_NOT_IN_CATALOG Errno = 0x20000000 | 0xC0000000 | 0x24B + ERROR_DRIVER_STORE_DELETE_FAILED Errno = 0x20000000 | 0xC0000000 | 0x24C + ERROR_UNRECOVERABLE_STACK_OVERFLOW Errno = 0x20000000 | 0xC0000000 | 0x300 + EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW + ERROR_NO_DEFAULT_INTERFACE_DEVICE Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE + ERROR_INTERFACE_DEVICE_ACTIVE Errno = ERROR_DEVICE_INTERFACE_ACTIVE + ERROR_INTERFACE_DEVICE_REMOVED Errno = ERROR_DEVICE_INTERFACE_REMOVED + ERROR_NO_SUCH_INTERFACE_DEVICE Errno = ERROR_NO_SUCH_DEVICE_INTERFACE +) + +const ( + MAX_DEVICE_ID_LEN = 200 + MAX_DEVNODE_ID_LEN = MAX_DEVICE_ID_LEN + MAX_GUID_STRING_LEN = 39 // 38 chars + terminator null + MAX_CLASS_NAME_LEN = 32 + MAX_PROFILE_LEN = 80 + MAX_CONFIG_VALUE = 9999 + MAX_INSTANCE_VALUE = 9999 + CONFIGMG_VERSION = 0x0400 +) + +// Maximum string length constants +const ( + LINE_LEN = 256 // Windows 9x-compatible maximum for displayable strings coming from a device INF. + MAX_INF_STRING_LENGTH = 4096 // Actual maximum size of an INF string (including string substitutions). + MAX_INF_SECTION_NAME_LENGTH = 255 // For Windows 9x compatibility, INF section names should be constrained to 32 characters. + MAX_TITLE_LEN = 60 + MAX_INSTRUCTION_LEN = 256 + MAX_LABEL_LEN = 30 + MAX_SERVICE_NAME_LEN = 256 + MAX_SUBTITLE_LEN = 256 +) + +const ( + // SP_MAX_MACHINENAME_LENGTH defines maximum length of a machine name in the format expected by ConfigMgr32 CM_Connect_Machine (i.e., "\\\\MachineName\0"). + SP_MAX_MACHINENAME_LENGTH = MAX_PATH + 3 +) + +// HSPFILEQ is type for setup file queue +type HSPFILEQ uintptr + +// DevInfo holds reference to device information set +type DevInfo Handle + +// DEVINST is a handle usually recognized by cfgmgr32 APIs +type DEVINST uint32 + +// DevInfoData is a device information structure (references a device instance that is a member of a device information set) +type DevInfoData struct { + size uint32 + ClassGUID GUID + DevInst DEVINST + _ uintptr +} + +// DevInfoListDetailData is a structure for detailed information on a device information set (used for SetupDiGetDeviceInfoListDetail which supersedes the functionality of SetupDiGetDeviceInfoListClass). 
+type DevInfoListDetailData struct { + size uint32 // Use unsafeSizeOf method + ClassGUID GUID + RemoteMachineHandle Handle + remoteMachineName [SP_MAX_MACHINENAME_LENGTH]uint16 +} + +func (*DevInfoListDetailData) unsafeSizeOf() uint32 { + if unsafe.Sizeof(uintptr(0)) == 4 { + // Windows declares this with pshpack1.h + return uint32(unsafe.Offsetof(DevInfoListDetailData{}.remoteMachineName) + unsafe.Sizeof(DevInfoListDetailData{}.remoteMachineName)) + } + return uint32(unsafe.Sizeof(DevInfoListDetailData{})) +} + +func (data *DevInfoListDetailData) RemoteMachineName() string { + return UTF16ToString(data.remoteMachineName[:]) +} + +func (data *DevInfoListDetailData) SetRemoteMachineName(remoteMachineName string) error { + str, err := UTF16FromString(remoteMachineName) + if err != nil { + return err + } + copy(data.remoteMachineName[:], str) + return nil +} + +// DI_FUNCTION is function type for device installer +type DI_FUNCTION uint32 + +const ( + DIF_SELECTDEVICE DI_FUNCTION = 0x00000001 + DIF_INSTALLDEVICE DI_FUNCTION = 0x00000002 + DIF_ASSIGNRESOURCES DI_FUNCTION = 0x00000003 + DIF_PROPERTIES DI_FUNCTION = 0x00000004 + DIF_REMOVE DI_FUNCTION = 0x00000005 + DIF_FIRSTTIMESETUP DI_FUNCTION = 0x00000006 + DIF_FOUNDDEVICE DI_FUNCTION = 0x00000007 + DIF_SELECTCLASSDRIVERS DI_FUNCTION = 0x00000008 + DIF_VALIDATECLASSDRIVERS DI_FUNCTION = 0x00000009 + DIF_INSTALLCLASSDRIVERS DI_FUNCTION = 0x0000000A + DIF_CALCDISKSPACE DI_FUNCTION = 0x0000000B + DIF_DESTROYPRIVATEDATA DI_FUNCTION = 0x0000000C + DIF_VALIDATEDRIVER DI_FUNCTION = 0x0000000D + DIF_DETECT DI_FUNCTION = 0x0000000F + DIF_INSTALLWIZARD DI_FUNCTION = 0x00000010 + DIF_DESTROYWIZARDDATA DI_FUNCTION = 0x00000011 + DIF_PROPERTYCHANGE DI_FUNCTION = 0x00000012 + DIF_ENABLECLASS DI_FUNCTION = 0x00000013 + DIF_DETECTVERIFY DI_FUNCTION = 0x00000014 + DIF_INSTALLDEVICEFILES DI_FUNCTION = 0x00000015 + DIF_UNREMOVE DI_FUNCTION = 0x00000016 + DIF_SELECTBESTCOMPATDRV DI_FUNCTION = 0x00000017 + DIF_ALLOW_INSTALL DI_FUNCTION = 0x00000018 + DIF_REGISTERDEVICE DI_FUNCTION = 0x00000019 + DIF_NEWDEVICEWIZARD_PRESELECT DI_FUNCTION = 0x0000001A + DIF_NEWDEVICEWIZARD_SELECT DI_FUNCTION = 0x0000001B + DIF_NEWDEVICEWIZARD_PREANALYZE DI_FUNCTION = 0x0000001C + DIF_NEWDEVICEWIZARD_POSTANALYZE DI_FUNCTION = 0x0000001D + DIF_NEWDEVICEWIZARD_FINISHINSTALL DI_FUNCTION = 0x0000001E + DIF_INSTALLINTERFACES DI_FUNCTION = 0x00000020 + DIF_DETECTCANCEL DI_FUNCTION = 0x00000021 + DIF_REGISTER_COINSTALLERS DI_FUNCTION = 0x00000022 + DIF_ADDPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000023 + DIF_ADDPROPERTYPAGE_BASIC DI_FUNCTION = 0x00000024 + DIF_TROUBLESHOOTER DI_FUNCTION = 0x00000026 + DIF_POWERMESSAGEWAKE DI_FUNCTION = 0x00000027 + DIF_ADDREMOTEPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000028 + DIF_UPDATEDRIVER_UI DI_FUNCTION = 0x00000029 + DIF_FINISHINSTALL_ACTION DI_FUNCTION = 0x0000002A +) + +// DevInstallParams is device installation parameters structure (associated with a particular device information element, or globally with a device information set) +type DevInstallParams struct { + size uint32 + Flags DI_FLAGS + FlagsEx DI_FLAGSEX + hwndParent uintptr + InstallMsgHandler uintptr + InstallMsgHandlerContext uintptr + FileQueue HSPFILEQ + _ uintptr + _ uint32 + driverPath [MAX_PATH]uint16 +} + +func (params *DevInstallParams) DriverPath() string { + return UTF16ToString(params.driverPath[:]) +} + +func (params *DevInstallParams) SetDriverPath(driverPath string) error { + str, err := UTF16FromString(driverPath) + if err != nil { + return err + } + 
copy(params.driverPath[:], str) + return nil +} + +// DI_FLAGS is SP_DEVINSTALL_PARAMS.Flags values +type DI_FLAGS uint32 + +const ( + // Flags for choosing a device + DI_SHOWOEM DI_FLAGS = 0x00000001 // support Other... button + DI_SHOWCOMPAT DI_FLAGS = 0x00000002 // show compatibility list + DI_SHOWCLASS DI_FLAGS = 0x00000004 // show class list + DI_SHOWALL DI_FLAGS = 0x00000007 // both class & compat list shown + DI_NOVCP DI_FLAGS = 0x00000008 // don't create a new copy queue--use caller-supplied FileQueue + DI_DIDCOMPAT DI_FLAGS = 0x00000010 // Searched for compatible devices + DI_DIDCLASS DI_FLAGS = 0x00000020 // Searched for class devices + DI_AUTOASSIGNRES DI_FLAGS = 0x00000040 // No UI for resources if possible + + // Flags returned by DiInstallDevice to indicate need to reboot/restart + DI_NEEDRESTART DI_FLAGS = 0x00000080 // Reboot required to take effect + DI_NEEDREBOOT DI_FLAGS = 0x00000100 // "" + + // Flags for device installation + DI_NOBROWSE DI_FLAGS = 0x00000200 // no Browse... in InsertDisk + + // Flags set by DiBuildDriverInfoList + DI_MULTMFGS DI_FLAGS = 0x00000400 // Set if multiple manufacturers in class driver list + + // Flag indicates that device is disabled + DI_DISABLED DI_FLAGS = 0x00000800 // Set if device disabled + + // Flags for Device/Class Properties + DI_GENERALPAGE_ADDED DI_FLAGS = 0x00001000 + DI_RESOURCEPAGE_ADDED DI_FLAGS = 0x00002000 + + // Flag to indicate the setting properties for this Device (or class) caused a change so the Dev Mgr UI probably needs to be updated. + DI_PROPERTIES_CHANGE DI_FLAGS = 0x00004000 + + // Flag to indicate that the sorting from the INF file should be used. + DI_INF_IS_SORTED DI_FLAGS = 0x00008000 + + // Flag to indicate that only the the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. + DI_ENUMSINGLEINF DI_FLAGS = 0x00010000 + + // Flag that prevents ConfigMgr from removing/re-enumerating devices during device + // registration, installation, and deletion. + DI_DONOTCALLCONFIGMG DI_FLAGS = 0x00020000 + + // The following flag can be used to install a device disabled + DI_INSTALLDISABLED DI_FLAGS = 0x00040000 + + // Flag that causes SetupDiBuildDriverInfoList to build a device's compatible driver + // list from its existing class driver list, instead of the normal INF search. + DI_COMPAT_FROM_CLASS DI_FLAGS = 0x00080000 + + // This flag is set if the Class Install params should be used. + DI_CLASSINSTALLPARAMS DI_FLAGS = 0x00100000 + + // This flag is set if the caller of DiCallClassInstaller does NOT want the internal default action performed if the Class installer returns ERROR_DI_DO_DEFAULT. + DI_NODI_DEFAULTACTION DI_FLAGS = 0x00200000 + + // Flags for device installation + DI_QUIETINSTALL DI_FLAGS = 0x00800000 // don't confuse the user with questions or excess info + DI_NOFILECOPY DI_FLAGS = 0x01000000 // No file Copy necessary + DI_FORCECOPY DI_FLAGS = 0x02000000 // Force files to be copied from install path + DI_DRIVERPAGE_ADDED DI_FLAGS = 0x04000000 // Prop provider added Driver page. 
+ DI_USECI_SELECTSTRINGS DI_FLAGS = 0x08000000 // Use Class Installer Provided strings in the Select Device Dlg + DI_OVERRIDE_INFFLAGS DI_FLAGS = 0x10000000 // Override INF flags + DI_PROPS_NOCHANGEUSAGE DI_FLAGS = 0x20000000 // No Enable/Disable in General Props + + DI_NOSELECTICONS DI_FLAGS = 0x40000000 // No small icons in select device dialogs + + DI_NOWRITE_IDS DI_FLAGS = 0x80000000 // Don't write HW & Compat IDs on install +) + +// DI_FLAGSEX is SP_DEVINSTALL_PARAMS.FlagsEx values +type DI_FLAGSEX uint32 + +const ( + DI_FLAGSEX_CI_FAILED DI_FLAGSEX = 0x00000004 // Failed to Load/Call class installer + DI_FLAGSEX_FINISHINSTALL_ACTION DI_FLAGSEX = 0x00000008 // Class/co-installer wants to get a DIF_FINISH_INSTALL action in client context. + DI_FLAGSEX_DIDINFOLIST DI_FLAGSEX = 0x00000010 // Did the Class Info List + DI_FLAGSEX_DIDCOMPATINFO DI_FLAGSEX = 0x00000020 // Did the Compat Info List + DI_FLAGSEX_FILTERCLASSES DI_FLAGSEX = 0x00000040 + DI_FLAGSEX_SETFAILEDINSTALL DI_FLAGSEX = 0x00000080 + DI_FLAGSEX_DEVICECHANGE DI_FLAGSEX = 0x00000100 + DI_FLAGSEX_ALWAYSWRITEIDS DI_FLAGSEX = 0x00000200 + DI_FLAGSEX_PROPCHANGE_PENDING DI_FLAGSEX = 0x00000400 // One or more device property sheets have had changes made to them, and need to have a DIF_PROPERTYCHANGE occur. + DI_FLAGSEX_ALLOWEXCLUDEDDRVS DI_FLAGSEX = 0x00000800 + DI_FLAGSEX_NOUIONQUERYREMOVE DI_FLAGSEX = 0x00001000 + DI_FLAGSEX_USECLASSFORCOMPAT DI_FLAGSEX = 0x00002000 // Use the device's class when building compat drv list. (Ignored if DI_COMPAT_FROM_CLASS flag is specified.) + DI_FLAGSEX_NO_DRVREG_MODIFY DI_FLAGSEX = 0x00008000 // Don't run AddReg and DelReg for device's software (driver) key. + DI_FLAGSEX_IN_SYSTEM_SETUP DI_FLAGSEX = 0x00010000 // Installation is occurring during initial system setup. + DI_FLAGSEX_INET_DRIVER DI_FLAGSEX = 0x00020000 // Driver came from Windows Update + DI_FLAGSEX_APPENDDRIVERLIST DI_FLAGSEX = 0x00040000 // Cause SetupDiBuildDriverInfoList to append a new driver list to an existing list. + DI_FLAGSEX_PREINSTALLBACKUP DI_FLAGSEX = 0x00080000 // not used + DI_FLAGSEX_BACKUPONREPLACE DI_FLAGSEX = 0x00100000 // not used + DI_FLAGSEX_DRIVERLIST_FROM_URL DI_FLAGSEX = 0x00200000 // build driver list from INF(s) retrieved from URL specified in SP_DEVINSTALL_PARAMS.DriverPath (empty string means Windows Update website) + DI_FLAGSEX_EXCLUDE_OLD_INET_DRIVERS DI_FLAGSEX = 0x00800000 // Don't include old Internet drivers when building a driver list. Ignored on Windows Vista and later. + DI_FLAGSEX_POWERPAGE_ADDED DI_FLAGSEX = 0x01000000 // class installer added their own power page + DI_FLAGSEX_FILTERSIMILARDRIVERS DI_FLAGSEX = 0x02000000 // only include similar drivers in class list + DI_FLAGSEX_INSTALLEDDRIVER DI_FLAGSEX = 0x04000000 // only add the installed driver to the class or compat driver list. Used in calls to SetupDiBuildDriverInfoList + DI_FLAGSEX_NO_CLASSLIST_NODE_MERGE DI_FLAGSEX = 0x08000000 // Don't remove identical driver nodes from the class list + DI_FLAGSEX_ALTPLATFORM_DRVSEARCH DI_FLAGSEX = 0x10000000 // Build driver list based on alternate platform information specified in associated file queue + DI_FLAGSEX_RESTART_DEVICE_ONLY DI_FLAGSEX = 0x20000000 // only restart the device drivers are being installed on as opposed to restarting all devices using those drivers. 
+ DI_FLAGSEX_RECURSIVESEARCH DI_FLAGSEX = 0x40000000 // Tell SetupDiBuildDriverInfoList to do a recursive search + DI_FLAGSEX_SEARCH_PUBLISHED_INFS DI_FLAGSEX = 0x80000000 // Tell SetupDiBuildDriverInfoList to do a "published INF" search +) + +// ClassInstallHeader is the first member of any class install parameters structure. It contains the device installation request code that defines the format of the rest of the install parameters structure. +type ClassInstallHeader struct { + size uint32 + InstallFunction DI_FUNCTION +} + +func MakeClassInstallHeader(installFunction DI_FUNCTION) *ClassInstallHeader { + hdr := &ClassInstallHeader{InstallFunction: installFunction} + hdr.size = uint32(unsafe.Sizeof(*hdr)) + return hdr +} + +// DICS_STATE specifies values indicating a change in a device's state +type DICS_STATE uint32 + +const ( + DICS_ENABLE DICS_STATE = 0x00000001 // The device is being enabled. + DICS_DISABLE DICS_STATE = 0x00000002 // The device is being disabled. + DICS_PROPCHANGE DICS_STATE = 0x00000003 // The properties of the device have changed. + DICS_START DICS_STATE = 0x00000004 // The device is being started (if the request is for the currently active hardware profile). + DICS_STOP DICS_STATE = 0x00000005 // The device is being stopped. The driver stack will be unloaded and the CSCONFIGFLAG_DO_NOT_START flag will be set for the device. +) + +// DICS_FLAG specifies the scope of a device property change +type DICS_FLAG uint32 + +const ( + DICS_FLAG_GLOBAL DICS_FLAG = 0x00000001 // make change in all hardware profiles + DICS_FLAG_CONFIGSPECIFIC DICS_FLAG = 0x00000002 // make change in specified profile only + DICS_FLAG_CONFIGGENERAL DICS_FLAG = 0x00000004 // 1 or more hardware profile-specific changes to follow (obsolete) +) + +// PropChangeParams is a structure corresponding to a DIF_PROPERTYCHANGE install function. +type PropChangeParams struct { + ClassInstallHeader ClassInstallHeader + StateChange DICS_STATE + Scope DICS_FLAG + HwProfile uint32 +} + +// DI_REMOVEDEVICE specifies the scope of the device removal +type DI_REMOVEDEVICE uint32 + +const ( + DI_REMOVEDEVICE_GLOBAL DI_REMOVEDEVICE = 0x00000001 // Make this change in all hardware profiles. Remove information about the device from the registry. + DI_REMOVEDEVICE_CONFIGSPECIFIC DI_REMOVEDEVICE = 0x00000002 // Make this change to only the hardware profile specified by HwProfile. this flag only applies to root-enumerated devices. When Windows removes the device from the last hardware profile in which it was configured, Windows performs a global removal. +) + +// RemoveDeviceParams is a structure corresponding to a DIF_REMOVE install function. 
+type RemoveDeviceParams struct { + ClassInstallHeader ClassInstallHeader + Scope DI_REMOVEDEVICE + HwProfile uint32 +} + +// DrvInfoData is driver information structure (member of a driver info list that may be associated with a particular device instance, or (globally) with a device information set) +type DrvInfoData struct { + size uint32 + DriverType uint32 + _ uintptr + description [LINE_LEN]uint16 + mfgName [LINE_LEN]uint16 + providerName [LINE_LEN]uint16 + DriverDate Filetime + DriverVersion uint64 +} + +func (data *DrvInfoData) Description() string { + return UTF16ToString(data.description[:]) +} + +func (data *DrvInfoData) SetDescription(description string) error { + str, err := UTF16FromString(description) + if err != nil { + return err + } + copy(data.description[:], str) + return nil +} + +func (data *DrvInfoData) MfgName() string { + return UTF16ToString(data.mfgName[:]) +} + +func (data *DrvInfoData) SetMfgName(mfgName string) error { + str, err := UTF16FromString(mfgName) + if err != nil { + return err + } + copy(data.mfgName[:], str) + return nil +} + +func (data *DrvInfoData) ProviderName() string { + return UTF16ToString(data.providerName[:]) +} + +func (data *DrvInfoData) SetProviderName(providerName string) error { + str, err := UTF16FromString(providerName) + if err != nil { + return err + } + copy(data.providerName[:], str) + return nil +} + +// IsNewer method returns true if DrvInfoData date and version is newer than supplied parameters. +func (data *DrvInfoData) IsNewer(driverDate Filetime, driverVersion uint64) bool { + if data.DriverDate.HighDateTime > driverDate.HighDateTime { + return true + } + if data.DriverDate.HighDateTime < driverDate.HighDateTime { + return false + } + + if data.DriverDate.LowDateTime > driverDate.LowDateTime { + return true + } + if data.DriverDate.LowDateTime < driverDate.LowDateTime { + return false + } + + if data.DriverVersion > driverVersion { + return true + } + if data.DriverVersion < driverVersion { + return false + } + + return false +} + +// DrvInfoDetailData is driver information details structure (provides detailed information about a particular driver information structure) +type DrvInfoDetailData struct { + size uint32 // Use unsafeSizeOf method + InfDate Filetime + compatIDsOffset uint32 + compatIDsLength uint32 + _ uintptr + sectionName [LINE_LEN]uint16 + infFileName [MAX_PATH]uint16 + drvDescription [LINE_LEN]uint16 + hardwareID [1]uint16 +} + +func (*DrvInfoDetailData) unsafeSizeOf() uint32 { + if unsafe.Sizeof(uintptr(0)) == 4 { + // Windows declares this with pshpack1.h + return uint32(unsafe.Offsetof(DrvInfoDetailData{}.hardwareID) + unsafe.Sizeof(DrvInfoDetailData{}.hardwareID)) + } + return uint32(unsafe.Sizeof(DrvInfoDetailData{})) +} + +func (data *DrvInfoDetailData) SectionName() string { + return UTF16ToString(data.sectionName[:]) +} + +func (data *DrvInfoDetailData) InfFileName() string { + return UTF16ToString(data.infFileName[:]) +} + +func (data *DrvInfoDetailData) DrvDescription() string { + return UTF16ToString(data.drvDescription[:]) +} + +func (data *DrvInfoDetailData) HardwareID() string { + if data.compatIDsOffset > 1 { + bufW := data.getBuf() + return UTF16ToString(bufW[:wcslen(bufW)]) + } + + return "" +} + +func (data *DrvInfoDetailData) CompatIDs() []string { + a := make([]string, 0) + + if data.compatIDsLength > 0 { + bufW := data.getBuf() + bufW = bufW[data.compatIDsOffset : data.compatIDsOffset+data.compatIDsLength] + for i := 0; i < len(bufW); { + j := i + wcslen(bufW[i:]) + if i < j { + a = 
append(a, UTF16ToString(bufW[i:j])) + } + i = j + 1 + } + } + + return a +} + +func (data *DrvInfoDetailData) getBuf() []uint16 { + len := (data.size - uint32(unsafe.Offsetof(data.hardwareID))) / 2 + sl := struct { + addr *uint16 + len int + cap int + }{&data.hardwareID[0], int(len), int(len)} + return *(*[]uint16)(unsafe.Pointer(&sl)) +} + +// IsCompatible method tests if given hardware ID matches the driver or is listed on the compatible ID list. +func (data *DrvInfoDetailData) IsCompatible(hwid string) bool { + hwidLC := strings.ToLower(hwid) + if strings.ToLower(data.HardwareID()) == hwidLC { + return true + } + a := data.CompatIDs() + for i := range a { + if strings.ToLower(a[i]) == hwidLC { + return true + } + } + + return false +} + +// DICD flags control SetupDiCreateDeviceInfo +type DICD uint32 + +const ( + DICD_GENERATE_ID DICD = 0x00000001 + DICD_INHERIT_CLASSDRVS DICD = 0x00000002 +) + +// SUOI flags control SetupUninstallOEMInf +type SUOI uint32 + +const ( + SUOI_FORCEDELETE SUOI = 0x0001 +) + +// SPDIT flags to distinguish between class drivers and +// device drivers. (Passed in 'DriverType' parameter of +// driver information list APIs) +type SPDIT uint32 + +const ( + SPDIT_NODRIVER SPDIT = 0x00000000 + SPDIT_CLASSDRIVER SPDIT = 0x00000001 + SPDIT_COMPATDRIVER SPDIT = 0x00000002 +) + +// DIGCF flags control what is included in the device information set built by SetupDiGetClassDevs +type DIGCF uint32 + +const ( + DIGCF_DEFAULT DIGCF = 0x00000001 // only valid with DIGCF_DEVICEINTERFACE + DIGCF_PRESENT DIGCF = 0x00000002 + DIGCF_ALLCLASSES DIGCF = 0x00000004 + DIGCF_PROFILE DIGCF = 0x00000008 + DIGCF_DEVICEINTERFACE DIGCF = 0x00000010 +) + +// DIREG specifies values for SetupDiCreateDevRegKey, SetupDiOpenDevRegKey, and SetupDiDeleteDevRegKey. +type DIREG uint32 + +const ( + DIREG_DEV DIREG = 0x00000001 // Open/Create/Delete device key + DIREG_DRV DIREG = 0x00000002 // Open/Create/Delete driver key + DIREG_BOTH DIREG = 0x00000004 // Delete both driver and Device key +) + +// SPDRP specifies device registry property codes +// (Codes marked as read-only (R) may only be used for +// SetupDiGetDeviceRegistryProperty) +// +// These values should cover the same set of registry properties +// as defined by the CM_DRP codes in cfgmgr32.h. +// +// Note that SPDRP codes are zero based while CM_DRP codes are one based! 
+type SPDRP uint32 + +const ( + SPDRP_DEVICEDESC SPDRP = 0x00000000 // DeviceDesc (R/W) + SPDRP_HARDWAREID SPDRP = 0x00000001 // HardwareID (R/W) + SPDRP_COMPATIBLEIDS SPDRP = 0x00000002 // CompatibleIDs (R/W) + SPDRP_SERVICE SPDRP = 0x00000004 // Service (R/W) + SPDRP_CLASS SPDRP = 0x00000007 // Class (R--tied to ClassGUID) + SPDRP_CLASSGUID SPDRP = 0x00000008 // ClassGUID (R/W) + SPDRP_DRIVER SPDRP = 0x00000009 // Driver (R/W) + SPDRP_CONFIGFLAGS SPDRP = 0x0000000A // ConfigFlags (R/W) + SPDRP_MFG SPDRP = 0x0000000B // Mfg (R/W) + SPDRP_FRIENDLYNAME SPDRP = 0x0000000C // FriendlyName (R/W) + SPDRP_LOCATION_INFORMATION SPDRP = 0x0000000D // LocationInformation (R/W) + SPDRP_PHYSICAL_DEVICE_OBJECT_NAME SPDRP = 0x0000000E // PhysicalDeviceObjectName (R) + SPDRP_CAPABILITIES SPDRP = 0x0000000F // Capabilities (R) + SPDRP_UI_NUMBER SPDRP = 0x00000010 // UiNumber (R) + SPDRP_UPPERFILTERS SPDRP = 0x00000011 // UpperFilters (R/W) + SPDRP_LOWERFILTERS SPDRP = 0x00000012 // LowerFilters (R/W) + SPDRP_BUSTYPEGUID SPDRP = 0x00000013 // BusTypeGUID (R) + SPDRP_LEGACYBUSTYPE SPDRP = 0x00000014 // LegacyBusType (R) + SPDRP_BUSNUMBER SPDRP = 0x00000015 // BusNumber (R) + SPDRP_ENUMERATOR_NAME SPDRP = 0x00000016 // Enumerator Name (R) + SPDRP_SECURITY SPDRP = 0x00000017 // Security (R/W, binary form) + SPDRP_SECURITY_SDS SPDRP = 0x00000018 // Security (W, SDS form) + SPDRP_DEVTYPE SPDRP = 0x00000019 // Device Type (R/W) + SPDRP_EXCLUSIVE SPDRP = 0x0000001A // Device is exclusive-access (R/W) + SPDRP_CHARACTERISTICS SPDRP = 0x0000001B // Device Characteristics (R/W) + SPDRP_ADDRESS SPDRP = 0x0000001C // Device Address (R) + SPDRP_UI_NUMBER_DESC_FORMAT SPDRP = 0x0000001D // UiNumberDescFormat (R/W) + SPDRP_DEVICE_POWER_DATA SPDRP = 0x0000001E // Device Power Data (R) + SPDRP_REMOVAL_POLICY SPDRP = 0x0000001F // Removal Policy (R) + SPDRP_REMOVAL_POLICY_HW_DEFAULT SPDRP = 0x00000020 // Hardware Removal Policy (R) + SPDRP_REMOVAL_POLICY_OVERRIDE SPDRP = 0x00000021 // Removal Policy Override (RW) + SPDRP_INSTALL_STATE SPDRP = 0x00000022 // Device Install State (R) + SPDRP_LOCATION_PATHS SPDRP = 0x00000023 // Device Location Paths (R) + SPDRP_BASE_CONTAINERID SPDRP = 0x00000024 // Base ContainerID (R) + + SPDRP_MAXIMUM_PROPERTY SPDRP = 0x00000025 // Upper bound on ordinals +) + +// DEVPROPTYPE represents the property-data-type identifier that specifies the +// data type of a device property value in the unified device property model. 
+type DEVPROPTYPE uint32 + +const ( + DEVPROP_TYPEMOD_ARRAY DEVPROPTYPE = 0x00001000 + DEVPROP_TYPEMOD_LIST DEVPROPTYPE = 0x00002000 + + DEVPROP_TYPE_EMPTY DEVPROPTYPE = 0x00000000 + DEVPROP_TYPE_NULL DEVPROPTYPE = 0x00000001 + DEVPROP_TYPE_SBYTE DEVPROPTYPE = 0x00000002 + DEVPROP_TYPE_BYTE DEVPROPTYPE = 0x00000003 + DEVPROP_TYPE_INT16 DEVPROPTYPE = 0x00000004 + DEVPROP_TYPE_UINT16 DEVPROPTYPE = 0x00000005 + DEVPROP_TYPE_INT32 DEVPROPTYPE = 0x00000006 + DEVPROP_TYPE_UINT32 DEVPROPTYPE = 0x00000007 + DEVPROP_TYPE_INT64 DEVPROPTYPE = 0x00000008 + DEVPROP_TYPE_UINT64 DEVPROPTYPE = 0x00000009 + DEVPROP_TYPE_FLOAT DEVPROPTYPE = 0x0000000A + DEVPROP_TYPE_DOUBLE DEVPROPTYPE = 0x0000000B + DEVPROP_TYPE_DECIMAL DEVPROPTYPE = 0x0000000C + DEVPROP_TYPE_GUID DEVPROPTYPE = 0x0000000D + DEVPROP_TYPE_CURRENCY DEVPROPTYPE = 0x0000000E + DEVPROP_TYPE_DATE DEVPROPTYPE = 0x0000000F + DEVPROP_TYPE_FILETIME DEVPROPTYPE = 0x00000010 + DEVPROP_TYPE_BOOLEAN DEVPROPTYPE = 0x00000011 + DEVPROP_TYPE_STRING DEVPROPTYPE = 0x00000012 + DEVPROP_TYPE_STRING_LIST DEVPROPTYPE = DEVPROP_TYPE_STRING | DEVPROP_TYPEMOD_LIST + DEVPROP_TYPE_SECURITY_DESCRIPTOR DEVPROPTYPE = 0x00000013 + DEVPROP_TYPE_SECURITY_DESCRIPTOR_STRING DEVPROPTYPE = 0x00000014 + DEVPROP_TYPE_DEVPROPKEY DEVPROPTYPE = 0x00000015 + DEVPROP_TYPE_DEVPROPTYPE DEVPROPTYPE = 0x00000016 + DEVPROP_TYPE_BINARY DEVPROPTYPE = DEVPROP_TYPE_BYTE | DEVPROP_TYPEMOD_ARRAY + DEVPROP_TYPE_ERROR DEVPROPTYPE = 0x00000017 + DEVPROP_TYPE_NTSTATUS DEVPROPTYPE = 0x00000018 + DEVPROP_TYPE_STRING_INDIRECT DEVPROPTYPE = 0x00000019 + + MAX_DEVPROP_TYPE DEVPROPTYPE = 0x00000019 + MAX_DEVPROP_TYPEMOD DEVPROPTYPE = 0x00002000 + + DEVPROP_MASK_TYPE DEVPROPTYPE = 0x00000FFF + DEVPROP_MASK_TYPEMOD DEVPROPTYPE = 0x0000F000 +) + +// DEVPROPGUID specifies a property category. +type DEVPROPGUID GUID + +// DEVPROPID uniquely identifies the property within the property category. +type DEVPROPID uint32 + +const DEVPROPID_FIRST_USABLE DEVPROPID = 2 + +// DEVPROPKEY represents a device property key for a device property in the +// unified device property model. 
+type DEVPROPKEY struct { + FmtID DEVPROPGUID + PID DEVPROPID +} + +// CONFIGRET is a return value or error code from cfgmgr32 APIs +type CONFIGRET uint32 + +func (ret CONFIGRET) Error() string { + if win32Error, ok := ret.Unwrap().(Errno); ok { + return fmt.Sprintf("%s (CfgMgr error: 0x%08x)", win32Error.Error(), uint32(ret)) + } + return fmt.Sprintf("CfgMgr error: 0x%08x", uint32(ret)) +} + +func (ret CONFIGRET) Win32Error(defaultError Errno) Errno { + return cm_MapCrToWin32Err(ret, defaultError) +} + +func (ret CONFIGRET) Unwrap() error { + const noMatch = Errno(^uintptr(0)) + win32Error := ret.Win32Error(noMatch) + if win32Error == noMatch { + return nil + } + return win32Error +} + +const ( + CR_SUCCESS CONFIGRET = 0x00000000 + CR_DEFAULT CONFIGRET = 0x00000001 + CR_OUT_OF_MEMORY CONFIGRET = 0x00000002 + CR_INVALID_POINTER CONFIGRET = 0x00000003 + CR_INVALID_FLAG CONFIGRET = 0x00000004 + CR_INVALID_DEVNODE CONFIGRET = 0x00000005 + CR_INVALID_DEVINST = CR_INVALID_DEVNODE + CR_INVALID_RES_DES CONFIGRET = 0x00000006 + CR_INVALID_LOG_CONF CONFIGRET = 0x00000007 + CR_INVALID_ARBITRATOR CONFIGRET = 0x00000008 + CR_INVALID_NODELIST CONFIGRET = 0x00000009 + CR_DEVNODE_HAS_REQS CONFIGRET = 0x0000000A + CR_DEVINST_HAS_REQS = CR_DEVNODE_HAS_REQS + CR_INVALID_RESOURCEID CONFIGRET = 0x0000000B + CR_DLVXD_NOT_FOUND CONFIGRET = 0x0000000C + CR_NO_SUCH_DEVNODE CONFIGRET = 0x0000000D + CR_NO_SUCH_DEVINST = CR_NO_SUCH_DEVNODE + CR_NO_MORE_LOG_CONF CONFIGRET = 0x0000000E + CR_NO_MORE_RES_DES CONFIGRET = 0x0000000F + CR_ALREADY_SUCH_DEVNODE CONFIGRET = 0x00000010 + CR_ALREADY_SUCH_DEVINST = CR_ALREADY_SUCH_DEVNODE + CR_INVALID_RANGE_LIST CONFIGRET = 0x00000011 + CR_INVALID_RANGE CONFIGRET = 0x00000012 + CR_FAILURE CONFIGRET = 0x00000013 + CR_NO_SUCH_LOGICAL_DEV CONFIGRET = 0x00000014 + CR_CREATE_BLOCKED CONFIGRET = 0x00000015 + CR_NOT_SYSTEM_VM CONFIGRET = 0x00000016 + CR_REMOVE_VETOED CONFIGRET = 0x00000017 + CR_APM_VETOED CONFIGRET = 0x00000018 + CR_INVALID_LOAD_TYPE CONFIGRET = 0x00000019 + CR_BUFFER_SMALL CONFIGRET = 0x0000001A + CR_NO_ARBITRATOR CONFIGRET = 0x0000001B + CR_NO_REGISTRY_HANDLE CONFIGRET = 0x0000001C + CR_REGISTRY_ERROR CONFIGRET = 0x0000001D + CR_INVALID_DEVICE_ID CONFIGRET = 0x0000001E + CR_INVALID_DATA CONFIGRET = 0x0000001F + CR_INVALID_API CONFIGRET = 0x00000020 + CR_DEVLOADER_NOT_READY CONFIGRET = 0x00000021 + CR_NEED_RESTART CONFIGRET = 0x00000022 + CR_NO_MORE_HW_PROFILES CONFIGRET = 0x00000023 + CR_DEVICE_NOT_THERE CONFIGRET = 0x00000024 + CR_NO_SUCH_VALUE CONFIGRET = 0x00000025 + CR_WRONG_TYPE CONFIGRET = 0x00000026 + CR_INVALID_PRIORITY CONFIGRET = 0x00000027 + CR_NOT_DISABLEABLE CONFIGRET = 0x00000028 + CR_FREE_RESOURCES CONFIGRET = 0x00000029 + CR_QUERY_VETOED CONFIGRET = 0x0000002A + CR_CANT_SHARE_IRQ CONFIGRET = 0x0000002B + CR_NO_DEPENDENT CONFIGRET = 0x0000002C + CR_SAME_RESOURCES CONFIGRET = 0x0000002D + CR_NO_SUCH_REGISTRY_KEY CONFIGRET = 0x0000002E + CR_INVALID_MACHINENAME CONFIGRET = 0x0000002F + CR_REMOTE_COMM_FAILURE CONFIGRET = 0x00000030 + CR_MACHINE_UNAVAILABLE CONFIGRET = 0x00000031 + CR_NO_CM_SERVICES CONFIGRET = 0x00000032 + CR_ACCESS_DENIED CONFIGRET = 0x00000033 + CR_CALL_NOT_IMPLEMENTED CONFIGRET = 0x00000034 + CR_INVALID_PROPERTY CONFIGRET = 0x00000035 + CR_DEVICE_INTERFACE_ACTIVE CONFIGRET = 0x00000036 + CR_NO_SUCH_DEVICE_INTERFACE CONFIGRET = 0x00000037 + CR_INVALID_REFERENCE_STRING CONFIGRET = 0x00000038 + CR_INVALID_CONFLICT_LIST CONFIGRET = 0x00000039 + CR_INVALID_INDEX CONFIGRET = 0x0000003A + CR_INVALID_STRUCTURE_SIZE CONFIGRET = 0x0000003B + 
NUM_CR_RESULTS CONFIGRET = 0x0000003C +) + +const ( + CM_GET_DEVICE_INTERFACE_LIST_PRESENT = 0 // only currently 'live' device interfaces + CM_GET_DEVICE_INTERFACE_LIST_ALL_DEVICES = 1 // all registered device interfaces, live or not +) + +const ( + DN_ROOT_ENUMERATED = 0x00000001 // Was enumerated by ROOT + DN_DRIVER_LOADED = 0x00000002 // Has Register_Device_Driver + DN_ENUM_LOADED = 0x00000004 // Has Register_Enumerator + DN_STARTED = 0x00000008 // Is currently configured + DN_MANUAL = 0x00000010 // Manually installed + DN_NEED_TO_ENUM = 0x00000020 // May need reenumeration + DN_NOT_FIRST_TIME = 0x00000040 // Has received a config + DN_HARDWARE_ENUM = 0x00000080 // Enum generates hardware ID + DN_LIAR = 0x00000100 // Lied about can reconfig once + DN_HAS_MARK = 0x00000200 // Not CM_Create_DevInst lately + DN_HAS_PROBLEM = 0x00000400 // Need device installer + DN_FILTERED = 0x00000800 // Is filtered + DN_MOVED = 0x00001000 // Has been moved + DN_DISABLEABLE = 0x00002000 // Can be disabled + DN_REMOVABLE = 0x00004000 // Can be removed + DN_PRIVATE_PROBLEM = 0x00008000 // Has a private problem + DN_MF_PARENT = 0x00010000 // Multi function parent + DN_MF_CHILD = 0x00020000 // Multi function child + DN_WILL_BE_REMOVED = 0x00040000 // DevInst is being removed + DN_NOT_FIRST_TIMEE = 0x00080000 // Has received a config enumerate + DN_STOP_FREE_RES = 0x00100000 // When child is stopped, free resources + DN_REBAL_CANDIDATE = 0x00200000 // Don't skip during rebalance + DN_BAD_PARTIAL = 0x00400000 // This devnode's log_confs do not have same resources + DN_NT_ENUMERATOR = 0x00800000 // This devnode's is an NT enumerator + DN_NT_DRIVER = 0x01000000 // This devnode's is an NT driver + DN_NEEDS_LOCKING = 0x02000000 // Devnode need lock resume processing + DN_ARM_WAKEUP = 0x04000000 // Devnode can be the wakeup device + DN_APM_ENUMERATOR = 0x08000000 // APM aware enumerator + DN_APM_DRIVER = 0x10000000 // APM aware driver + DN_SILENT_INSTALL = 0x20000000 // Silent install + DN_NO_SHOW_IN_DM = 0x40000000 // No show in device manager + DN_BOOT_LOG_PROB = 0x80000000 // Had a problem during preassignment of boot log conf + DN_NEED_RESTART = DN_LIAR // System needs to be restarted for this Devnode to work properly + DN_DRIVER_BLOCKED = DN_NOT_FIRST_TIME // One or more drivers are blocked from loading for this Devnode + DN_LEGACY_DRIVER = DN_MOVED // This device is using a legacy driver + DN_CHILD_WITH_INVALID_ID = DN_HAS_MARK // One or more children have invalid IDs + DN_DEVICE_DISCONNECTED = DN_NEEDS_LOCKING // The function driver for a device reported that the device is not connected. Typically this means a wireless device is out of range. 
+ DN_QUERY_REMOVE_PENDING = DN_MF_PARENT // Device is part of a set of related devices collectively pending query-removal + DN_QUERY_REMOVE_ACTIVE = DN_MF_CHILD // Device is actively engaged in a query-remove IRP + DN_CHANGEABLE_FLAGS = DN_NOT_FIRST_TIME | DN_HARDWARE_ENUM | DN_HAS_MARK | DN_DISABLEABLE | DN_REMOVABLE | DN_MF_CHILD | DN_MF_PARENT | DN_NOT_FIRST_TIMEE | DN_STOP_FREE_RES | DN_REBAL_CANDIDATE | DN_NT_ENUMERATOR | DN_NT_DRIVER | DN_SILENT_INSTALL | DN_NO_SHOW_IN_DM +) + +//sys setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiCreateDeviceInfoListExW + +// SetupDiCreateDeviceInfoListEx function creates an empty device information set on a remote or a local computer and optionally associates the set with a device setup class. +func SetupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName string) (deviceInfoSet DevInfo, err error) { + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + return setupDiCreateDeviceInfoListEx(classGUID, hwndParent, machineNameUTF16, 0) +} + +//sys setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) = setupapi.SetupDiGetDeviceInfoListDetailW + +// SetupDiGetDeviceInfoListDetail function retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. +func SetupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo) (deviceInfoSetDetailData *DevInfoListDetailData, err error) { + data := &DevInfoListDetailData{} + data.size = data.unsafeSizeOf() + + return data, setupDiGetDeviceInfoListDetail(deviceInfoSet, data) +} + +// DeviceInfoListDetail method retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. +func (deviceInfoSet DevInfo) DeviceInfoListDetail() (*DevInfoListDetailData, error) { + return SetupDiGetDeviceInfoListDetail(deviceInfoSet) +} + +//sys setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCreateDeviceInfoW + +// SetupDiCreateDeviceInfo function creates a new device information element and adds it as a new member to the specified device information set. +func SetupDiCreateDeviceInfo(deviceInfoSet DevInfo, deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (deviceInfoData *DevInfoData, err error) { + deviceNameUTF16, err := UTF16PtrFromString(deviceName) + if err != nil { + return + } + + var deviceDescriptionUTF16 *uint16 + if deviceDescription != "" { + deviceDescriptionUTF16, err = UTF16PtrFromString(deviceDescription) + if err != nil { + return + } + } + + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiCreateDeviceInfo(deviceInfoSet, deviceNameUTF16, classGUID, deviceDescriptionUTF16, hwndParent, creationFlags, data) +} + +// CreateDeviceInfo method creates a new device information element and adds it as a new member to the specified device information set. 
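The CONFIGRET plumbing added above makes cfgmgr32 status codes usable as ordinary Go errors: Error() renders both the CR_* code and any mapped Win32 error, and Unwrap() exposes the mapped Errno so errors.As can reach it. A minimal sketch of that flow, assuming only the wrappers shown in this file (the zero DEVINST is a placeholder, not a real devnode handle):

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	var status, problem uint32
	// A real DEVINST would come from device enumeration; 0 simply exercises
	// the error path here.
	err := windows.CM_Get_DevNode_Status(&status, &problem, 0, 0)
	if err != nil {
		var errno windows.Errno
		if errors.As(err, &errno) {
			fmt.Printf("cfgmgr32 call failed, mapped win32 errno %d: %v\n", errno, err)
			return
		}
		fmt.Println("cfgmgr32 call failed:", err)
		return
	}
	fmt.Printf("devnode status=%#08x problem=%d\n", status, problem)
}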
+func (deviceInfoSet DevInfo) CreateDeviceInfo(deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (*DevInfoData, error) { + return SetupDiCreateDeviceInfo(deviceInfoSet, deviceName, classGUID, deviceDescription, hwndParent, creationFlags) +} + +//sys setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiEnumDeviceInfo + +// SetupDiEnumDeviceInfo function returns a DevInfoData structure that specifies a device information element in a device information set. +func SetupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex int) (*DevInfoData, error) { + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiEnumDeviceInfo(deviceInfoSet, uint32(memberIndex), data) +} + +// EnumDeviceInfo method returns a DevInfoData structure that specifies a device information element in a device information set. +func (deviceInfoSet DevInfo) EnumDeviceInfo(memberIndex int) (*DevInfoData, error) { + return SetupDiEnumDeviceInfo(deviceInfoSet, memberIndex) +} + +// SetupDiDestroyDeviceInfoList function deletes a device information set and frees all associated memory. +//sys SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiDestroyDeviceInfoList + +// Close method deletes a device information set and frees all associated memory. +func (deviceInfoSet DevInfo) Close() error { + return SetupDiDestroyDeviceInfoList(deviceInfoSet) +} + +//sys SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiBuildDriverInfoList + +// BuildDriverInfoList method builds a list of drivers that is associated with a specific device or with the global class driver list for a device information set. +func (deviceInfoSet DevInfo) BuildDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { + return SetupDiBuildDriverInfoList(deviceInfoSet, deviceInfoData, driverType) +} + +//sys SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiCancelDriverInfoSearch + +// CancelDriverInfoSearch method cancels a driver list search that is currently in progress in a different thread. +func (deviceInfoSet DevInfo) CancelDriverInfoSearch() error { + return SetupDiCancelDriverInfoSearch(deviceInfoSet) +} + +//sys setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiEnumDriverInfoW + +// SetupDiEnumDriverInfo function enumerates the members of a driver list. +func SetupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { + data := &DrvInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, uint32(memberIndex), data) +} + +// EnumDriverInfo method enumerates the members of a driver list. 
+func (deviceInfoSet DevInfo) EnumDriverInfo(deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { + return SetupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, memberIndex) +} + +//sys setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiGetSelectedDriverW + +// SetupDiGetSelectedDriver function retrieves the selected driver for a device information set or a particular device information element. +func SetupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DrvInfoData, error) { + data := &DrvInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiGetSelectedDriver(deviceInfoSet, deviceInfoData, data) +} + +// SelectedDriver method retrieves the selected driver for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SelectedDriver(deviceInfoData *DevInfoData) (*DrvInfoData, error) { + return SetupDiGetSelectedDriver(deviceInfoSet, deviceInfoData) +} + +//sys SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiSetSelectedDriverW + +// SetSelectedDriver method sets, or resets, the selected driver for a device information element or the selected class driver for a device information set. +func (deviceInfoSet DevInfo) SetSelectedDriver(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) error { + return SetupDiSetSelectedDriver(deviceInfoSet, deviceInfoData, driverInfoData) +} + +//sys setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDriverInfoDetailW + +// SetupDiGetDriverInfoDetail function retrieves driver information detail for a device information set or a particular device information element in the device information set. +func SetupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { + reqSize := uint32(2048) + for { + buf := make([]byte, reqSize) + data := (*DrvInfoDetailData)(unsafe.Pointer(&buf[0])) + data.size = data.unsafeSizeOf() + err := setupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData, data, uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return nil, err + } + data.size = reqSize + return data, nil + } +} + +// DriverInfoDetail method retrieves driver information detail for a device information set or a particular device information element in the device information set. +func (deviceInfoSet DevInfo) DriverInfoDetail(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { + return SetupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData) +} + +//sys SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiDestroyDriverInfoList + +// DestroyDriverInfoList method deletes a driver list. 
+func (deviceInfoSet DevInfo) DestroyDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { + return SetupDiDestroyDriverInfoList(deviceInfoSet, deviceInfoData, driverType) +} + +//sys setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiGetClassDevsExW + +// SetupDiGetClassDevsEx function returns a handle to a device information set that contains requested device information elements for a local or a remote computer. +func SetupDiGetClassDevsEx(classGUID *GUID, enumerator string, hwndParent uintptr, flags DIGCF, deviceInfoSet DevInfo, machineName string) (handle DevInfo, err error) { + var enumeratorUTF16 *uint16 + if enumerator != "" { + enumeratorUTF16, err = UTF16PtrFromString(enumerator) + if err != nil { + return + } + } + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + return setupDiGetClassDevsEx(classGUID, enumeratorUTF16, hwndParent, flags, deviceInfoSet, machineNameUTF16, 0) +} + +// SetupDiCallClassInstaller function calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). +//sys SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCallClassInstaller + +// CallClassInstaller member calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). +func (deviceInfoSet DevInfo) CallClassInstaller(installFunction DI_FUNCTION, deviceInfoData *DevInfoData) error { + return SetupDiCallClassInstaller(installFunction, deviceInfoSet, deviceInfoData) +} + +// SetupDiOpenDevRegKey function opens a registry key for device-specific configuration information. +//sys SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) [failretval==InvalidHandle] = setupapi.SetupDiOpenDevRegKey + +// OpenDevRegKey method opens a registry key for device-specific configuration information. +func (deviceInfoSet DevInfo) OpenDevRegKey(DeviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (Handle, error) { + return SetupDiOpenDevRegKey(deviceInfoSet, DeviceInfoData, Scope, HwProfile, KeyType, samDesired) +} + +//sys setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) = setupapi.SetupDiGetDevicePropertyW + +// SetupDiGetDeviceProperty function retrieves a specified device instance property. 
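Taken together, these wrappers support the usual SetupAPI enumeration loop. A rough sketch, assuming the DIGCF_* and SPDRP_* constants declared elsewhere in this package and the DeviceRegistryProperty helper defined further down in this file:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// nil class GUID plus DIGCF_ALLCLASSES enumerates every present device.
	devInfo, err := windows.SetupDiGetClassDevsEx(nil, "", 0,
		windows.DIGCF_ALLCLASSES|windows.DIGCF_PRESENT, 0, "")
	if err != nil {
		log.Fatal(err)
	}
	defer devInfo.Close()

	for i := 0; ; i++ {
		devData, err := devInfo.EnumDeviceInfo(i)
		if err == windows.ERROR_NO_MORE_ITEMS {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if desc, err := devInfo.DeviceRegistryProperty(devData, windows.SPDRP_DEVICEDESC); err == nil {
			fmt.Println(desc)
		}
	}
}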
+func SetupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY) (value interface{}, err error) { + reqSize := uint32(256) + for { + var dataType DEVPROPTYPE + buf := make([]byte, reqSize) + err = setupDiGetDeviceProperty(deviceInfoSet, deviceInfoData, propertyKey, &dataType, &buf[0], uint32(len(buf)), &reqSize, 0) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return + } + switch dataType { + case DEVPROP_TYPE_STRING: + ret := UTF16ToString(bufToUTF16(buf)) + runtime.KeepAlive(buf) + return ret, nil + } + return nil, errors.New("unimplemented property type") + } +} + +//sys setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceRegistryPropertyW + +// SetupDiGetDeviceRegistryProperty function retrieves a specified Plug and Play device property. +func SetupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP) (value interface{}, err error) { + reqSize := uint32(256) + for { + var dataType uint32 + buf := make([]byte, reqSize) + err = setupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &dataType, &buf[0], uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return + } + return getRegistryValue(buf[:reqSize], dataType) + } +} + +func getRegistryValue(buf []byte, dataType uint32) (interface{}, error) { + switch dataType { + case REG_SZ: + ret := UTF16ToString(bufToUTF16(buf)) + runtime.KeepAlive(buf) + return ret, nil + case REG_EXPAND_SZ: + value := UTF16ToString(bufToUTF16(buf)) + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + ret := make([]uint16, 100) + for { + n, err := ExpandEnvironmentStrings(p, &ret[0], uint32(len(ret))) + if err != nil { + return "", err + } + if n <= uint32(len(ret)) { + return UTF16ToString(ret[:n]), nil + } + ret = make([]uint16, n) + } + case REG_BINARY: + return buf, nil + case REG_DWORD_LITTLE_ENDIAN: + return binary.LittleEndian.Uint32(buf), nil + case REG_DWORD_BIG_ENDIAN: + return binary.BigEndian.Uint32(buf), nil + case REG_MULTI_SZ: + bufW := bufToUTF16(buf) + a := []string{} + for i := 0; i < len(bufW); { + j := i + wcslen(bufW[i:]) + if i < j { + a = append(a, UTF16ToString(bufW[i:j])) + } + i = j + 1 + } + runtime.KeepAlive(buf) + return a, nil + case REG_QWORD_LITTLE_ENDIAN: + return binary.LittleEndian.Uint64(buf), nil + default: + return nil, fmt.Errorf("Unsupported registry value type: %v", dataType) + } +} + +// bufToUTF16 function reinterprets []byte buffer as []uint16 +func bufToUTF16(buf []byte) []uint16 { + sl := struct { + addr *uint16 + len int + cap int + }{(*uint16)(unsafe.Pointer(&buf[0])), len(buf) / 2, cap(buf) / 2} + return *(*[]uint16)(unsafe.Pointer(&sl)) +} + +// utf16ToBuf function reinterprets []uint16 as []byte +func utf16ToBuf(buf []uint16) []byte { + sl := struct { + addr *byte + len int + cap int + }{(*byte)(unsafe.Pointer(&buf[0])), len(buf) * 2, cap(buf) * 2} + return *(*[]byte)(unsafe.Pointer(&sl)) +} + +func wcslen(str []uint16) int { + for i := 0; i < len(str); i++ { + if str[i] == 0 { + return i + } + } + return len(str) +} + +// DeviceRegistryProperty method retrieves a specified Plug and Play device property. 
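Because getRegistryValue maps each registry value type to a different Go type, callers of SetupDiGetDeviceRegistryProperty receive an interface{} and typically dispatch on its dynamic type, for example:

package regprops

import "fmt"

// describeProperty shows the dynamic types produced by getRegistryValue above.
func describeProperty(prop interface{}) {
	switch v := prop.(type) {
	case string: // REG_SZ and REG_EXPAND_SZ (already expanded)
		fmt.Println("string:", v)
	case []string: // REG_MULTI_SZ
		fmt.Println("string list:", v)
	case uint32: // REG_DWORD, either byte order
		fmt.Printf("dword: %#08x\n", v)
	case uint64: // REG_QWORD
		fmt.Printf("qword: %#016x\n", v)
	case []byte: // REG_BINARY
		fmt.Printf("binary: % x\n", v)
	default:
		fmt.Printf("unexpected type %T\n", v)
	}
}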
+func (deviceInfoSet DevInfo) DeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP) (interface{}, error) { + return SetupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property) +} + +//sys setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) = setupapi.SetupDiSetDeviceRegistryPropertyW + +// SetupDiSetDeviceRegistryProperty function sets a Plug and Play device property for a device. +func SetupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { + return setupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &propertyBuffers[0], uint32(len(propertyBuffers))) +} + +// SetDeviceRegistryProperty function sets a Plug and Play device property for a device. +func (deviceInfoSet DevInfo) SetDeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { + return SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, propertyBuffers) +} + +// SetDeviceRegistryPropertyString method sets a Plug and Play device property string for a device. +func (deviceInfoSet DevInfo) SetDeviceRegistryPropertyString(deviceInfoData *DevInfoData, property SPDRP, str string) error { + str16, err := UTF16FromString(str) + if err != nil { + return err + } + err = SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, utf16ToBuf(append(str16, 0))) + runtime.KeepAlive(str16) + return err +} + +//sys setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiGetDeviceInstallParamsW + +// SetupDiGetDeviceInstallParams function retrieves device installation parameters for a device information set or a particular device information element. +func SetupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DevInstallParams, error) { + params := &DevInstallParams{} + params.size = uint32(unsafe.Sizeof(*params)) + + return params, setupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData, params) +} + +// DeviceInstallParams method retrieves device installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) DeviceInstallParams(deviceInfoData *DevInfoData) (*DevInstallParams, error) { + return SetupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData) +} + +//sys setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceInstanceIdW + +// SetupDiGetDeviceInstanceId function retrieves the instance ID of the device. +func SetupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (string, error) { + reqSize := uint32(1024) + for { + buf := make([]uint16, reqSize) + err := setupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData, &buf[0], uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return "", err + } + return UTF16ToString(buf), nil + } +} + +// DeviceInstanceID method retrieves the instance ID of the device. 
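For a single device element, the instance ID helper and the unified device property model combine naturally. A hedged sketch: the DEVPROPKEY literal below is the well-known DEVPKEY_Device_FriendlyName from devpkey.h, which is not defined in this file, so treat it as an assumption to verify against the SDK headers.

package devinfo

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// Assumed key: DEVPKEY_Device_FriendlyName ({a45c254e-df1c-4efd-8020-67d146a850e0}, 14).
var deviceFriendlyName = windows.DEVPROPKEY{
	FmtID: windows.DEVPROPGUID{
		Data1: 0xa45c254e, Data2: 0xdf1c, Data3: 0x4efd,
		Data4: [8]byte{0x80, 0x20, 0x67, 0xd1, 0x46, 0xa8, 0x50, 0xe0},
	},
	PID: 14,
}

func describeDevice(devInfo windows.DevInfo, devData *windows.DevInfoData) {
	if id, err := devInfo.DeviceInstanceID(devData); err == nil {
		fmt.Println("instance ID:", id)
	}
	// SetupDiGetDeviceProperty above decodes only DEVPROP_TYPE_STRING for now,
	// which is exactly what a friendly name is.
	if name, err := windows.SetupDiGetDeviceProperty(devInfo, devData, &deviceFriendlyName); err == nil {
		fmt.Println("friendly name:", name)
	}
}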
+func (deviceInfoSet DevInfo) DeviceInstanceID(deviceInfoData *DevInfoData) (string, error) { + return SetupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData) +} + +// SetupDiGetClassInstallParams function retrieves class installation parameters for a device information set or a particular device information element. +//sys SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetClassInstallParamsW + +// ClassInstallParams method retrieves class installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) ClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) error { + return SetupDiGetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize, requiredSize) +} + +//sys SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiSetDeviceInstallParamsW + +// SetDeviceInstallParams member sets device installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SetDeviceInstallParams(deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) error { + return SetupDiSetDeviceInstallParams(deviceInfoSet, deviceInfoData, deviceInstallParams) +} + +// SetupDiSetClassInstallParams function sets or clears class install parameters for a device information set or a particular device information element. +//sys SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) = setupapi.SetupDiSetClassInstallParamsW + +// SetClassInstallParams method sets or clears class install parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SetClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) error { + return SetupDiSetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize) +} + +//sys setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassNameFromGuidExW + +// SetupDiClassNameFromGuidEx function retrieves the class name associated with a class GUID. The class can be installed on a local or remote computer. +func SetupDiClassNameFromGuidEx(classGUID *GUID, machineName string) (className string, err error) { + var classNameUTF16 [MAX_CLASS_NAME_LEN]uint16 + + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + + err = setupDiClassNameFromGuidEx(classGUID, &classNameUTF16[0], MAX_CLASS_NAME_LEN, nil, machineNameUTF16, 0) + if err != nil { + return + } + + className = UTF16ToString(classNameUTF16[:]) + return +} + +//sys setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassGuidsFromNameExW + +// SetupDiClassGuidsFromNameEx function retrieves the GUIDs associated with the specified class name. 
This resulting list contains the classes currently installed on a local or remote computer. +func SetupDiClassGuidsFromNameEx(className string, machineName string) ([]GUID, error) { + classNameUTF16, err := UTF16PtrFromString(className) + if err != nil { + return nil, err + } + + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return nil, err + } + } + + reqSize := uint32(4) + for { + buf := make([]GUID, reqSize) + err = setupDiClassGuidsFromNameEx(classNameUTF16, &buf[0], uint32(len(buf)), &reqSize, machineNameUTF16, 0) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return nil, err + } + return buf[:reqSize], nil + } +} + +//sys setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiGetSelectedDevice + +// SetupDiGetSelectedDevice function retrieves the selected device information element in a device information set. +func SetupDiGetSelectedDevice(deviceInfoSet DevInfo) (*DevInfoData, error) { + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiGetSelectedDevice(deviceInfoSet, data) +} + +// SelectedDevice method retrieves the selected device information element in a device information set. +func (deviceInfoSet DevInfo) SelectedDevice() (*DevInfoData, error) { + return SetupDiGetSelectedDevice(deviceInfoSet) +} + +// SetupDiSetSelectedDevice function sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. +//sys SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiSetSelectedDevice + +// SetSelectedDevice method sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. +func (deviceInfoSet DevInfo) SetSelectedDevice(deviceInfoData *DevInfoData) error { + return SetupDiSetSelectedDevice(deviceInfoSet, deviceInfoData) +} + +//sys setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) = setupapi.SetupUninstallOEMInfW + +// SetupUninstallOEMInf uninstalls the specified driver. 
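SetupDiClassGuidsFromNameEx pairs naturally with SetupDiClassNameFromGuidEx for a name-to-GUID round trip. A small sketch against the local machine; "Net" (the network adapter setup class) is only an example class name:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// An empty machine name means the local computer.
	guids, err := windows.SetupDiClassGuidsFromNameEx("Net", "")
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range guids {
		name, err := windows.SetupDiClassNameFromGuidEx(&g, "")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s -> %s\n", g.String(), name)
	}
}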
+func SetupUninstallOEMInf(infFileName string, flags SUOI) error { + infFileName16, err := UTF16PtrFromString(infFileName) + if err != nil { + return err + } + return setupUninstallOEMInf(infFileName16, flags, 0) +} + +//sys cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) = CfgMgr32.CM_MapCrToWin32Err + +//sys cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_List_SizeW +//sys cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_ListW + +func CM_Get_Device_Interface_List(deviceID string, interfaceClass *GUID, flags uint32) ([]string, error) { + deviceID16, err := UTF16PtrFromString(deviceID) + if err != nil { + return nil, err + } + var buf []uint16 + var buflen uint32 + for { + if ret := cm_Get_Device_Interface_List_Size(&buflen, interfaceClass, deviceID16, flags); ret != CR_SUCCESS { + return nil, ret + } + buf = make([]uint16, buflen) + if ret := cm_Get_Device_Interface_List(interfaceClass, deviceID16, &buf[0], buflen, flags); ret == CR_SUCCESS { + break + } else if ret != CR_BUFFER_SMALL { + return nil, ret + } + } + var interfaces []string + for i := 0; i < len(buf); { + j := i + wcslen(buf[i:]) + if i < j { + interfaces = append(interfaces, UTF16ToString(buf[i:j])) + } + i = j + 1 + } + if interfaces == nil { + return nil, ERROR_NO_SUCH_DEVICE_INTERFACE + } + return interfaces, nil +} + +//sys cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_DevNode_Status + +func CM_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) error { + ret := cm_Get_DevNode_Status(status, problemNumber, devInst, flags) + if ret == CR_SUCCESS { + return nil + } + return ret +} diff --git a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go deleted file mode 100644 index 1681810e04..0000000000 --- a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package windows - -import "syscall" - -const ( - ERROR_EXPECTED_SECTION_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0 - ERROR_BAD_SECTION_NAME_LINE syscall.Errno = 0x20000000 | 0xC0000000 | 1 - ERROR_SECTION_NAME_TOO_LONG syscall.Errno = 0x20000000 | 0xC0000000 | 2 - ERROR_GENERAL_SYNTAX syscall.Errno = 0x20000000 | 0xC0000000 | 3 - ERROR_WRONG_INF_STYLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x100 - ERROR_SECTION_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x101 - ERROR_LINE_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x102 - ERROR_NO_BACKUP syscall.Errno = 0x20000000 | 0xC0000000 | 0x103 - ERROR_NO_ASSOCIATED_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x200 - ERROR_CLASS_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x201 - ERROR_DUPLICATE_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x202 - ERROR_NO_DRIVER_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x203 - ERROR_KEY_DOES_NOT_EXIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x204 - ERROR_INVALID_DEVINST_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x205 - ERROR_INVALID_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x206 - ERROR_DEVINST_ALREADY_EXISTS syscall.Errno = 0x20000000 | 0xC0000000 | 0x207 - ERROR_DEVINFO_NOT_REGISTERED syscall.Errno = 0x20000000 | 0xC0000000 | 0x208 - ERROR_INVALID_REG_PROPERTY syscall.Errno = 0x20000000 | 0xC0000000 | 0x209 - ERROR_NO_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x20A - ERROR_NO_SUCH_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x20B - ERROR_CANT_LOAD_CLASS_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x20C - ERROR_INVALID_CLASS_INSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x20D - ERROR_DI_DO_DEFAULT syscall.Errno = 0x20000000 | 0xC0000000 | 0x20E - ERROR_DI_NOFILECOPY syscall.Errno = 0x20000000 | 0xC0000000 | 0x20F - ERROR_INVALID_HWPROFILE syscall.Errno = 0x20000000 | 0xC0000000 | 0x210 - ERROR_NO_DEVICE_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x211 - ERROR_DEVINFO_LIST_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x212 - ERROR_DEVINFO_DATA_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x213 - ERROR_DI_BAD_PATH syscall.Errno = 0x20000000 | 0xC0000000 | 0x214 - ERROR_NO_CLASSINSTALL_PARAMS syscall.Errno = 0x20000000 | 0xC0000000 | 0x215 - ERROR_FILEQUEUE_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x216 - ERROR_BAD_SERVICE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x217 - ERROR_NO_CLASS_DRIVER_LIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x218 - ERROR_NO_ASSOCIATED_SERVICE syscall.Errno = 0x20000000 | 0xC0000000 | 0x219 - ERROR_NO_DEFAULT_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21A - ERROR_DEVICE_INTERFACE_ACTIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21B - ERROR_DEVICE_INTERFACE_REMOVED syscall.Errno = 0x20000000 | 0xC0000000 | 0x21C - ERROR_BAD_INTERFACE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x21D - ERROR_NO_SUCH_INTERFACE_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x21E - ERROR_INVALID_REFERENCE_STRING syscall.Errno = 0x20000000 | 0xC0000000 | 0x21F - ERROR_INVALID_MACHINENAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x220 - ERROR_REMOTE_COMM_FAILURE syscall.Errno = 0x20000000 | 0xC0000000 | 0x221 - ERROR_MACHINE_UNAVAILABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x222 - ERROR_NO_CONFIGMGR_SERVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x223 - ERROR_INVALID_PROPPAGE_PROVIDER syscall.Errno = 0x20000000 | 0xC0000000 | 0x224 - ERROR_NO_SUCH_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x225 - ERROR_DI_POSTPROCESSING_REQUIRED 
syscall.Errno = 0x20000000 | 0xC0000000 | 0x226 - ERROR_INVALID_COINSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x227 - ERROR_NO_COMPAT_DRIVERS syscall.Errno = 0x20000000 | 0xC0000000 | 0x228 - ERROR_NO_DEVICE_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x229 - ERROR_INVALID_INF_LOGCONFIG syscall.Errno = 0x20000000 | 0xC0000000 | 0x22A - ERROR_DI_DONT_INSTALL syscall.Errno = 0x20000000 | 0xC0000000 | 0x22B - ERROR_INVALID_FILTER_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22C - ERROR_NON_WINDOWS_NT_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22D - ERROR_NON_WINDOWS_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22E - ERROR_NO_CATALOG_FOR_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x22F - ERROR_DEVINSTALL_QUEUE_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x230 - ERROR_NOT_DISABLEABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x231 - ERROR_CANT_REMOVE_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x232 - ERROR_INVALID_TARGET syscall.Errno = 0x20000000 | 0xC0000000 | 0x233 - ERROR_DRIVER_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x234 - ERROR_IN_WOW64 syscall.Errno = 0x20000000 | 0xC0000000 | 0x235 - ERROR_SET_SYSTEM_RESTORE_POINT syscall.Errno = 0x20000000 | 0xC0000000 | 0x236 - ERROR_SCE_DISABLED syscall.Errno = 0x20000000 | 0xC0000000 | 0x238 - ERROR_UNKNOWN_EXCEPTION syscall.Errno = 0x20000000 | 0xC0000000 | 0x239 - ERROR_PNP_REGISTRY_ERROR syscall.Errno = 0x20000000 | 0xC0000000 | 0x23A - ERROR_REMOTE_REQUEST_UNSUPPORTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x23B - ERROR_NOT_AN_INSTALLED_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x23C - ERROR_INF_IN_USE_BY_DEVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x23D - ERROR_DI_FUNCTION_OBSOLETE syscall.Errno = 0x20000000 | 0xC0000000 | 0x23E - ERROR_NO_AUTHENTICODE_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x23F - ERROR_AUTHENTICODE_DISALLOWED syscall.Errno = 0x20000000 | 0xC0000000 | 0x240 - ERROR_AUTHENTICODE_TRUSTED_PUBLISHER syscall.Errno = 0x20000000 | 0xC0000000 | 0x241 - ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED syscall.Errno = 0x20000000 | 0xC0000000 | 0x242 - ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x243 - ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x244 - ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE syscall.Errno = 0x20000000 | 0xC0000000 | 0x245 - ERROR_DEVICE_INSTALLER_NOT_READY syscall.Errno = 0x20000000 | 0xC0000000 | 0x246 - ERROR_DRIVER_STORE_ADD_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x247 - ERROR_DEVICE_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x248 - ERROR_DRIVER_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x249 - ERROR_WRONG_INF_TYPE syscall.Errno = 0x20000000 | 0xC0000000 | 0x24A - ERROR_FILE_HASH_NOT_IN_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x24B - ERROR_DRIVER_STORE_DELETE_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x24C - ERROR_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = 0x20000000 | 0xC0000000 | 0x300 - EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW - ERROR_NO_DEFAULT_INTERFACE_DEVICE syscall.Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE - ERROR_INTERFACE_DEVICE_ACTIVE syscall.Errno = ERROR_DEVICE_INTERFACE_ACTIVE - ERROR_INTERFACE_DEVICE_REMOVED syscall.Errno = ERROR_DEVICE_INTERFACE_REMOVED - ERROR_NO_SUCH_INTERFACE_DEVICE syscall.Errno = ERROR_NO_SUCH_DEVICE_INTERFACE -) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go 
b/vendor/golang.org/x/sys/windows/syscall_windows.go index 53ee74e08b..200b62a003 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -248,6 +248,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW //sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW //sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW +//sys ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 @@ -322,6 +323,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot +//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW +//sys Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW //sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW //sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW //sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) @@ -893,9 +896,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil } @@ -915,9 +916,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } + sa.raw.Addr = sa.Addr return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil } @@ -990,9 +989,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil case AF_INET6: @@ -1001,9 +998,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } + sa.Addr = pp.Addr return sa, nil } return nil, syscall.EAFNOSUPPORT diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 286dd1eab9..bb31abda41 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -156,6 +156,8 @@ const 
( MAX_PATH = 260 MAX_LONG_PATH = 32768 + MAX_MODULE_NAME32 = 255 + MAX_COMPUTERNAME_LENGTH = 15 TIME_ZONE_ID_UNKNOWN = 0 @@ -936,8 +938,8 @@ type StartupInfoEx struct { type ProcThreadAttributeList struct{} type ProcThreadAttributeListContainer struct { - data *ProcThreadAttributeList - heapAllocations []uintptr + data *ProcThreadAttributeList + pointers []unsafe.Pointer } type ProcessInformation struct { @@ -970,6 +972,21 @@ type ThreadEntry32 struct { Flags uint32 } +type ModuleEntry32 struct { + Size uint32 + ModuleID uint32 + ProcessID uint32 + GlblcntUsage uint32 + ProccntUsage uint32 + ModBaseAddr uintptr + ModBaseSize uint32 + ModuleHandle Handle + Module [MAX_MODULE_NAME32 + 1]uint16 + ExePath [MAX_PATH]uint16 +} + +const SizeofModuleEntry32 = unsafe.Sizeof(ModuleEntry32{}) + type Systemtime struct { Year uint16 Month uint16 @@ -2732,6 +2749,43 @@ type PROCESS_BASIC_INFORMATION struct { InheritedFromUniqueProcessId uintptr } +type SYSTEM_PROCESS_INFORMATION struct { + NextEntryOffset uint32 + NumberOfThreads uint32 + WorkingSetPrivateSize int64 + HardFaultCount uint32 + NumberOfThreadsHighWatermark uint32 + CycleTime uint64 + CreateTime int64 + UserTime int64 + KernelTime int64 + ImageName NTUnicodeString + BasePriority int32 + UniqueProcessID uintptr + InheritedFromUniqueProcessID uintptr + HandleCount uint32 + SessionID uint32 + UniqueProcessKey *uint32 + PeakVirtualSize uintptr + VirtualSize uintptr + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivatePageCount uintptr + ReadOperationCount int64 + WriteOperationCount int64 + OtherOperationCount int64 + ReadTransferCount int64 + WriteTransferCount int64 + OtherTransferCount int64 +} + // SystemInformationClasses for NtQuerySystemInformation and NtSetSystemInformation const ( SystemBasicInformation = iota diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 91817d6dcb..1055d47ed3 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -36,6 +36,7 @@ func errnoErr(e syscall.Errno) error { } var ( + modCfgMgr32 = NewLazySystemDLL("CfgMgr32.dll") modadvapi32 = NewLazySystemDLL("advapi32.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") @@ -48,6 +49,7 @@ var ( modpsapi = NewLazySystemDLL("psapi.dll") modsechost = NewLazySystemDLL("sechost.dll") modsecur32 = NewLazySystemDLL("secur32.dll") + modsetupapi = NewLazySystemDLL("setupapi.dll") modshell32 = NewLazySystemDLL("shell32.dll") moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") @@ -56,6 +58,10 @@ var ( modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") + procCM_Get_DevNode_Status = modCfgMgr32.NewProc("CM_Get_DevNode_Status") + procCM_Get_Device_Interface_ListW = modCfgMgr32.NewProc("CM_Get_Device_Interface_ListW") + procCM_Get_Device_Interface_List_SizeW = modCfgMgr32.NewProc("CM_Get_Device_Interface_List_SizeW") + procCM_MapCrToWin32Err = modCfgMgr32.NewProc("CM_MapCrToWin32Err") procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") @@ -199,6 
+205,7 @@ var ( procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") procExitProcess = modkernel32.NewProc("ExitProcess") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") procFindCloseChangeNotification = modkernel32.NewProc("FindCloseChangeNotification") procFindFirstChangeNotificationW = modkernel32.NewProc("FindFirstChangeNotificationW") @@ -288,6 +295,8 @@ var ( procLockFileEx = modkernel32.NewProc("LockFileEx") procLockResource = modkernel32.NewProc("LockResource") procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procModule32FirstW = modkernel32.NewProc("Module32FirstW") + procModule32NextW = modkernel32.NewProc("Module32NextW") procMoveFileExW = modkernel32.NewProc("MoveFileExW") procMoveFileW = modkernel32.NewProc("MoveFileW") procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") @@ -400,6 +409,34 @@ var ( procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procSetupDiBuildDriverInfoList = modsetupapi.NewProc("SetupDiBuildDriverInfoList") + procSetupDiCallClassInstaller = modsetupapi.NewProc("SetupDiCallClassInstaller") + procSetupDiCancelDriverInfoSearch = modsetupapi.NewProc("SetupDiCancelDriverInfoSearch") + procSetupDiClassGuidsFromNameExW = modsetupapi.NewProc("SetupDiClassGuidsFromNameExW") + procSetupDiClassNameFromGuidExW = modsetupapi.NewProc("SetupDiClassNameFromGuidExW") + procSetupDiCreateDeviceInfoListExW = modsetupapi.NewProc("SetupDiCreateDeviceInfoListExW") + procSetupDiCreateDeviceInfoW = modsetupapi.NewProc("SetupDiCreateDeviceInfoW") + procSetupDiDestroyDeviceInfoList = modsetupapi.NewProc("SetupDiDestroyDeviceInfoList") + procSetupDiDestroyDriverInfoList = modsetupapi.NewProc("SetupDiDestroyDriverInfoList") + procSetupDiEnumDeviceInfo = modsetupapi.NewProc("SetupDiEnumDeviceInfo") + procSetupDiEnumDriverInfoW = modsetupapi.NewProc("SetupDiEnumDriverInfoW") + procSetupDiGetClassDevsExW = modsetupapi.NewProc("SetupDiGetClassDevsExW") + procSetupDiGetClassInstallParamsW = modsetupapi.NewProc("SetupDiGetClassInstallParamsW") + procSetupDiGetDeviceInfoListDetailW = modsetupapi.NewProc("SetupDiGetDeviceInfoListDetailW") + procSetupDiGetDeviceInstallParamsW = modsetupapi.NewProc("SetupDiGetDeviceInstallParamsW") + procSetupDiGetDeviceInstanceIdW = modsetupapi.NewProc("SetupDiGetDeviceInstanceIdW") + procSetupDiGetDevicePropertyW = modsetupapi.NewProc("SetupDiGetDevicePropertyW") + procSetupDiGetDeviceRegistryPropertyW = modsetupapi.NewProc("SetupDiGetDeviceRegistryPropertyW") + procSetupDiGetDriverInfoDetailW = modsetupapi.NewProc("SetupDiGetDriverInfoDetailW") + procSetupDiGetSelectedDevice = modsetupapi.NewProc("SetupDiGetSelectedDevice") + procSetupDiGetSelectedDriverW = modsetupapi.NewProc("SetupDiGetSelectedDriverW") + procSetupDiOpenDevRegKey = modsetupapi.NewProc("SetupDiOpenDevRegKey") + procSetupDiSetClassInstallParamsW = modsetupapi.NewProc("SetupDiSetClassInstallParamsW") + procSetupDiSetDeviceInstallParamsW = modsetupapi.NewProc("SetupDiSetDeviceInstallParamsW") + procSetupDiSetDeviceRegistryPropertyW = modsetupapi.NewProc("SetupDiSetDeviceRegistryPropertyW") + procSetupDiSetSelectedDevice = modsetupapi.NewProc("SetupDiSetSelectedDevice") + procSetupDiSetSelectedDriverW = 
modsetupapi.NewProc("SetupDiSetSelectedDriverW") + procSetupUninstallOEMInfW = modsetupapi.NewProc("SetupUninstallOEMInfW") procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procShellExecuteW = modshell32.NewProc("ShellExecuteW") @@ -447,6 +484,30 @@ var ( procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") ) +func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + ret = CONFIGRET(r0) + return +} + +func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + ret = CONFIGRET(r0) + return +} + +func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + ret = CONFIGRET(r0) + return +} + +func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { + r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + ret = Errno(r0) + return +} + func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { var _p0 uint32 if resetToDefault { @@ -1716,6 +1777,15 @@ func ExitProcess(exitcode uint32) { return } +func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + func FindClose(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -2499,6 +2569,22 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui return } +func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { @@ -3432,6 +3518,233 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint return } +func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { + r1, _, e1 := 
syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { + r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + handle = DevInfo(r0) + if handle == DevInfo(InvalidHandle) { + err = errnoErr(e1) + } + return +} + +func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), 
uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { + r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + handle = DevInfo(r0) + if handle == DevInfo(InvalidHandle) { + err = errnoErr(e1) + } + return +} + +func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property 
SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { + r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + key = Handle(r0) + if key == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, 
deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) diff --git a/vendor/golang.org/x/term/codereview.cfg b/vendor/golang.org/x/term/codereview.cfg new file mode 100644 index 0000000000..3f8b14b64e --- /dev/null +++ b/vendor/golang.org/x/term/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/term/term.go b/vendor/golang.org/x/term/term.go index 1f6a38fad2..d592708808 100644 --- a/vendor/golang.org/x/term/term.go +++ b/vendor/golang.org/x/term/term.go @@ -12,6 +12,8 @@ // panic(err) // } // defer term.Restore(int(os.Stdin.Fd()), oldState) +// +// Note that on non-Unix systems os.Stdin.Fd() may not be 0. package term // State contains the state of a terminal. diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index c49c5e0c52..22bb7699e9 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -449,7 +449,7 @@ type ExternalMetricStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.22 -// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v2beta2,HorizontalPodAutoscaler +// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v2,HorizontalPodAutoscaler // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go index 67d38d0517..3437454ee3 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go @@ -40,7 +40,7 @@ func (in *HorizontalPodAutoscaler) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
func (in *HorizontalPodAutoscaler) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta2", Kind: "HorizontalPodAutoscaler"} + return schema.GroupVersionKind{Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscaler"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 7619f1e009..10bdec5b94 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -28,6 +28,7 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.12 // +k8s:prerelease-lifecycle-gen:deprecated=1.23 +// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v2,HorizontalPodAutoscaler // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go index b8e49bc51b..6d1c2504ab 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go @@ -21,6 +21,10 @@ limitations under the License. package v2beta2 +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { @@ -33,6 +37,12 @@ func (in *HorizontalPodAutoscaler) APILifecycleDeprecated() (major, minor int) { return 1, 23 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *HorizontalPodAutoscaler) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscaler"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
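The generated lifecycle methods shown above are ordinary Go methods, so client tooling can query them directly instead of parsing the +k8s: tags. A minimal sketch of that (not part of this diff; warnIfDeprecated is a hypothetical helper) using the new autoscaling/v2 replacement reported for the v2beta2 HorizontalPodAutoscaler:

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)

// warnIfDeprecated prints the release in which the type was deprecated and the
// GroupVersionKind that the generated code now reports as its replacement.
func warnIfDeprecated(hpa *autoscalingv2beta2.HorizontalPodAutoscaler) {
	major, minor := hpa.APILifecycleDeprecated()
	gvk := hpa.APILifecycleReplacement()
	fmt.Printf("autoscaling/v2beta2 HorizontalPodAutoscaler is deprecated since %d.%d; use %s instead\n", major, minor, gvk.String())
}

func main() {
	warnIfDeprecated(&autoscalingv2beta2.HorizontalPodAutoscaler{})
}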
func (in *HorizontalPodAutoscaler) APILifecycleRemoved() (major, minor int) { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go index 99b609494b..5af677e2f5 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -63,7 +63,7 @@ const ( // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,FlowSchema +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -87,7 +87,7 @@ type FlowSchema struct { // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,FlowSchemaList +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -344,7 +344,7 @@ type FlowSchemaConditionType string // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,PriorityLevelConfiguration +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -367,7 +367,7 @@ type PriorityLevelConfiguration struct { // +k8s:prerelease-lifecycle-gen:introduced=1.18 // +k8s:prerelease-lifecycle-gen:deprecated=1.20 // +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta1,PriorityLevelConfigurationList +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go index a248aec452..2260141609 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -40,7 +40,7 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchema"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -64,7 +64,7 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "FlowSchemaList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchemaList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -88,7 +88,7 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfiguration"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -112,7 +112,7 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfigurationList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfigurationList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go index 767a32e7f5..b457326429 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -105,6 +105,7 @@ const ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchema // FlowSchema defines the schema of a group of flows. 
Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -126,6 +127,7 @@ type FlowSchema struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -380,6 +382,7 @@ type FlowSchemaConditionType string // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -400,6 +403,7 @@ type PriorityLevelConfiguration struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go index f02fcdccdc..ed1e16c26a 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go @@ -21,6 +21,10 @@ limitations under the License. package v1beta1 +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { @@ -33,6 +37,12 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { return 1, 23 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { @@ -51,6 +61,12 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { return 1, 23 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
+func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchemaList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { @@ -69,6 +85,12 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int return 1, 23 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { @@ -87,6 +109,12 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor return 1, 23 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfigurationList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 133ba0493c..8a2824b51b 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -144,7 +144,6 @@ message PodDisruptionBudgetSpec { // A null selector selects no pods. // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. // In policy/v1, an empty selector will select all pods in the namespace. - // +patchStrategy=replace // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 553cb316dc..486f93461a 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -36,9 +36,8 @@ type PodDisruptionBudgetSpec struct { // A null selector selects no pods. 
// An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. // In policy/v1, an empty selector will select all pods in the namespace. - // +patchStrategy=replace // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" patchStrategy:"replace" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index ad5063fdf1..8cccebf2e9 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -1,19 +1,13 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: - - jayunit100 - - hoegaarden - - andyxning - - neolit123 - pohly - - yagonobre - - vincepri - - detiber approvers: - dims - thockin - - justinsb - - tallclair - - piosz + - serathius +emeritus_approvers: - brancz + - justinsb - lavalamp - - serathius + - piosz + - tallclair diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 64d29622e8..7de2212cca 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -23,6 +23,21 @@ Historical context is available here: * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ +## Release versioning + +Semantic versioning is used in this repository. It contains several Go modules +with different levels of stability: +- `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags +- `k8s.io/hack/tools` - no stable API yet (may change eventually or get moved to separate repo), `hack/tools/v0.Y.Z` tags +- `examples` - no stable API, no tags, no intention to ever stabilize + +Exempt from the API stability guarantee are items (packages, functions, etc.) +which are marked explicitly as `EXPERIMENTAL` in their docs comment. Those +may still change in incompatible ways or get removed entirely. This can only +be used for code that is used in tests to avoid situations where non-test +code from two different Kubernetes dependencies depends on incompatible +releases of klog because an experimental API was changed. + ---- How to use klog @@ -32,6 +47,7 @@ How to use klog - You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) - For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) +- See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog). **NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. 
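The SetOutput bullet above usually needs one extra step in practice: klog defaults to logtostderr=true, so a redirect only takes effect once that flag is turned off. A minimal, self-contained sketch of the flow, mirroring the pattern of the examples/set_output program mentioned above:

package main

import (
	"bytes"
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)                  // register klog's flags on the default FlagSet
	_ = flag.Set("logtostderr", "false") // otherwise everything goes straight to stderr
	flag.Parse()

	var buf bytes.Buffer
	klog.SetOutput(&buf) // all severities now write into buf
	klog.Info("nice to meet you")
	klog.Flush()

	fmt.Print(buf.String())
}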
@@ -85,7 +101,7 @@ The comment from glog.go introduces the ideas: glog.Fatalf("Initialization failed: %s", err) - See the documentation for the V function for an explanation + See the documentation of the V function for an explanation of these examples: if glog.V(2) { diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go new file mode 100644 index 0000000000..0bf19280e5 --- /dev/null +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -0,0 +1,197 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + + "github.com/go-logr/logr" +) + +// This file provides the implementation of +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging +// +// SetLogger and ClearLogger were originally added to klog.go and got moved +// here. Contextual logging adds a way to retrieve a Logger for direct logging +// without the logging calls in klog.go. +// +// The global variables are expected to be modified only during sequential +// parts of a program (init, serial tests) and therefore are not protected by +// mutex locking. + +var ( + // contextualLoggingEnabled controls whether contextual logging is + // active. Disabling it may have some small performance benefit. + contextualLoggingEnabled = true + + // globalLogger is the global Logger chosen by users of klog, nil if + // none is available. + globalLogger *Logger + + // globalLoggerOptions contains the options that were supplied for + // globalLogger. + globalLoggerOptions loggerOptions + + // klogLogger is used as fallback for logging through the normal klog code + // when no Logger is set. + klogLogger logr.Logger = logr.New(&klogger{}) +) + +// SetLogger sets a Logger implementation that will be used as backing +// implementation of the traditional klog log calls. klog will do its own +// verbosity checks before calling logger.V().Info. logger.Error is always +// called, regardless of the klog verbosity settings. +// +// If set, all log lines will be suppressed from the regular output, and +// redirected to the logr implementation. +// Use as: +// ... +// klog.SetLogger(zapr.NewLogger(zapLog)) +// +// To remove a backing logr implemention, use ClearLogger. Setting an +// empty logger with SetLogger(logr.Logger{}) does not work. +// +// Modifying the logger is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. +func SetLogger(logger logr.Logger) { + SetLoggerWithOptions(logger) +} + +// SetLoggerWithOptions is a more flexible version of SetLogger. Without +// additional options, it behaves exactly like SetLogger. By passing +// ContextualLogger(true) as option, it can be used to set a logger that then +// will also get called directly by applications which retrieve it via +// FromContext, Background, or TODO. 
+// +// Supporting direct calls is recommended because it avoids the overhead of +// routing log entries through klogr into klog and then into the actual Logger +// backend. +func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { + globalLogger = &logger + globalLoggerOptions = loggerOptions{} + for _, opt := range opts { + opt(&globalLoggerOptions) + } +} + +// ContextualLogger determines whether the logger passed to +// SetLoggerWithOptions may also get called directly. Such a logger cannot rely +// on verbosity checking in klog. +func ContextualLogger(enabled bool) LoggerOption { + return func(o *loggerOptions) { + o.contextualLogger = enabled + } +} + +// FlushLogger provides a callback for flushing data buffered by the logger. +func FlushLogger(flush func()) LoggerOption { + return func(o *loggerOptions) { + o.flush = flush + } +} + +// LoggerOption implements the functional parameter paradigm for +// SetLoggerWithOptions. +type LoggerOption func(o *loggerOptions) + +type loggerOptions struct { + contextualLogger bool + flush func() +} + +// ClearLogger removes a backing Logger implementation if one was set earlier +// with SetLogger. +// +// Modifying the logger is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. +func ClearLogger() { + globalLogger = nil + globalLoggerOptions = loggerOptions{} +} + +// EnableContextualLogging controls whether contextual logging is enabled. +// By default it is enabled. When disabled, FromContext avoids looking up +// the logger in the context and always returns the global logger. +// LoggerWithValues, LoggerWithName, and NewContext become no-ops +// and return their input logger respectively context. This may be useful +// to avoid the additional overhead for contextual logging. +// +// This must be called during initialization before goroutines are started. +func EnableContextualLogging(enabled bool) { + contextualLoggingEnabled = enabled +} + +// FromContext retrieves a logger set by the caller or, if not set, +// falls back to the program's global logger (a Logger instance or klog +// itself). +func FromContext(ctx context.Context) Logger { + if contextualLoggingEnabled { + if logger, err := logr.FromContext(ctx); err == nil { + return logger + } + } + + return Background() +} + +// TODO can be used as a last resort by code that has no means of +// receiving a logger from its caller. FromContext or an explicit logger +// parameter should be used instead. +func TODO() Logger { + return Background() +} + +// Background retrieves the fallback logger. It should not be called before +// that logger was initialized by the program and not by code that should +// better receive a logger via its parameters. TODO can be used as a temporary +// solution for such code. +func Background() Logger { + if globalLoggerOptions.contextualLogger { + // Is non-nil because globalLoggerOptions.contextualLogger is + // only true if a logger was set. + return *globalLogger + } + + return klogLogger +} + +// LoggerWithValues returns logger.WithValues(...kv) when +// contextual logging is enabled, otherwise the logger. +func LoggerWithValues(logger Logger, kv ...interface{}) Logger { + if contextualLoggingEnabled { + return logger.WithValues(kv...) + } + return logger +} + +// LoggerWithName returns logger.WithName(name) when contextual logging is +// enabled, otherwise the logger. 
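A short usage sketch (not vendored code) of the contextual-logging round trip these helpers enable: NewContext stores a logger in a context, FromContext retrieves it later, and Background() is the fallback when nothing was stored or contextual logging is disabled.

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func doWork(ctx context.Context) {
	logger := klog.FromContext(ctx) // the logger stored below, or the global fallback
	logger.Info("processing item", "attempt", 1)
}

func main() {
	logger := klog.Background().WithName("worker")
	ctx := klog.NewContext(context.Background(), logger)
	doWork(ctx)
	klog.Flush()
}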
+func LoggerWithName(logger Logger, name string) Logger { + if contextualLoggingEnabled { + return logger.WithName(name) + } + return logger +} + +// NewContext returns logr.NewContext(ctx, logger) when +// contextual logging is enabled, otherwise ctx. +func NewContext(ctx context.Context, logger Logger) context.Context { + if contextualLoggingEnabled { + return logr.NewContext(ctx, logger) + } + return ctx +} diff --git a/vendor/k8s.io/klog/v2/exit.go b/vendor/k8s.io/klog/v2/exit.go new file mode 100644 index 0000000000..320a147728 --- /dev/null +++ b/vendor/k8s.io/klog/v2/exit.go @@ -0,0 +1,69 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package klog + +import ( + "fmt" + "os" + "time" +) + +var ( + + // ExitFlushTimeout is the timeout that klog has traditionally used during + // calls like Fatal or Exit when flushing log data right before exiting. + // Applications that replace those calls and do not have some specific + // requirements like "exit immediately" can use this value as parameter + // for FlushAndExit. + // + // Can be set for testing purpose or to change the application's + // default. + ExitFlushTimeout = 10 * time.Second + + // OsExit is the function called by FlushAndExit to terminate the program. + // + // Can be set for testing purpose or to change the application's + // default behavior. Note that the function should not simply return + // because callers of functions like Fatal will not expect that. + OsExit = os.Exit +) + +// FlushAndExit flushes log data for a certain amount of time and then calls +// os.Exit. Combined with some logging call it provides a replacement for +// traditional calls like Fatal or Exit. +func FlushAndExit(flushTimeout time.Duration, exitCode int) { + timeoutFlush(flushTimeout) + OsExit(exitCode) +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when klog.Fatal is called from a hook that holds +// a lock. Flushing also might take too long. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) + } +} diff --git a/vendor/k8s.io/klog/v2/imports.go b/vendor/k8s.io/klog/v2/imports.go new file mode 100644 index 0000000000..602c3ed9e6 --- /dev/null +++ b/vendor/k8s.io/klog/v2/imports.go @@ -0,0 +1,38 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "github.com/go-logr/logr" +) + +// The reason for providing these aliases is to allow code to work with logr +// without directly importing it. + +// Logger in this package is exactly the same as logr.Logger. +type Logger = logr.Logger + +// LogSink in this package is exactly the same as logr.LogSink. +type LogSink = logr.LogSink + +// Runtimeinfo in this package is exactly the same as logr.RuntimeInfo. +type RuntimeInfo = logr.RuntimeInfo + +var ( + // New is an alias for logr.New. + New = logr.New +) diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go new file mode 100644 index 0000000000..ac88682a2c --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -0,0 +1,159 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package buffer provides a cache for byte.Buffer instances that can be reused +// to avoid frequent allocation and deallocation. It also has utility code +// for log header formatting that use these buffers. +package buffer + +import ( + "bytes" + "os" + "sync" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +var ( + // Pid is inserted into log headers. Can be overridden for tests. + Pid = os.Getpid() +) + +// Buffer holds a single byte.Buffer for reuse. The zero value is ready for +// use. It also provides some helper methods for output formatting. +type Buffer struct { + bytes.Buffer + Tmp [64]byte // temporary byte array for creating headers. + next *Buffer +} + +// Buffers manages the reuse of individual buffer instances. It is thread-safe. +type Buffers struct { + // mu protects the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + mu sync.Mutex + + // freeList is a list of byte buffers, maintained under mu. + freeList *Buffer +} + +// GetBuffer returns a new, ready-to-use buffer. +func (bl *Buffers) GetBuffer() *Buffer { + bl.mu.Lock() + b := bl.freeList + if b != nil { + bl.freeList = b.next + } + bl.mu.Unlock() + if b == nil { + b = new(Buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// PutBuffer returns a buffer to the free list. +func (bl *Buffers) PutBuffer(b *Buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + bl.mu.Lock() + b.next = bl.freeList + bl.freeList = b + bl.mu.Unlock() +} + +// Some custom tiny helper functions to print the log header efficiently. 
+ +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.Tmp[i]. +func (buf *Buffer) twoDigits(i, d int) { + buf.Tmp[i+1] = digits[d%10] + d /= 10 + buf.Tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.Tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *Buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.Tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.Tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.Tmp[i]. +func (buf *Buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.Tmp) + for { + j-- + buf.Tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.Tmp[i:], buf.Tmp[j:]) +} + +// FormatHeader formats a log header using the provided file name and line number. +func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) { + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > severity.FatalLog { + s = severity.InfoLog // for safety. + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.Tmp[0] = severity.Char[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.Tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.Tmp[8] = ':' + buf.twoDigits(9, minute) + buf.Tmp[11] = ':' + buf.twoDigits(12, second) + buf.Tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.Tmp[21] = ' ' + buf.nDigits(7, 22, Pid, ' ') // TODO: should be TID + buf.Tmp[29] = ' ' + buf.Write(buf.Tmp[:30]) + buf.WriteString(file) + buf.Tmp[0] = ':' + n := buf.someDigits(1, line) + buf.Tmp[n+1] = ']' + buf.Tmp[n+2] = ' ' + buf.Write(buf.Tmp[:n+3]) +} diff --git a/vendor/k8s.io/klog/v2/internal/clock/README.md b/vendor/k8s.io/klog/v2/internal/clock/README.md new file mode 100644 index 0000000000..03d692c8f8 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/clock/README.md @@ -0,0 +1,7 @@ +# Clock + +This package provides an interface for time-based operations. It allows +mocking time for testing. + +This is a copy of k8s.io/utils/clock. We have to copy it to avoid a circular +dependency (k8s.io/klog -> k8s.io/utils -> k8s.io/klog). diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go new file mode 100644 index 0000000000..b8b6af5c81 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go @@ -0,0 +1,178 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clock + +import "time" + +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + PassiveClock + // After returns the channel of a new Timer. + // This method does not allow to free/GC the backing timer before it fires. Use + // NewTimer instead. + After(d time.Duration) <-chan time.Time + // NewTimer returns a new Timer. + NewTimer(d time.Duration) Timer + // Sleep sleeps for the provided duration d. + // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. + Sleep(d time.Duration) + // Tick returns the channel of a new Ticker. + // This method does not allow to free/GC the backing ticker. Use + // NewTicker from WithTicker instead. + Tick(d time.Duration) <-chan time.Time +} + +// WithTicker allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type WithTicker interface { + Clock + // NewTicker returns a new Ticker. + NewTicker(time.Duration) Ticker +} + +// WithDelayedExecution allows for injecting fake or real clocks into +// code that needs to make use of AfterFunc functionality. +type WithDelayedExecution interface { + Clock + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// WithTickerAndDelayedExecution allows for injecting fake or real clocks +// into code that needs Ticker and AfterFunc functionality +type WithTickerAndDelayedExecution interface { + WithTicker + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// Ticker defines the Ticker interface. +type Ticker interface { + C() <-chan time.Time + Stop() +} + +var _ = WithTicker(RealClock{}) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// After is the same as time.After(d). +// This method does not allow to free/GC the backing timer before it fires. Use +// NewTimer instead. +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// NewTimer is the same as time.NewTimer(d) +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +// AfterFunc is the same as time.AfterFunc(d, f). +func (RealClock) AfterFunc(d time.Duration, f func()) Timer { + return &realTimer{ + timer: time.AfterFunc(d, f), + } +} + +// Tick is the same as time.Tick(d) +// This method does not allow to free/GC the backing ticker. Use +// NewTicker instead. +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +// NewTicker returns a new Ticker. 
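These interfaces exist so code can take a clock as a dependency and swap in a fake one under test. Because this copy is internal to klog, the sketch below (illustrative only) uses the public k8s.io/utils/clock packages, which expose the same surface per the README note on the import cycle:

package main

import (
	"fmt"
	"time"

	"k8s.io/utils/clock"
	clocktesting "k8s.io/utils/clock/testing"
)

// age works against any PassiveClock, so tests can control time.
func age(c clock.PassiveClock, start time.Time) time.Duration {
	return c.Since(start)
}

func main() {
	start := time.Now()
	fmt.Println(age(clock.RealClock{}, start)) // wall-clock elapsed time

	fake := clocktesting.NewFakeClock(start)
	fake.Step(5 * time.Second)    // advance the fake clock deterministically
	fmt.Println(age(fake, start)) // exactly 5s
}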
+func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } +} + +// Sleep is the same as time.Sleep(d) +// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var _ = Timer(&realTimer{}) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +type realTicker struct { + ticker *time.Ticker +} + +func (r *realTicker) C() <-chan time.Time { + return r.ticker.C +} + +func (r *realTicker) Stop() { + r.ticker.Stop() +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go new file mode 100644 index 0000000000..d897313682 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -0,0 +1,225 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + "strconv" +) + +// WithValues implements LogSink.WithValues. The old key/value pairs are +// assumed to be well-formed, the new ones are checked and padded if +// necessary. It returns a new slice. +func WithValues(oldKV, newKV []interface{}) []interface{} { + if len(newKV) == 0 { + return oldKV + } + newLen := len(oldKV) + len(newKV) + hasMissingValue := newLen%2 != 0 + if hasMissingValue { + newLen++ + } + // The new LogSink must have its own slice. + kv := make([]interface{}, 0, newLen) + kv = append(kv, oldKV...) + kv = append(kv, newKV...) + if hasMissingValue { + kv = append(kv, missingValue) + } + return kv +} + +// TrimDuplicates deduplicates elements provided in multiple key/value tuple +// slices, whilst maintaining the distinction between where the items are +// contained. 
+func TrimDuplicates(kvLists ...[]interface{}) [][]interface{} { + // maintain a map of all seen keys + seenKeys := map[interface{}]struct{}{} + // build the same number of output slices as inputs + outs := make([][]interface{}, len(kvLists)) + // iterate over the input slices backwards, as 'later' kv specifications + // of the same key will take precedence over earlier ones + for i := len(kvLists) - 1; i >= 0; i-- { + // initialise this output slice + outs[i] = []interface{}{} + // obtain a reference to the kvList we are processing + // and make sure it has an even number of entries + kvList := kvLists[i] + if len(kvList)%2 != 0 { + kvList = append(kvList, missingValue) + } + + // start iterating at len(kvList) - 2 (i.e. the 2nd last item) for + // slices that have an even number of elements. + // We add (len(kvList) % 2) here to handle the case where there is an + // odd number of elements in a kvList. + // If there is an odd number, then the last element in the slice will + // have the value 'null'. + for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 { + k := kvList[i2] + // if we have already seen this key, do not include it again + if _, ok := seenKeys[k]; ok { + continue + } + // make a note that we've observed a new key + seenKeys[k] = struct{}{} + // attempt to obtain the value of the key + var v interface{} + // i2+1 should only ever be out of bounds if we handling the first + // iteration over a slice with an odd number of elements + if i2+1 < len(kvList) { + v = kvList[i2+1] + } + // add this KV tuple to the *start* of the output list to maintain + // the original order as we are iterating over the slice backwards + outs[i] = append([]interface{}{k, v}, outs[i]...) + } + } + return outs +} + +const missingValue = "(MISSING)" + +// KVListFormat serializes all key/value pairs into the provided buffer. +// A space gets inserted before the first pair and between each pair. +func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { + for i := 0; i < len(keysAndValues); i += 2 { + var v interface{} + k := keysAndValues[i] + if i+1 < len(keysAndValues) { + v = keysAndValues[i+1] + } else { + v = missingValue + } + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if k, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(k) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + switch v := v.(type) { + case fmt.Stringer: + writeStringValue(b, true, StringerToString(v)) + case string: + writeStringValue(b, true, v) + case error: + writeStringValue(b, true, ErrorToString(v)) + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". 
The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + writeStringValue(b, false, fmt.Sprintf("%+v", v)) + } + } +} + +// StringerToString converts a Stringer to a string, +// handling panics if they occur. +func StringerToString(s fmt.Stringer) (ret string) { + defer func() { + if err := recover(); err != nil { + ret = fmt.Sprintf("<panic: %s>", err) + } + }() + ret = s.String() + return +} + +// ErrorToString converts an error to a string, +// handling panics if they occur. +func ErrorToString(err error) (ret string) { + defer func() { + if err := recover(); err != nil { + ret = fmt.Sprintf("<panic: %s>", err) + } + }() + ret = err.Error() + return +} + +func writeStringValue(b *bytes.Buffer, quote bool, v string) { + data := []byte(v) + index := bytes.IndexByte(data, '\n') + if index == -1 { + b.WriteByte('=') + if quote { + // Simple string, quote quotation marks and non-printable characters. + b.WriteString(strconv.Quote(v)) + return + } + // Non-string with no line breaks. + b.WriteString(v) + return + } + + // Complex multi-line string, show as-is with indention like this: + // I... "hello world" key=< + // <tab>line 1 + // <tab>line 2 + // > + // + // Tabs indent the lines of the value while the end of string delimiter + // is indented with a space. That has two purposes: + // - visual difference between the two for a human reader because indention + // will be different + // - no ambiguity when some value line starts with the end delimiter + // + // One downside is that the output cannot distinguish between strings that + // end with a line break and those that don't because the end delimiter + // will always be on the next line. + b.WriteString("=<\n") + for index != -1 { + b.WriteByte('\t') + b.Write(data[0 : index+1]) + data = data[index+1:] + index = bytes.IndexByte(data, '\n') + } + if len(data) == 0 { + // String ended with line break, don't add another. + b.WriteString(" >") + } else { + // No line break at end of last line, write rest of string and + // add one. + b.WriteByte('\t') + b.Write(data) + b.WriteString("\n >") + } +} diff --git a/vendor/k8s.io/klog/v2/internal/severity/severity.go b/vendor/k8s.io/klog/v2/internal/severity/severity.go new file mode 100644 index 0000000000..30fa1834f0 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/severity/severity.go @@ -0,0 +1,58 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package severity provides definitions for klog severity (info, warning, ...)
+package severity + +import ( + "strings" +) + +// severity identifies the sort of log: info, warning etc. The binding to flag.Value +// is handled in klog.go +type Severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + InfoLog Severity = iota + WarningLog + ErrorLog + FatalLog + NumSeverity = 4 +) + +// Char contains one shortcut letter per severity level. +const Char = "IWEF" + +// Name contains one name per severity level. +var Name = []string{ + InfoLog: "INFO", + WarningLog: "WARNING", + ErrorLog: "ERROR", + FatalLog: "FATAL", +} + +// ByName looks up a severity level by name. +func ByName(s string) (Severity, bool) { + s = strings.ToUpper(s) + for i, name := range Name { + if name == s { + return Severity(i), true + } + } + return 0, false +} diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go new file mode 100644 index 0000000000..db58f8baa6 --- /dev/null +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -0,0 +1,94 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "fmt" + "reflect" + + "github.com/go-logr/logr" +) + +// ObjectRef references a kubernetes object +type ObjectRef struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` +} + +func (ref ObjectRef) String() string { + if ref.Namespace != "" { + return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) + } + return ref.Name +} + +// MarshalLog ensures that loggers with support for structured output will log +// as a struct by removing the String method via a custom type. 
+func (ref ObjectRef) MarshalLog() interface{} { + type or ObjectRef + return or(ref) +} + +var _ logr.Marshaler = ObjectRef{} + +// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +// this interface may expand in the future, but will always be a subset of the +// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +type KMetadata interface { + GetName() string + GetNamespace() string +} + +// KObj returns ObjectRef from ObjectMeta +func KObj(obj KMetadata) ObjectRef { + if obj == nil { + return ObjectRef{} + } + if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { + return ObjectRef{} + } + + return ObjectRef{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } +} + +// KRef returns ObjectRef from name and namespace +func KRef(namespace, name string) ObjectRef { + return ObjectRef{ + Name: name, + Namespace: namespace, + } +} + +// KObjs returns slice of ObjectRef from an slice of ObjectMeta +func KObjs(arg interface{}) []ObjectRef { + s := reflect.ValueOf(arg) + if s.Kind() != reflect.Slice { + return nil + } + objectRefs := make([]ObjectRef, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + if v, ok := s.Index(i).Interface().(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil + } + } + return objectRefs +} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 45efbb0755..cb04590fe6 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -81,7 +81,6 @@ import ( "math" "os" "path/filepath" - "reflect" "runtime" "strconv" "strings" @@ -90,81 +89,58 @@ import ( "time" "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/clock" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" ) -// severity identifies the sort of log: info, warning etc. It also implements +// severityValue identifies the sort of log: info, warning etc. It also implements // the flag.Value interface. The -stderrthreshold flag is of type severity and // should be modified only through the flag.Value interface. The values match // the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", +type severityValue struct { + severity.Severity } // get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) +func (s *severityValue) get() severity.Severity { + return severity.Severity(atomic.LoadInt32((*int32)(&s.Severity))) } // set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) +func (s *severityValue) set(val severity.Severity) { + atomic.StoreInt32((*int32)(&s.Severity), int32(val)) } // String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) +func (s *severityValue) String() string { + return strconv.FormatInt(int64(s.Severity), 10) } // Get is part of the flag.Getter interface. 
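A short usage sketch for the ObjectRef/KObj/KRef helpers that now live in k8s_references.go (their public behavior is unchanged). The pod type below is a stand-in defined only for the example; any type with GetName/GetNamespace satisfies KMetadata.

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

// pod is a stand-in for a real API object; it satisfies klog.KMetadata.
type pod struct{ name, namespace string }

func (p pod) GetName() string      { return p.name }
func (p pod) GetNamespace() string { return p.namespace }

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	p := pod{name: "coredns-12345", namespace: "kube-system"}

	// Text output renders "kube-system/coredns-12345"; structured backends
	// receive the ObjectRef struct because of MarshalLog.
	klog.InfoS("Pod updated", "pod", klog.KObj(p))

	// KRef builds the same reference when only the strings are available.
	klog.InfoS("Pod deleted", "pod", klog.KRef("kube-system", "coredns-12345"))
}
```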
-func (s *severity) Get() interface{} { - return *s +func (s *severityValue) Get() interface{} { + return s.Severity } // Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity +func (s *severityValue) Set(value string) error { + var threshold severity.Severity // Is it a known name? - if v, ok := severityByName(value); ok { + if v, ok := severity.ByName(value); ok { threshold = v } else { v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } - threshold = severity(v) + threshold = severity.Severity(v) } logging.stderrThreshold.set(threshold) return nil } -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { lines int64 @@ -187,10 +163,10 @@ var Stats struct { Info, Warning, Error OutputStats } -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, +var severityStats = [severity.NumSeverity]*OutputStats{ + severity.InfoLog: &Stats.Info, + severity.WarningLog: &Stats.Warning, + severity.ErrorLog: &Stats.Error, } // Level is exported because it appears in the arguments to V and is @@ -404,9 +380,11 @@ type flushSyncWriter interface { io.Writer } -// init sets up the defaults and runs flushDaemon. +// init sets up the defaults. func init() { - logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. + logging.stderrThreshold = severityValue{ + Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. + } logging.setVState(0, nil, false) logging.logDir = "" logging.logFile = "" @@ -417,7 +395,7 @@ func init() { logging.addDirHeader = false logging.skipLogHeaders = false logging.oneOutput = false - go logging.flushDaemon() + logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil) } // InitFlags is for explicitly initializing the flags. @@ -457,20 +435,23 @@ type loggingT struct { alsoToStderr bool // The -alsologtostderr flag. // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. + stderrThreshold severityValue // The -stderrthreshold flag. - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex + // bufferCache maintains the free list. It uses its own mutex // so buffers can be grabbed and printed to without holding the main lock, // for better parallelization. - freeListMu sync.Mutex + bufferCache buffer.Buffers // mu protects the remaining elements of this structure and is // used to synchronize logging. mu sync.Mutex // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter + file [severity.NumSeverity]flushSyncWriter + // flushD holds a flushDaemon that frequently flushes log file buffers. + flushD *flushDaemon + // flushInterval is the interval for periodic flushing. If zero, + // the global default will be used. + flushInterval time.Duration // pcs is used in V to avoid an allocation when computing the caller's PC. pcs [1]uintptr // vmap is a cache of the V Level for each V() call site, identified by PC. 
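The severityValue wrapper above backs klog's -stderrthreshold flag, which accepts either a severity name or its numeric value. A hedged sketch of driving it from application code; the flag set name and the chosen threshold are arbitrary.

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	fs := flag.NewFlagSet("klog-example", flag.ExitOnError)
	klog.InitFlags(fs)

	// severity.ByName resolves names case-insensitively, so "warning"
	// works as well as "WARNING" or the numeric value "1".
	if err := fs.Set("stderrthreshold", "WARNING"); err != nil {
		panic(err)
	}

	// With -logtostderr left at its default everything goes to stderr
	// anyway; the threshold matters once output is directed to files.
	klog.Warning("disk usage is high")
	klog.Flush()
}
```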
@@ -508,9 +489,6 @@ type loggingT struct { // If true, add the file directory to the header addDirHeader bool - // If set, all output will be redirected unconditionally to the provided logr.Logger - logr *logr.Logger - // If true, messages will not be propagated to lower severity log levels oneOutput bool @@ -518,13 +496,6 @@ type loggingT struct { filter LogFilter } -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - var logging loggingT // setVState sets a consistent state for V logging. @@ -547,35 +518,6 @@ func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool l.verbosity.set(verbosity) } -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - var timeNow = time.Now // Stubbed out for testing. /* @@ -595,7 +537,7 @@ where the fields are defined as follows: line The line number msg The user-supplied message */ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { +func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, string, int) { _, file, line, ok := runtime.Caller(3 + depth) if !ok { file = "???" @@ -615,133 +557,68 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { } // formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() +func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { + buf := l.bufferCache.GetBuffer() if l.skipHeaders { return buf } - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) + now := timeNow() + buf.FormatHeader(s, file, line, now) return buf } -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. 
-func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) +func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { + l.printlnDepth(s, logger, filter, 1, args...) } -func (l *loggingT) println(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { - buf, file, line := l.header(s, 0) +func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) // if logger is set, we clear the generated header as we rely on the backing // logger implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprintln(buf, args...) - l.output(s, logger, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) print(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { l.printDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printDepth(s severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { + if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) +func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { + l.printfDepth(s, logger, filter, 1, format, args...) 
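The depth parameter threaded through printDepth/printlnDepth/printfDepth above is what lets wrapper helpers attribute a log line to their caller. A minimal sketch; logAndCount is a hypothetical helper, not part of klog.

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

var warningCount int

// logAndCount is a hypothetical wrapper around klog.
func logAndCount(args ...interface{}) {
	warningCount++
	// depth=1 skips this helper's frame, so the header shows the caller's
	// file and line instead of logAndCount's.
	klog.WarningDepth(1, args...)
}

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	logAndCount("disk usage above 90%")
}
```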
+} + +func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) { + buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { format, args = filter.FilterF(format, args) @@ -750,19 +627,19 @@ func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, for if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logger, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -783,7 +660,7 @@ func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, dept logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...) return } - l.printS(err, errorLog, depth+1, msg, keysAndValues...) + l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. @@ -795,48 +672,25 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s logger.WithCallDepth(depth+2).Info(msg, keysAndValues...) return } - l.printS(nil, infoLog, depth+1, msg, keysAndValues...) + l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) } // printS is called from infoS and errorS if loggr is not specified. // set log severity by s -func (l *loggingT) printS(err error, s severity, depth int, msg string, keysAndValues ...interface{}) { - b := &bytes.Buffer{} - b.WriteString(fmt.Sprintf("%q", msg)) +func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { + // Only create a new buffer if we don't have one cached. + b := l.bufferCache.GetBuffer() + // The message is always quoted, even if it contains line breaks. + // If developers want multi-line output, they should use a small, fixed + // message and put the multi-line output into a value. + b.WriteString(strconv.Quote(msg)) if err != nil { - b.WriteByte(' ') - b.WriteString(fmt.Sprintf("err=%q", err.Error())) - } - kvListFormat(b, keysAndValues...) 
- l.printDepth(s, logging.logr, nil, depth+1, b) -} - -const missingValue = "(MISSING)" - -func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { - for i := 0; i < len(keysAndValues); i += 2 { - var v interface{} - k := keysAndValues[i] - if i+1 < len(keysAndValues) { - v = keysAndValues[i+1] - } else { - v = missingValue - } - b.WriteByte(' ') - - switch v.(type) { - case string, error: - b.WriteString(fmt.Sprintf("%s=%q", k, v)) - case []byte: - b.WriteString(fmt.Sprintf("%s=%+q", k, v)) - default: - if _, ok := v.(fmt.Stringer); ok { - b.WriteString(fmt.Sprintf("%s=%q", k, v)) - } else { - b.WriteString(fmt.Sprintf("%s=%+v", k, v)) - } - } + serialize.KVListFormat(&b.Buffer, "err", err) } + serialize.KVListFormat(&b.Buffer, keysAndValues...) + l.printDepth(s, globalLogger, nil, depth+1, &b.Buffer) + // Make the buffer available for reuse. + l.bufferCache.PutBuffer(b) } // redirectBuffer is used to set an alternate destination for the logs @@ -856,36 +710,11 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { return rb.w.Write(bytes) } -// SetLogger will set the backing logr implementation for klog. -// If set, all log lines will be suppressed from the regular Output, and -// redirected to the logr implementation. -// Use as: -// ... -// klog.SetLogger(zapr.NewLogger(zapLog)) -// -// To remove a backing logr implemention, use ClearLogger. Setting an -// empty logger with SetLogger(logr.Logger{}) does not work. -func SetLogger(logr logr.Logger) { - logging.mu.Lock() - defer logging.mu.Unlock() - - logging.logr = &logr -} - -// ClearLogger removes a backing logr implementation if one was set earlier -// with SetLogger. -func ClearLogger() { - logging.mu.Lock() - defer logging.mu.Unlock() - - logging.logr = nil -} - // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() - for s := fatalLog; s >= infoLog; s-- { + for s := severity.FatalLog; s >= severity.InfoLog; s-- { rb := &redirectBuffer{ w: w, } @@ -897,7 +726,7 @@ func SetOutput(w io.Writer) { func SetOutputBySeverity(name string, w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() - sev, ok := severityByName(name) + sev, ok := severity.ByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } @@ -916,8 +745,16 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { + var isLocked = true l.mu.Lock() + defer func() { + if isLocked { + // Unlock before returning in case that it wasn't done already. 
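SetOutput and SetOutputBySeverity (wired through redirectBuffer above) can point klog at an arbitrary writer, which is handy in tests. A sketch under the assumption that stderr logging is disabled so the buffer is the only destination:

```go
package main

import (
	"bytes"
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	// Disable stderr output so the redirect buffer receives everything.
	flag.Set("logtostderr", "false")
	flag.Set("alsologtostderr", "false")
	flag.Parse()

	var buf bytes.Buffer
	klog.SetOutput(&buf)

	klog.Info("captured line")
	klog.Flush()

	fmt.Print(buf.String())
}
```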
+ l.mu.Unlock() + } + }() + if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { buf.Write(stacks(false)) @@ -927,8 +764,8 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, if log != nil { // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} - if s == errorLog { - l.logr.WithCallDepth(depth+3).Error(nil, string(data)) + if s == severity.ErrorLog { + globalLogger.WithCallDepth(depth+3).Error(nil, string(data)) } else { log.WithCallDepth(depth + 3).Info(string(data)) } @@ -942,13 +779,13 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, if logging.logFile != "" { // Since we are using a single log file, all of the items in l.file array // will point to the same file, so just use one of them to write data. - if l.file[infoLog] == nil { - if err := l.createFiles(infoLog); err != nil { + if l.file[severity.InfoLog] == nil { + if err := l.createFiles(severity.InfoLog); err != nil { os.Stderr.Write(data) // Make sure the message appears somewhere. l.exit(err) } } - l.file[infoLog].Write(data) + l.file[severity.InfoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { @@ -961,27 +798,28 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, l.file[s].Write(data) } else { switch s { - case fatalLog: - l.file[fatalLog].Write(data) + case severity.FatalLog: + l.file[severity.FatalLog].Write(data) fallthrough - case errorLog: - l.file[errorLog].Write(data) + case severity.ErrorLog: + l.file[severity.ErrorLog].Write(data) fallthrough - case warningLog: - l.file[warningLog].Write(data) + case severity.WarningLog: + l.file[severity.WarningLog].Write(data) fallthrough - case infoLog: - l.file[infoLog].Write(data) + case severity.InfoLog: + l.file[severity.InfoLog].Write(data) } } } } - if s == fatalLog { + if s == severity.FatalLog { // If we got here via Exit rather than Fatal, print no stacks. if atomic.LoadUint32(&fatalNoStacks) > 0 { l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) + isLocked = false + timeoutFlush(ExitFlushTimeout) + OsExit(1) } // Dump all goroutine stacks before exiting. trace := stacks(true) @@ -991,40 +829,24 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, } // Write the stack trace for all goroutines to the files. logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { + for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. f.Write(trace) } } l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + isLocked = false + timeoutFlush(ExitFlushTimeout) + OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } - l.putBuffer(buf) - l.mu.Unlock() + l.bufferCache.PutBuffer(buf) + if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) atomic.AddInt64(&stats.bytes, int64(len(data))) } } -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when klog.Fatal is called from a hook that holds -// a lock. 
-func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) - } -} - // stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. func stacks(all bool) []byte { // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. @@ -1061,7 +883,7 @@ func (l *loggingT) exit(err error) { return } l.flushAll() - os.Exit(2) + OsExit(2) } // syncBuffer joins a bufio.Writer to its underlying file, providing access to the @@ -1072,7 +894,7 @@ type syncBuffer struct { logger *loggingT *bufio.Writer file *os.File - sev severity + sev severity.Severity nbytes uint64 // The number of bytes written to this file maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } @@ -1118,7 +940,7 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { sb.file.Close() } var err error - sb.file, _, err = create(severityName[sb.sev], now, startup) + sb.file, _, err = create(severity.Name[sb.sev], now, startup) if err != nil { return err } @@ -1156,11 +978,16 @@ const bufferSize = 256 * 1024 // createFiles creates all the log files for severity from sev down to infoLog. // l.mu is held. -func (l *loggingT) createFiles(sev severity) error { +func (l *loggingT) createFiles(sev severity.Severity) error { + interval := l.flushInterval + if interval == 0 { + interval = flushInterval + } + l.flushD.run(interval) now := time.Now() // Files are created in decreasing severity order, so as soon as we find one // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { + for s := sev; s >= severity.InfoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ logger: l, sev: s, @@ -1177,12 +1004,93 @@ func (l *loggingT) createFiles(sev severity) error { const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() +type flushDaemon struct { + mu sync.Mutex + clock clock.WithTicker + flush func() + stopC chan struct{} + stopDone chan struct{} +} + +// newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a +// clock.RealClock is used. +func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon { + if tickClock == nil { + tickClock = clock.RealClock{} + } + return &flushDaemon{ + flush: flush, + clock: tickClock, } } +// run starts a goroutine that periodically calls the daemons flush function. +// Calling run on an already running daemon will have no effect. +func (f *flushDaemon) run(interval time.Duration) { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC != nil { // daemon already running + return + } + + f.stopC = make(chan struct{}, 1) + f.stopDone = make(chan struct{}, 1) + + ticker := f.clock.NewTicker(interval) + go func() { + defer ticker.Stop() + defer func() { f.stopDone <- struct{}{} }() + for { + select { + case <-ticker.C(): + f.flush() + case <-f.stopC: + f.flush() + return + } + } + }() +} + +// stop stops the running flushDaemon and waits until the daemon has shut down. +// Calling stop on a daemon that isn't running will have no effect. 
+func (f *flushDaemon) stop() { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC == nil { // daemon not running + return + } + + f.stopC <- struct{}{} + <-f.stopDone + + f.stopC = nil + f.stopDone = nil +} + +// isRunning returns true if the flush daemon is running. +func (f *flushDaemon) isRunning() bool { + f.mu.Lock() + defer f.mu.Unlock() + return f.stopC != nil +} + +// StopFlushDaemon stops the flush daemon, if running. +// This prevents klog from leaking goroutines on shutdown. After stopping +// the daemon, you can still manually flush buffers by calling Flush(). +func StopFlushDaemon() { + logging.flushD.stop() +} + +// StartFlushDaemon ensures that the flush daemon runs with the given delay +// between flush calls. If it is already running, it gets restarted. +func StartFlushDaemon(interval time.Duration) { + StopFlushDaemon() + logging.flushD.run(interval) +} + // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() @@ -1194,13 +1102,16 @@ func (l *loggingT) lockAndFlushAll() { // l.mu is held. func (l *loggingT) flushAll() { // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { + for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] if file != nil { file.Flush() // ignore error file.Sync() // ignore error } } + if globalLoggerOptions.flush != nil { + globalLoggerOptions.flush() + } } // CopyStandardLogTo arranges for messages written to the Go "log" package's @@ -1211,7 +1122,7 @@ func (l *loggingT) flushAll() { // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) + sev, ok := severity.ByName(name) if !ok { panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) } @@ -1223,7 +1134,7 @@ func CopyStandardLogTo(name string) { // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. -type logBridge severity +type logBridge severity.Severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). @@ -1247,7 +1158,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity(lb), logging.logr, logging.filter, file, line, true, text) + logging.printWithFileLine(severity.Severity(lb), globalLogger, logging.filter, file, line, true, text) return len(b), nil } @@ -1282,22 +1193,21 @@ func (l *loggingT) setV(pc uintptr) Level { type Verbose struct { enabled bool logr *logr.Logger - filter LogFilter } func newVerbose(level Level, b bool) Verbose { - if logging.logr == nil { - return Verbose{b, nil, logging.filter} + if globalLogger == nil { + return Verbose{b, nil} } - v := logging.logr.V(int(level)) - return Verbose{b, &v, logging.filter} + v := globalLogger.V(int(level)) + return Verbose{b, &v} } // V reports whether verbosity at the call site is at least the requested level. // The returned value is a struct of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. 
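StartFlushDaemon and StopFlushDaemon above are new public controls for the flush goroutine, which no longer starts unconditionally at init. A sketch of a typical lifecycle; the one-second interval is an arbitrary choice.

```go
package main

import (
	"time"

	"k8s.io/klog/v2"
)

func main() {
	// Flush buffered file output every second instead of the default
	// five-second interval.
	klog.StartFlushDaemon(1 * time.Second)

	klog.Info("service starting")

	// On shutdown, stop the daemon so no goroutine leaks, then flush
	// anything still buffered.
	klog.StopFlushDaemon()
	klog.Flush()
}
```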
// Thus, one may write either -// if glog.V(2).Enabled() { klog.Info("log this") } +// if klog.V(2).Enabled() { klog.Info("log this") } // or // klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does @@ -1353,7 +1263,15 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(infoLog, v.logr, v.filter, args...) + logging.print(severity.InfoLog, v.logr, logging.filter, args...) + } +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) } } @@ -1361,7 +1279,15 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(infoLog, v.logr, v.filter, args...) + logging.println(severity.InfoLog, v.logr, logging.filter, args...) + } +} + +// InfolnDepth is equivalent to the global InfolnDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfolnDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) } } @@ -1369,7 +1295,15 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(infoLog, v.logr, v.filter, format, args...) + logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...) + } +} + +// InfofDepth is equivalent to the global InfofDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { + if v.enabled { + logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...) } } @@ -1377,20 +1311,28 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, v.filter, 0, msg, keysAndValues...) + logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...) } } // InfoSDepth acts as InfoS but uses depth to determine which call frame to log. // InfoSDepth(0, "msg") is the same as InfoS("msg"). func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, depth, msg, keysAndValues...) + logging.infoS(globalLogger, logging.filter, depth, msg, keysAndValues...) +} + +// InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { + if v.enabled { + logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...) + } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, 0, msg, args...) + logging.errorS(err, v.logr, logging.filter, 0, msg, args...) } } @@ -1398,32 +1340,44 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. 
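The two call patterns described in the V() documentation above, side by side. Verbosity level 4 and the message contents are arbitrary; the new Depth variants added in this hunk follow the same pattern.

```go
package main

import (
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

// expensiveDump stands in for work that is only worth doing when the
// verbose log line will actually be emitted.
func expensiveDump() string {
	return fmt.Sprintf("%v", []int{1, 2, 3})
}

func main() {
	klog.InitFlags(nil)
	flag.Set("v", "4")
	flag.Parse()
	defer klog.Flush()

	// Cheaper when logging is off: the argument is only built if enabled.
	if klog.V(4).Enabled() {
		klog.Info("state: ", expensiveDump())
	}

	// Shorter form; arguments are evaluated even when the level is off.
	klog.V(4).Info("verbose message")

	// Structured variant on Verbose, using the same key/value convention.
	klog.V(4).InfoS("verbose structured message", "key", "value")
}
```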
func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, 0, msg, keysAndValues...) + logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(infoLog, logging.logr, logging.filter, args...) + logging.print(severity.InfoLog, globalLogger, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(infoLog, logging.logr, logging.filter, args...) + logging.println(severity.InfoLog, globalLogger, logging.filter, args...) +} + +// InfolnDepth acts as Infoln but uses depth to determine which call frame to log. +// InfolnDepth(0, "msg") is the same as Infoln("msg"). +func InfolnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(infoLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.InfoLog, globalLogger, logging.filter, format, args...) +} + +// InfofDepth acts as Infof but uses depth to determine which call frame to log. +// InfofDepth(0, "msg", args...) is the same as Infof("msg", args...). +func InfofDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.InfoLog, globalLogger, logging.filter, depth, format, args...) } // InfoS structured logs to the INFO log. @@ -1435,55 +1389,79 @@ func Infof(format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, 0, msg, keysAndValues...) + logging.infoS(globalLogger, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(warningLog, logging.logr, logging.filter, args...) + logging.print(severity.WarningLog, globalLogger, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(warningLog, logging.logr, logging.filter, args...) + logging.println(severity.WarningLog, globalLogger, logging.filter, args...) 
+} + +// WarninglnDepth acts as Warningln but uses depth to determine which call frame to log. +// WarninglnDepth(0, "msg") is the same as Warningln("msg"). +func WarninglnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.WarningLog, globalLogger, logging.filter, format, args...) +} + +// WarningfDepth acts as Warningf but uses depth to determine which call frame to log. +// WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...). +func WarningfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.WarningLog, globalLogger, logging.filter, depth, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(errorLog, logging.logr, logging.filter, args...) + logging.print(severity.ErrorLog, globalLogger, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(errorLog, logging.logr, logging.filter, args...) + logging.println(severity.ErrorLog, globalLogger, logging.filter, args...) +} + +// ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log. +// ErrorlnDepth(0, "msg") is the same as Errorln("msg"). +func ErrorlnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.ErrorLog, globalLogger, logging.filter, format, args...) +} + +// ErrorfDepth acts as Errorf but uses depth to determine which call frame to log. +// ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...). +func ErrorfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.ErrorLog, globalLogger, logging.filter, depth, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1496,71 +1474,97 @@ func Errorf(format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, globalLogger, logging.filter, 0, msg, keysAndValues...) } // ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. // ErrorSDepth(0, "msg") is the same as ErrorS("msg"). 
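A short sketch of the ErrorS call shape documented above; the error and key/value pairs are invented, and the sample output line follows the format shown in the doc comment.

```go
package main

import (
	"errors"
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	// Produces a line resembling:
	//   E1025 00:15:15.525108       1 main.go:18] "Failed to update pod status" err="timeout" pod="kube-dns"
	err := errors.New("timeout")
	klog.ErrorS(err, "Failed to update pod status", "pod", "kube-dns")

	// A nil error is allowed; only the message and key/value pairs appear.
	klog.ErrorS(nil, "Retrying update", "pod", "kube-dns", "attempt", 2)
}
```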
func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, depth, msg, keysAndValues...) + logging.errorS(err, globalLogger, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(fatalLog, logging.logr, logging.filter, args...) + logging.print(severity.FatalLog, globalLogger, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(fatalLog, logging.logr, logging.filter, args...) + logging.println(severity.FatalLog, globalLogger, logging.filter, args...) +} + +// FatallnDepth acts as Fatalln but uses depth to determine which call frame to log. +// FatallnDepth(0, "msg") is the same as Fatalln("msg"). +func FatallnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) +} + +// FatalfDepth acts as Fatalf but uses depth to determine which call frame to log. +// FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...). +func FatalfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. // It allows Exit and relatives to use the Fatal logs. var fatalNoStacks uint32 -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, logging.logr, logging.filter, args...) + logging.print(severity.FatalLog, globalLogger, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). 
func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) } -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, logging.logr, logging.filter, args...) + logging.println(severity.FatalLog, globalLogger, logging.filter, args...) } -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// ExitlnDepth acts as Exitln but uses depth to determine which call frame to log. +// ExitlnDepth(0, "msg") is the same as Exitln("msg"). +func ExitlnDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) +} + +// ExitfDepth acts as Exitf but uses depth to determine which call frame to log. +// ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...). +func ExitfDepth(depth int, format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) } // LogFilter is a collection of functions that can filter all logging calls, @@ -1571,79 +1575,10 @@ type LogFilter interface { FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) } +// SetLogFilter installs a filter that is used for all log calls. +// +// Modifying the filter is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. func SetLogFilter(filter LogFilter) { - logging.mu.Lock() - defer logging.mu.Unlock() - logging.filter = filter } - -// ObjectRef references a kubernetes object -type ObjectRef struct { - Name string `json:"name"` - Namespace string `json:"namespace,omitempty"` -} - -func (ref ObjectRef) String() string { - if ref.Namespace != "" { - return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) - } - return ref.Name -} - -// MarshalLog ensures that loggers with support for structured output will log -// as a struct by removing the String method via a custom type. 
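The Fatal and Exit families above differ only in how they terminate: both log at FATAL severity and flush (bounded by ExitFlushTimeout), but Fatal* dumps all goroutine stacks and calls OsExit(255) while Exit* skips the stacks and calls OsExit(1). A sketch with an invented config path:

```go
package main

import "k8s.io/klog/v2"

// configMissing is a stand-in check for the example.
func configMissing() bool { return true }

func main() {
	if configMissing() {
		// Log, flush, then OsExit(1) without dumping goroutine stacks.
		klog.Exitf("config file %q not found", "/etc/example/config.yaml")
	}

	// Fatalf, by contrast, dumps all goroutine stacks and calls OsExit(255).
	klog.Fatalf("unexpected state")
}
```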
-func (ref ObjectRef) MarshalLog() interface{} { - type or ObjectRef - return or(ref) -} - -var _ logr.Marshaler = ObjectRef{} - -// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface -// this interface may expand in the future, but will always be a subset of the -// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface -type KMetadata interface { - GetName() string - GetNamespace() string -} - -// KObj returns ObjectRef from ObjectMeta -func KObj(obj KMetadata) ObjectRef { - if obj == nil { - return ObjectRef{} - } - if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { - return ObjectRef{} - } - - return ObjectRef{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - } -} - -// KRef returns ObjectRef from name and namespace -func KRef(namespace, name string) ObjectRef { - return ObjectRef{ - Name: name, - Namespace: namespace, - } -} - -// KObjs returns slice of ObjectRef from an slice of ObjectMeta -func KObjs(arg interface{}) []ObjectRef { - s := reflect.ValueOf(arg) - if s.Kind() != reflect.Slice { - return nil - } - objectRefs := make([]ObjectRef, 0, s.Len()) - for i := 0; i < s.Len(); i++ { - if v, ok := s.Index(i).Interface().(KMetadata); ok { - objectRefs = append(objectRefs, KObj(v)) - } else { - return nil - } - } - return objectRefs -} diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go index de830d9221..1025d644f3 100644 --- a/vendor/k8s.io/klog/v2/klog_file.go +++ b/vendor/k8s.io/klog/v2/klog_file.go @@ -22,9 +22,7 @@ import ( "errors" "fmt" "os" - "os/user" "path/filepath" - "runtime" "strings" "sync" "time" @@ -57,38 +55,6 @@ func init() { } } -func getUserName() string { - userNameOnce.Do(func() { - // On Windows, the Go 'user' package requires netapi32.dll. - // This affects Windows Nano Server: - // https://github.com/golang/go/issues/21867 - // Fallback to using environment variables. - if runtime.GOOS == "windows" { - u := os.Getenv("USERNAME") - if len(u) == 0 { - return - } - // Sanitize the USERNAME since it may contain filepath separators. - u = strings.Replace(u, `\`, "_", -1) - - // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' - d := os.Getenv("USERDOMAIN") - if len(d) != 0 { - userName = d + "_" + u - } else { - userName = u - } - } else { - current, err := user.Current() - if err == nil { - userName = current.Username - } - } - }) - - return userName -} - // shortHostname returns its argument, truncating at the first period. // For instance, given "www.google.com" it returns "www". 
func shortHostname(hostname string) string { diff --git a/vendor/k8s.io/klog/v2/klog_file_others.go b/vendor/k8s.io/klog/v2/klog_file_others.go new file mode 100644 index 0000000000..aa46726851 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file_others.go @@ -0,0 +1,19 @@ +//go:build !windows +// +build !windows + +package klog + +import ( + "os/user" +) + +func getUserName() string { + userNameOnce.Do(func() { + current, err := user.Current() + if err == nil { + userName = current.Username + } + }) + + return userName +} diff --git a/vendor/k8s.io/klog/v2/klog_file_windows.go b/vendor/k8s.io/klog/v2/klog_file_windows.go new file mode 100644 index 0000000000..2517f9c538 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +package klog + +import ( + "os" + "strings" +) + +func getUserName() string { + userNameOnce.Do(func() { + // On Windows, the Go 'user' package requires netapi32.dll. + // This affects Windows Nano Server: + // https://github.com/golang/go/issues/21867 + // Fallback to using environment variables. + u := os.Getenv("USERNAME") + if len(u) == 0 { + return + } + // Sanitize the USERNAME since it may contain filepath separators. + u = strings.Replace(u, `\`, "_", -1) + + // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' + d := os.Getenv("USERDOMAIN") + if len(d) != 0 { + userName = d + "_" + u + } else { + userName = u + } + }) + + return userName +} diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go new file mode 100644 index 0000000000..351d7a7405 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -0,0 +1,87 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/serialize" +) + +// NewKlogr returns a logger that is functionally identical to +// klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The +// difference is that it uses a simpler implementation. +func NewKlogr() Logger { + return New(&klogger{}) +} + +// klogger is a subset of klogr/klogr.go. It had to be copied to break an +// import cycle (klogr wants to use klog, and klog wants to use klogr). +type klogger struct { + level int + callDepth int + prefix string + values []interface{} +} + +func (l *klogger) Init(info logr.RuntimeInfo) { + l.callDepth += info.CallDepth +} + +func (l klogger) Info(level int, msg string, kvList ...interface{}) { + trimmed := serialize.TrimDuplicates(l.values, kvList) + if l.prefix != "" { + msg = l.prefix + ": " + msg + } + V(Level(level)).InfoSDepth(l.callDepth+1, msg, append(trimmed[0], trimmed[1]...)...) 
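NewKlogr above returns a logr.Logger whose sink is this klogger type, so logr-style calls are rendered by klog itself. A brief sketch; the names and values are illustrative.

```go
package main

import (
	"errors"
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	logger := klog.NewKlogr().WithName("controller").WithValues("node", "worker-1")

	// Rendered through InfoSDepth, so file:line points at this call site.
	logger.Info("sync started", "pods", 12)
	logger.Error(errors.New("timeout"), "sync failed")

	// logr verbosity levels map onto klog's -v levels.
	logger.V(4).Info("detailed state")
}
```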
+} + +func (l klogger) Enabled(level int) bool { + return V(Level(level)).Enabled() +} + +func (l klogger) Error(err error, msg string, kvList ...interface{}) { + trimmed := serialize.TrimDuplicates(l.values, kvList) + if l.prefix != "" { + msg = l.prefix + ": " + msg + } + ErrorSDepth(l.callDepth+1, err, msg, append(trimmed[0], trimmed[1]...)...) +} + +// WithName returns a new logr.Logger with the specified name appended. klogr +// uses '/' characters to separate name elements. Callers should not pass '/' +// in the provided name string, but this library does not actually enforce that. +func (l klogger) WithName(name string) logr.LogSink { + if len(l.prefix) > 0 { + l.prefix = l.prefix + "/" + } + l.prefix += name + return &l +} + +func (l klogger) WithValues(kvList ...interface{}) logr.LogSink { + l.values = serialize.WithValues(l.values, kvList) + return &l +} + +func (l klogger) WithCallDepth(depth int) logr.LogSink { + l.callDepth += depth + return &l +} + +var _ logr.LogSink = &klogger{} +var _ logr.CallDepthLogSink = &klogger{} diff --git a/vendor/k8s.io/utils/clock/clock.go b/vendor/k8s.io/utils/clock/clock.go index dd181ce8d8..b8b6af5c81 100644 --- a/vendor/k8s.io/utils/clock/clock.go +++ b/vendor/k8s.io/utils/clock/clock.go @@ -63,6 +63,16 @@ type WithDelayedExecution interface { AfterFunc(d time.Duration, f func()) Timer } +// WithTickerAndDelayedExecution allows for injecting fake or real clocks +// into code that needs Ticker and AfterFunc functionality +type WithTickerAndDelayedExecution interface { + WithTicker + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + // Ticker defines the Ticker interface. type Ticker interface { C() <-chan time.Time diff --git a/vendor/k8s.io/utils/clock/testing/fake_clock.go b/vendor/k8s.io/utils/clock/testing/fake_clock.go index fb493c4bab..79e11deb65 100644 --- a/vendor/k8s.io/utils/clock/testing/fake_clock.go +++ b/vendor/k8s.io/utils/clock/testing/fake_clock.go @@ -239,7 +239,8 @@ func (f *FakeClock) Sleep(d time.Duration) { // IntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration. // IntervalClock technically implements the other methods of clock.Clock, but each implementation is just a panic. -// See SimpleIntervalClock for an alternative that only has the methods of PassiveClock. +// +// Deprecated: See SimpleIntervalClock for an alternative that only has the methods of PassiveClock. type IntervalClock struct { Time time.Time Duration time.Duration @@ -282,9 +283,9 @@ func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { // NewTicker has no implementation yet and is omitted. // TODO: make interval clock use FakeClock so this can be implemented. -//func (*IntervalClock) NewTicker(d time.Duration) clock.Ticker { -// panic("IntervalClock doesn't implement NewTicker") -//} +func (*IntervalClock) NewTicker(d time.Duration) clock.Ticker { + panic("IntervalClock doesn't implement NewTicker") +} // Sleep is unimplemented, will panic. 
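The clock interfaces above exist so that code which needs a ticker can have a fake clock injected in tests. A sketch assuming the k8s.io/utils/clock/testing fake clock; the reporter type is invented for the example.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/utils/clock"
	testingclock "k8s.io/utils/clock/testing"
)

// reporter is an invented type that only needs ticker functionality.
type reporter struct {
	clock clock.WithTicker
}

// runOnce waits for one tick and then runs work.
func (r *reporter) runOnce(interval time.Duration, work func()) {
	t := r.clock.NewTicker(interval)
	defer t.Stop()
	<-t.C()
	work()
}

func main() {
	// Production code would use the real clock.
	_ = reporter{clock: clock.RealClock{}}

	// Tests drive time manually with the fake implementation.
	fake := testingclock.NewFakeClock(time.Now())
	r := reporter{clock: fake}

	done := make(chan struct{})
	go func() {
		r.runOnce(time.Minute, func() { fmt.Println("tick") })
		close(done)
	}()

	// Wait until the ticker is registered, then step past its interval.
	for !fake.HasWaiters() {
		time.Sleep(time.Millisecond)
	}
	fake.Step(2 * time.Minute)
	<-done
}
```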
func (*IntervalClock) Sleep(d time.Duration) { diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index 2cab2c5800..f5802d2e81 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -19,6 +19,7 @@ package pointer import ( "fmt" "reflect" + "time" ) // AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, @@ -184,7 +185,7 @@ func StringEqual(a, b *string) bool { return *a == *b } -// Float32 returns a pointer to the a float32. +// Float32 returns a pointer to a float32. func Float32(i float32) *float32 { return &i } @@ -214,7 +215,7 @@ func Float32Equal(a, b *float32) bool { return *a == *b } -// Float64 returns a pointer to the a float64. +// Float64 returns a pointer to a float64. func Float64(i float64) *float64 { return &i } @@ -243,3 +244,29 @@ func Float64Equal(a, b *float64) bool { } return *a == *b } + +// Duration returns a pointer to a time.Duration. +func Duration(d time.Duration) *time.Duration { + return &d +} + +// DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else +// returns def. +func DurationDeref(ptr *time.Duration, def time.Duration) time.Duration { + if ptr != nil { + return *ptr + } + return def +} + +// DurationEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +func DurationEqual(a, b *time.Duration) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ded5f5cbc1..2b5a186116 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -131,7 +131,7 @@ github.com/fsnotify/fsnotify # github.com/ghodss/yaml v1.0.0 ## explicit github.com/ghodss/yaml -# github.com/go-logr/logr v1.2.0 +# github.com/go-logr/logr v1.2.3 ## explicit; go 1.16 github.com/go-logr/logr # github.com/go-logr/zapr v1.2.0 @@ -170,17 +170,18 @@ github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/google/go-cmp v0.5.6 -## explicit; go 1.8 +# github.com/google/go-cmp v0.5.7 +## explicit; go 1.11 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/cmpopts github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/gofuzz v1.1.0 +# github.com/google/gofuzz v1.2.0 ## explicit; go 1.12 github.com/google/gofuzz +github.com/google/gofuzz/bytesource # github.com/google/uuid v1.1.2 ## explicit github.com/google/uuid @@ -243,7 +244,7 @@ github.com/modern-go/reflect2 # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid -# github.com/openshift/api v0.0.0-20220315184754-d7c10d0b647e +# github.com/openshift/api v0.0.0-20220411210816-c3bb724c282a ## explicit; go 1.16 github.com/openshift/api github.com/openshift/api/apiserver @@ -419,7 +420,7 @@ golang.org/x/crypto/pkcs12/internal/rc2 # golang.org/x/mod v0.4.2 ## explicit; go 1.12 golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20210825183410-e898025ed96a +# golang.org/x/net v0.0.0-20220412020605-290c469a71a5 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -438,14 +439,14 @@ golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sys 
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ded5f5cbc1..2b5a186116 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -131,7 +131,7 @@ github.com/fsnotify/fsnotify
 # github.com/ghodss/yaml v1.0.0
 ## explicit
 github.com/ghodss/yaml
-# github.com/go-logr/logr v1.2.0
+# github.com/go-logr/logr v1.2.3
 ## explicit; go 1.16
 github.com/go-logr/logr
 # github.com/go-logr/zapr v1.2.0
@@ -170,17 +170,18 @@ github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/go-cmp v0.5.6
-## explicit; go 1.8
+# github.com/google/go-cmp v0.5.7
+## explicit; go 1.11
 github.com/google/go-cmp/cmp
 github.com/google/go-cmp/cmp/cmpopts
 github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/gofuzz v1.1.0
+# github.com/google/gofuzz v1.2.0
 ## explicit; go 1.12
 github.com/google/gofuzz
+github.com/google/gofuzz/bytesource
 # github.com/google/uuid v1.1.2
 ## explicit
 github.com/google/uuid
@@ -243,7 +244,7 @@ github.com/modern-go/reflect2
 # github.com/oklog/ulid v1.3.1
 ## explicit
 github.com/oklog/ulid
-# github.com/openshift/api v0.0.0-20220315184754-d7c10d0b647e
+# github.com/openshift/api v0.0.0-20220411210816-c3bb724c282a
 ## explicit; go 1.16
 github.com/openshift/api
 github.com/openshift/api/apiserver
@@ -419,7 +420,7 @@ golang.org/x/crypto/pkcs12/internal/rc2
 # golang.org/x/mod v0.4.2
 ## explicit; go 1.12
 golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20210825183410-e898025ed96a
+# golang.org/x/net v0.0.0-20220412020605-290c469a71a5
 ## explicit; go 1.17
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
@@ -438,14 +439,14 @@ golang.org/x/oauth2/google/internal/externalaccount
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8
+# golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
 ## explicit; go 1.17
 golang.org/x/sys/execabs
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
+# golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
 ## explicit; go 1.17
 golang.org/x/term
 # golang.org/x/text v0.3.7
@@ -606,7 +607,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.23.0
+# k8s.io/api v0.23.5
 ## explicit; go 1.16
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -665,7 +666,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
-# k8s.io/apimachinery v0.23.0
+# k8s.io/apimachinery v0.23.5
 ## explicit; go 1.16
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -971,9 +972,13 @@ k8s.io/component-base/config/v1alpha1
 k8s.io/component-base/metrics
 k8s.io/component-base/metrics/legacyregistry
 k8s.io/component-base/version
-# k8s.io/klog/v2 v2.30.0
+# k8s.io/klog/v2 v2.60.1
 ## explicit; go 1.13
 k8s.io/klog/v2
+k8s.io/klog/v2/internal/buffer
+k8s.io/klog/v2/internal/clock
+k8s.io/klog/v2/internal/serialize
+k8s.io/klog/v2/internal/severity
 # k8s.io/kube-aggregator v0.23.0
 ## explicit; go 1.16
 k8s.io/kube-aggregator/pkg/apis/apiregistration
@@ -985,7 +990,7 @@ k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistr
 ## explicit; go 1.16
 k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util/proto
-# k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b
+# k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
 ## explicit; go 1.12
 k8s.io/utils/buffer
 k8s.io/utils/clock
@@ -1049,8 +1054,8 @@ sigs.k8s.io/controller-tools/pkg/schemapatcher
 sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml
 sigs.k8s.io/controller-tools/pkg/version
 sigs.k8s.io/controller-tools/pkg/webhook
-# sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6
-## explicit; go 1.16
+# sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2
+## explicit; go 1.17
 sigs.k8s.io/json
 sigs.k8s.io/json/internal/golang/encoding/json
 # sigs.k8s.io/kube-storage-version-migrator v0.0.4
@@ -1059,7 +1064,7 @@ sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1
 sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset
 sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme
 sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1
-# sigs.k8s.io/structured-merge-diff/v4 v4.2.0
+# sigs.k8s.io/structured-merge-diff/v4 v4.2.1
 ## explicit; go 1.13
 sigs.k8s.io/structured-merge-diff/v4/fieldpath
 sigs.k8s.io/structured-merge-diff/v4/schema
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
index a047d981bf..3a8b64547d 100644
--- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
@@ -234,6 +234,7 @@ type decodeState struct {
 	savedStrictErrors []error
 	seenStrictErrors  map[string]struct{}
+	strictFieldStack  []string
 
 	caseSensitive bool
@@ -261,6 +262,8 @@ func (d *decodeState) init(data []byte) *decodeState {
 		// Reuse the allocated space for the FieldStack slice.
 		d.errorContext.FieldStack = d.errorContext.FieldStack[:0]
 	}
+	// Reuse the allocated space for the strict FieldStack slice.
+	d.strictFieldStack = d.strictFieldStack[:0]
 	return d
 }
@@ -555,6 +558,12 @@ func (d *decodeState) array(v reflect.Value) error {
 		break
 	}
 
+	origStrictFieldStackLen := len(d.strictFieldStack)
+	defer func() {
+		// Reset to original length and reuse the allocated space for the strict FieldStack slice.
+		d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen]
+	}()
+
 	i := 0
 	for {
 		// Look ahead for ] - can only happen on first iteration.
@@ -580,6 +589,7 @@ func (d *decodeState) array(v reflect.Value) error {
 			}
 		}
 
+		d.appendStrictFieldStackIndex(i)
 		if i < v.Len() {
 			// Decode into element.
 			if err := d.value(v.Index(i)); err != nil {
@@ -591,6 +601,8 @@ func (d *decodeState) array(v reflect.Value) error {
 				return err
 			}
 		}
+		// Reset to original length and reuse the allocated space for the strict FieldStack slice.
+		d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen]
 		i++
 
 		// Next token must be , or ].
@@ -683,7 +695,7 @@ func (d *decodeState) object(v reflect.Value) error {
 				seenKeys = map[string]struct{}{}
 			}
 			if _, seen := seenKeys[fieldName]; seen {
-				d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName))
+				d.saveStrictError(d.newFieldError("duplicate field", fieldName))
 			} else {
 				seenKeys[fieldName] = struct{}{}
 			}
@@ -699,7 +711,7 @@ func (d *decodeState) object(v reflect.Value) error {
 			var seenKeys uint64
 			checkDuplicateField = func(fieldNameIndex int, fieldName string) {
 				if seenKeys&(1<<fieldNameIndex) != 0 {
+func (d *decodeState) newFieldError(msg, field string) error {
+	if len(d.strictFieldStack) > 0 {
+		return fmt.Errorf("%s %q", msg, strings.Join(d.strictFieldStack, "")+"."+field)
+	} else {
+		return fmt.Errorf("%s %q", msg, field)
+	}
+}
+
 // saveStrictError saves a strict decoding error,
 // for reporting at the end of the unmarshal if no other errors occurred.
 func (d *decodeState) saveStrictError(err error) {
@@ -90,6 +100,24 @@ func (d *decodeState) saveStrictError(err error) {
 	d.savedStrictErrors = append(d.savedStrictErrors, err)
 }
 
+func (d *decodeState) appendStrictFieldStackKey(key string) {
+	if !d.disallowDuplicateFields && !d.disallowUnknownFields {
+		return
+	}
+	if len(d.strictFieldStack) > 0 {
+		d.strictFieldStack = append(d.strictFieldStack, ".", key)
+	} else {
+		d.strictFieldStack = append(d.strictFieldStack, key)
+	}
+}
+
+func (d *decodeState) appendStrictFieldStackIndex(i int) {
+	if !d.disallowDuplicateFields && !d.disallowUnknownFields {
+		return
+	}
+	d.strictFieldStack = append(d.strictFieldStack, "[", strconv.Itoa(i), "]")
+}
+
 // UnmarshalStrictError holds errors resulting from use of strict disallow___ decoder directives.
 // If this is returned from Unmarshal(), it means the decoding was successful in all other respects.
 type UnmarshalStrictError struct {
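Illustrative sketch (not part of this diff): how the strictFieldStack bookkeeping added above turns into a field path in strict-decoding errors. This standalone snippet mimics appendStrictFieldStackKey/appendStrictFieldStackIndex and the newFieldError join with a plain slice; it is not the library's public API.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	var stack []string

	appendKey := func(key string) {
		if len(stack) > 0 {
			stack = append(stack, ".", key)
		} else {
			stack = append(stack, key)
		}
	}
	appendIndex := func(i int) {
		stack = append(stack, "[", strconv.Itoa(i), "]")
	}

	// Decoding spec.containers[0] pushes these entries onto the stack:
	appendKey("spec")
	appendKey("containers")
	appendIndex(0)

	// newFieldError joins the stack and appends the offending field name,
	// so a duplicate "image" key is reported with its full path.
	fmt.Println(strings.Join(stack, "") + "." + "image") // spec.containers[0].image
}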
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go
index afec9f3cfd..75244ef646 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go
@@ -17,8 +17,6 @@ limitations under the License.
 package typed
 
 import (
-	"math"
-
 	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
 	"sigs.k8s.io/structured-merge-diff/v4/schema"
 	"sigs.k8s.io/structured-merge-diff/v4/value"
@@ -170,74 +168,94 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
 	if lhs != nil {
 		lLen = lhs.Length()
 	}
-	out := make([]interface{}, 0, int(math.Max(float64(rLen), float64(lLen))))
+	outLen := lLen
+	if outLen < rLen {
+		outLen = rLen
+	}
+	out := make([]interface{}, 0, outLen)
+
+	rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs)
+	errs = append(errs, rhsErrs...)
+	lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs)
+	errs = append(errs, lhsErrs...)
+
+	sharedOrder := make([]*fieldpath.PathElement, 0, rLen)
+	for i := range rhsOrder {
+		pe := &rhsOrder[i]
+		if _, ok := observedLHS.Get(*pe); ok {
+			sharedOrder = append(sharedOrder, pe)
+		}
+	}
 
-	lhsOrder := make([]fieldpath.PathElement, 0, lLen)
+	var nextShared *fieldpath.PathElement
+	if len(sharedOrder) > 0 {
+		nextShared = sharedOrder[0]
+		sharedOrder = sharedOrder[1:]
+	}
 
-	// First, collect all LHS children.
-	observedLHS := fieldpath.MakePathElementValueMap(lLen)
-	if lhs != nil {
-		for i := 0; i < lhs.Length(); i++ {
-			child := lhs.At(i)
-			pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child)
-			if err != nil {
-				errs = append(errs, errorf("lhs: element %v: %v", i, err.Error())...)
-				// If we can't construct the path element, we can't
-				// even report errors deeper in the schema, so bail on
-				// this element.
+	lLen, rLen = len(lhsOrder), len(rhsOrder)
+	for lI, rI := 0, 0; lI < lLen || rI < rLen; {
+		if lI < lLen && rI < rLen {
+			pe := lhsOrder[lI]
+			if pe.Equals(rhsOrder[rI]) {
+				// merge LHS & RHS items
+				lChild, _ := observedLHS.Get(pe)
+				rChild, _ := observedRHS.Get(pe)
+				mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild)
+				errs = append(errs, errs...)
+				if mergeOut != nil {
+					out = append(out, *mergeOut)
+				}
+				lI++
+				rI++
+
+				nextShared = nil
+				if len(sharedOrder) > 0 {
+					nextShared = sharedOrder[0]
+					sharedOrder = sharedOrder[1:]
+				}
 				continue
 			}
-			if _, ok := observedLHS.Get(pe); ok {
-				errs = append(errs, errorf("lhs: duplicate entries for key %v", pe.String())...)
-			}
-			observedLHS.Insert(pe, child)
-			lhsOrder = append(lhsOrder, pe)
-		}
-	}
-
-	// Then merge with RHS children.
-	observedRHS := fieldpath.MakePathElementSet(rLen)
-	if rhs != nil {
-		for i := 0; i < rhs.Length(); i++ {
-			child := rhs.At(i)
-			pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child)
-			if err != nil {
-				errs = append(errs, errorf("rhs: element %v: %v", i, err.Error())...)
-				// If we can't construct the path element, we can't
-				// even report errors deeper in the schema, so bail on
-				// this element.
+			if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) {
+				// shared item, but not the one we want in this round
+				lI++
 				continue
 			}
-			if observedRHS.Has(pe) {
-				errs = append(errs, errorf("rhs: duplicate entries for key %v", pe.String())...)
+		}
+		if lI < lLen {
+			pe := lhsOrder[lI]
+			if _, ok := observedRHS.Get(pe); !ok {
+				// take LHS item
+				lChild, _ := observedLHS.Get(pe)
+				mergeOut, errs := w.mergeListItem(t, pe, lChild, nil)
+				errs = append(errs, errs...)
+				if mergeOut != nil {
+					out = append(out, *mergeOut)
+				}
+				lI++
 				continue
 			}
-			observedRHS.Insert(pe)
-			w2 := w.prepareDescent(pe, t.ElementType)
-			w2.rhs = child
-			if lchild, ok := observedLHS.Get(pe); ok {
-				w2.lhs = lchild
+		}
+		if rI < rLen {
+			// Take the RHS item, merge with matching LHS item if possible
+			pe := rhsOrder[rI]
+			lChild, _ := observedLHS.Get(pe) // may be nil
+			rChild, _ := observedRHS.Get(pe)
+			mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild)
+			errs = append(errs, errs...)
+			if mergeOut != nil {
+				out = append(out, *mergeOut)
 			}
-			errs = append(errs, w2.merge(pe.String)...)
-			if w2.out != nil {
-				out = append(out, *w2.out)
+			rI++
+			// Advance nextShared, if we are merging nextShared.
+			if nextShared != nil && nextShared.Equals(pe) {
+				nextShared = nil
+				if len(sharedOrder) > 0 {
+					nextShared = sharedOrder[0]
+					sharedOrder = sharedOrder[1:]
+				}
-			w.finishDescent(w2)
-		}
-	}
-
-	for _, pe := range lhsOrder {
-		if observedRHS.Has(pe) {
-			continue
-		}
-		value, _ := observedLHS.Get(pe)
-		w2 := w.prepareDescent(pe, t.ElementType)
-		w2.lhs = value
-		errs = append(errs, w2.merge(pe.String)...)
-		if w2.out != nil {
-			out = append(out, *w2.out)
 			}
-		w.finishDescent(w2)
 	}
 
 	if len(out) > 0 {
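Illustrative sketch (not part of this diff): the rewritten visitListItems indexes both sides once, then walks the LHS and RHS key orders with two cursors so shared keys follow RHS order while LHS-only items keep their position. The toy below applies the same two-cursor idea to plain string keys (assuming no duplicates); it is a loose simplification, not the library's algorithm, and may differ on corner cases.

package main

import "fmt"

// mergeKeyed merges two keyed lists: items only in lhs are kept in place,
// items present in rhs come out in rhs order.
func mergeKeyed(lhs, rhs []string) []string {
	inRHS := map[string]bool{}
	for _, k := range rhs {
		inRHS[k] = true
	}

	out := make([]string, 0, len(lhs)+len(rhs))
	lI, rI := 0, 0
	for lI < len(lhs) || rI < len(rhs) {
		switch {
		case lI < len(lhs) && !inRHS[lhs[lI]]:
			// LHS-only item: emit it before the next RHS item.
			out = append(out, lhs[lI])
			lI++
		case rI < len(rhs):
			// Shared or RHS-only item: RHS order wins.
			out = append(out, rhs[rI])
			if lI < len(lhs) && lhs[lI] == rhs[rI] {
				lI++ // the cursors advance together on a shared key
			}
			rI++
		default:
			// Remaining LHS items were shared and already emitted via RHS.
			lI++
		}
	}
	return out
}

func main() {
	fmt.Println(mergeKeyed([]string{"a", "b", "c"}, []string{"c", "b", "d"}))
	// Output (for this toy): [a c b d]
}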
@@ -248,6 +266,46 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
 	return errs
 }
 
+func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) {
+	var errs ValidationErrors
+	length := 0
+	if list != nil {
+		length = list.Length()
+	}
+	observed := fieldpath.MakePathElementValueMap(length)
+	pes := make([]fieldpath.PathElement, 0, length)
+	for i := 0; i < length; i++ {
+		child := list.At(i)
+		pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child)
+		if err != nil {
+			errs = append(errs, errorf("element %v: %v", i, err.Error())...)
+			// If we can't construct the path element, we can't
+			// even report errors deeper in the schema, so bail on
+			// this element.
+			continue
+		}
+		if _, found := observed.Get(pe); found {
+			errs = append(errs, errorf("duplicate entries for key %v", pe.String())...)
+			continue
+		}
+		observed.Insert(pe, child)
+		pes = append(pes, pe)
+	}
+	return pes, observed, errs
+}
+
+func (w *mergingWalker) mergeListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) (out *interface{}, errs ValidationErrors) {
+	w2 := w.prepareDescent(pe, t.ElementType)
+	w2.lhs = lChild
+	w2.rhs = rChild
+	errs = append(errs, w2.merge(pe.String)...)
+	if w2.out != nil {
+		out = w2.out
+	}
+	w.finishDescent(w2)
+	return
+}
+
 func (w *mergingWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) {
 	if v == nil {
 		return nil, nil